Merge pull request #3 from notcpuid/experimental

refactor(asmjit)
Authored by William Long on 2025-07-03 14:08:19 +03:00; committed by GitHub.
155 changed files with 626 additions and 87610 deletions

.gitmodules vendored Normal file (+3)

@@ -0,0 +1,3 @@
[submodule "pe-packer/asmjit"]
path = pe-packer/asmjit
url = https://github.com/asmjit/asmjit.git

pe-packer/asmjit Submodule (+1)

Submodule pe-packer/asmjit added at a3199e8857


@@ -1,60 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_A64_H_INCLUDED
#define ASMJIT_A64_H_INCLUDED
//! \addtogroup asmjit_a64
//!
//! ### Emitters
//!
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
//! - \ref a64::Builder - AArch64 builder.
//! - \ref a64::Compiler - AArch64 compiler.
//! - \ref a64::Emitter - AArch64 emitter (abstract).
//!
//! ### Supported Instructions
//!
//! - Emitters:
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands and also provides
//! utility functions. The member functions provided are part of all AArch64 emitters.
//!
//! - Instruction representation:
//! - \ref a64::Inst::Id - instruction identifiers.
//!
//! ### Register Operands
//!
//! - \ref arm::Reg - Base class for any AArch32/AArch64 register.
//! - \ref arm::Gp - General purpose register:
//! - \ref arm::GpW - 32-bit register.
//! - \ref arm::GpX - 64-bit register.
//! - \ref arm::Vec - Vector (SIMD) register:
//! - \ref arm::VecB - 8-bit SIMD register.
//! - \ref arm::VecH - 16-bit SIMD register.
//! - \ref arm::VecS - 32-bit SIMD register.
//! - \ref arm::VecD - 64-bit SIMD register.
//! - \ref arm::VecV - 128-bit SIMD register.
//!
//! ### Memory Operands
//!
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
//!
//! ### Other
//!
//! - \ref arm::Shift - Shift operation and value.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "./arm.h"
#include "./arm/a64assembler.h"
#include "./arm/a64builder.h"
#include "./arm/a64compiler.h"
#include "./arm/a64emitter.h"
#include "./arm/a64globals.h"
#include "./arm/a64instdb.h"
#include "./arm/a64operand.h"
#endif // ASMJIT_A64_H_INCLUDED
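For context, the removed a64.h above is the umbrella header users include. A minimal sketch of the documented workflow follows (not part of the diff; it assumes asmjit's standard JitRuntime setup and an AArch64 host):

// Sketch only: emit and run `int fn() { return 42; }` with a64::Assembler.
#include <asmjit/a64.h>
using namespace asmjit;

int main() {
  JitRuntime rt;                 // Owns executable memory for generated code.
  CodeHolder code;
  code.init(rt.environment());   // Match the host environment.

  a64::Assembler a(&code);
  a.mov(a64::w0, 42);            // AArch64 returns integers in w0/x0.
  a.ret(a64::x30);               // Return through the link register.

  int (*fn)(void);
  if (rt.add(&fn, &code) != kErrorOk)  // Relocate and copy into executable memory.
    return 1;
  int result = fn();             // result == 42
  rt.release(fn);
  return result == 42 ? 0 : 1;
}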


@@ -1,76 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_H_INCLUDED
#define ASMJIT_ARM_H_INCLUDED
//! \addtogroup asmjit_arm
//!
//! ### Namespaces
//!
//! - \ref arm - arm namespace provides common functionality for both AArch32 and AArch64 backends.
//! - \ref a32 - a32 namespace provides support for the AArch32 architecture. In addition it includes the
//! \ref arm namespace, so you only need a single namespace when targeting the AArch32 architecture.
//! - \ref a64 - a64 namespace provides support for the AArch64 architecture. In addition it includes the
//! \ref arm namespace, so you only need a single namespace when targeting the AArch64 architecture.
//!
//! ### Emitters
//!
//! - AArch32 & AArch64:
//! - \ref a32::Assembler - AArch32 assembler (must read, provides examples).
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
//! - \ref a32::Builder - AArch32 builder.
//! - \ref a64::Builder - AArch64 builder.
//! - \ref a32::Compiler - AArch32 compiler.
//! - \ref a64::Compiler - AArch64 compiler.
//! - \ref a32::Emitter - AArch32 emitter (abstract).
//! - \ref a64::Emitter - AArch64 emitter (abstract).
//!
//! ### Supported Instructions
//!
//! - AArch32:
//! - Emitters:
//! - \ref a32::EmitterExplicitT - Provides all instructions that use explicit operands and also
//! provides utility functions. The member functions provided are part of all AArch32 emitters.
//! - Instruction representation:
//! - \ref a32::Inst::Id - instruction identifiers.
//!
//! - AArch64:
//! - Emitters:
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands and also
//! provides utility functions. The member functions provided are part of all AArch64 emitters.
//! - Instruction representation:
//! - \ref a64::Inst::Id - instruction identifiers.
//!
//! ### Register Operands
//!
//! - \ref arm::Reg - Base class for any AArch32/AArch64 register.
//! - \ref arm::Gp - General purpose register:
//! - \ref arm::GpW - 32-bit register.
//! - \ref arm::GpX - 64-bit register (AArch64 only).
//! - \ref arm::Vec - Vector (SIMD) register:
//! - \ref arm::VecB - 8-bit SIMD register (AArch64 only).
//! - \ref arm::VecH - 16-bit SIMD register (AArch64 only).
//! - \ref arm::VecS - 32-bit SIMD register.
//! - \ref arm::VecD - 64-bit SIMD register.
//! - \ref arm::VecV - 128-bit SIMD register.
//!
//! ### Memory Operands
//!
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
//!
//! ### Other
//!
//! - \ref arm::Shift - Shift operation and value (both AArch32 and AArch64).
//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "./core.h"
#include "./arm/armglobals.h"
#include "./arm/armoperand.h"
#include "./arm/armutils.h"
#endif // ASMJIT_ARM_H_INCLUDED
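The addressing features listed above are worth one concrete sketch (not part of the diff; it uses the a64 ptr/ptr_pre/ptr_post helpers declared by these headers):

// Sketch only: the ARM addressing modes described in the overview above.
using namespace asmjit;

void addressingModes(a64::Assembler& a) {
  a.ldr(a64::x1, a64::ptr(a64::x0, 16));       // [x0, #16]  - base + offset
  a.ldr(a64::x1, a64::ptr_pre(a64::x0, 16));   // [x0, #16]! - pre-index writeback
  a.ldr(a64::x1, a64::ptr_post(a64::x0, 16));  // [x0], #16  - post-index writeback
}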


@@ -1,82 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#define ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#include "../core/type.h"
#include "../arm/a64globals.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
static const constexpr ArchTraits a64ArchTraits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFF,
// Reserved.
{ 0, 0, 0 },
// HW stack alignment (AArch64 requires stack aligned to 16 bytes at HW level).
16,
// Min/max stack offset - byte addressing is the worst, VecQ addressing the best.
4095, 65520,
// Instruction hints [Gp, Vec, ExtraVirt2, ExtraVirt3].
{{
InstHints::kPushPop,
InstHints::kPushPop,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegInfo.
#define V(index) OperandSignature{RegTraits<RegType(index)>::kSignature}
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
#define V(index) TypeId(RegTraits<RegType(index)>::kTypeId)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kARM_VecS : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kARM_VecD : RegType::kNone)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ArchTypeNameId::kByte,
ArchTypeNameId::kHWord,
ArchTypeNameId::kWord,
ArchTypeNameId::kXWord
}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
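The tables above are consumed through ArchTraits accessors, for example regTypeToTypeId(), which emitArgMove() in a64emithelper.cpp (later in this diff) calls to deduce a missing TypeId. A small sketch, not part of the diff:

// Sketch only: querying the AArch64 traits defined above.
using namespace asmjit;

void queryTraits() {
  const ArchTraits& traits = ArchTraits::byArch(Arch::kAArch64);
  TypeId tid = traits.regTypeToTypeId(RegType::kARM_GpW);  // A 32-bit integer
  (void)tid;                                               // TypeId, per the table.
}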

File diff suppressed because it is too large.


@@ -1,72 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#define ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#include "../core/assembler.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 assembler implementation.
class ASMJIT_VIRTAPI Assembler
: public BaseAssembler,
public EmitterExplicitT<Assembler> {
public:
typedef BaseAssembler Base;
//! \name Construction / Destruction
//! \{
ASMJIT_API Assembler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Assembler() noexcept override;
//! \}
//! \name Accessors
//! \{
//! Gets whether the current ARM mode is THUMB (alternative to 32-bit ARM encoding).
ASMJIT_INLINE_NODEBUG bool isInThumbMode() const noexcept { return _environment.isArchThumb(); }
//! Gets the current code alignment of the current mode (ARM vs THUMB).
ASMJIT_INLINE_NODEBUG uint32_t codeAlignment() const noexcept { return isInThumbMode() ? 2 : 4; }
//! \}
//! \name Emit
//! \{
ASMJIT_API Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
//! \}
//! \name Align
//! \{
ASMJIT_API Error align(AlignMode alignMode, uint32_t alignment) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED


@@ -1,51 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_BUILDER)
#include "../arm/a64assembler.h"
#include "../arm/a64builder.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Builder - Construction & Destruction
// =========================================
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
assignEmitterFuncs(this);
if (code)
code->attach(this);
}
Builder::~Builder() noexcept {}
// a64::Builder - Events
// =====================
Error Builder::onAttach(CodeHolder* code) noexcept {
return Base::onAttach(code);
}
Error Builder::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// a64::Builder - Finalize
// =======================
Error Builder::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_BUILDER
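finalize() above is the step that turns recorded nodes back into machine code. A brief sketch of the intended use, not part of the diff (it assumes a CodeHolder initialized as usual):

// Sketch only: a64::Builder records nodes first; finalize() then runs the
// passes and serializes everything through a temporary a64::Assembler.
using namespace asmjit;

Error buildAndFinalize(CodeHolder& code) {
  a64::Builder cb(&code);
  cb.mov(a64::w0, 1);
  cb.ret(a64::x30);
  // Nodes can still be inspected or rewritten here (cb.firstNode(), ...).
  return cb.finalize();
}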


@@ -1,57 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64BUILDER_H_INCLUDED
#define ASMJIT_ARM_A64BUILDER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 builder implementation.
class ASMJIT_VIRTAPI Builder
: public BaseBuilder,
public EmitterExplicitT<Builder> {
public:
ASMJIT_NONCOPYABLE(Builder)
typedef BaseBuilder Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Builder(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Builder() noexcept override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
#endif // ASMJIT_ARM_A64BUILDER_H_INCLUDED


@@ -1,60 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Compiler - Construction & Destruction
// ==========================================
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
assignEmitterFuncs(this);
if (code)
code->attach(this);
}
Compiler::~Compiler() noexcept {}
// a64::Compiler - Events
// ======================
Error Compiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = addPassT<ARMRAPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error Compiler::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// a64::Compiler - Finalize
// ========================
Error Compiler::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER


@@ -1,247 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64COMPILER_H_INCLUDED
#define ASMJIT_ARM_A64COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/type.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 compiler implementation.
class ASMJIT_VIRTAPI Compiler
: public BaseCompiler,
public EmitterExplicitT<Compiler> {
public:
ASMJIT_NONCOPYABLE(Compiler)
typedef BaseCompiler Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Compiler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API ~Compiler() noexcept override;
//! \}
//! \name Virtual Registers
//! \{
//! \cond INTERNAL
template<typename RegT, typename Type>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type) {
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
return reg;
}
template<typename RegT, typename Type, typename... Args>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type, const char* s, Args&&... args) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
if (sizeof...(Args) == 0)
_newReg(&reg, type, s);
else
_newRegFmt(&reg, type, s, std::forward<Args>(args)...);
return reg;
#else
DebugUtils::unused(s, std::forward<Args>(args)...);
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
return reg;
#endif
}
//! \endcond
template<typename RegT, typename... Args>
ASMJIT_INLINE_NODEBUG RegT newSimilarReg(const RegT& ref, Args&&... args) {
return _newRegInternal<RegT>(ref, std::forward<Args>(args)...);
}
template<typename... Args>
ASMJIT_INLINE_NODEBUG Reg newReg(TypeId typeId, Args&&... args) { return _newRegInternal<Reg>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp(TypeId typeId, Args&&... args) { return _newRegInternal<Gp>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVec(TypeId typeId, Args&&... args) { return _newRegInternal<Vec>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpw(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpx(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpz(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecS(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecD(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecQ(Args&&... args) { return _newRegInternal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
//! \}
//! \name Stack
//! \{
//! Creates a new memory chunk allocated on the current function's stack.
ASMJIT_INLINE_NODEBUG Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
Mem m(Globals::NoInit);
_newStack(&m, size, alignment, name);
return m;
}
//! \}
//! \name Constants
//! \{
//! Put data to a constant-pool and get a memory reference to it.
ASMJIT_INLINE_NODEBUG Mem newConst(ConstPoolScope scope, const void* data, size_t size) {
Mem m(Globals::NoInit);
_newConst(&m, scope, data, size);
return m;
}
//! Put a BYTE `val` to a constant-pool (8 bits).
ASMJIT_INLINE_NODEBUG Mem newByteConst(ConstPoolScope scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newHWordConst(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newWordConst(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newDWordConst(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newInt16Const(ConstPoolScope scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt16Const(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newInt32Const(ConstPoolScope scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt32Const(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newInt64Const(ConstPoolScope scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt64Const(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a single-precision float `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newFloatConst(ConstPoolScope scope, float val) noexcept { return newConst(scope, &val, 4); }
//! Put a double-precision float `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newDoubleConst(ConstPoolScope scope, double val) noexcept { return newConst(scope, &val, 8); }
//! \}
//! \name Instruction Options
//! \{
//! Force the compiler to not follow the conditional or unconditional jump.
ASMJIT_INLINE_NODEBUG Compiler& unfollow() noexcept { _instOptions |= InstOptions::kUnfollow; return *this; }
//! \}
//! \name Compiler specific
//! \{
//! Special pseudo-instruction that can be used to load a memory address into `o0` GP register.
//!
//! \note At the moment this instruction is only useful to load a stack allocated address into a GP register
//! for further use. It makes very little sense to use it for anything else. The semantics of this instruction
//! is the same as X86 `LEA` (load effective address) instruction.
ASMJIT_INLINE_NODEBUG Error loadAddressOf(const Gp& o0, const Mem& o1) { return _emitter()->_emitI(Inst::kIdAdr, o0, o1); }
//! \}
//! \name Function Call & Ret Intrinsics
//! \{
//! Invoke a function call without `target` type enforcement.
ASMJIT_INLINE_NODEBUG Error invoke_(InvokeNode** out, const Operand_& target, const FuncSignature& signature) {
return addInvokeNode(out, Inst::kIdBlr, target, signature);
}
//! Invoke a function call of the given `target` and `signature` and store the added node to `out`.
//!
//! Creates a new \ref InvokeNode, initializes all the necessary members to match the given function `signature`,
//! adds the node to the compiler, and stores its pointer to `out`. The operation is atomic: if anything fails,
//! nullptr is stored in `out` and an error code is returned.
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
//! Return.
ASMJIT_INLINE_NODEBUG Error ret() { return addRet(Operand(), Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const BaseReg& o0) { return addRet(o0, Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const BaseReg& o0, const BaseReg& o1) { return addRet(o0, o1); }
//! \}
//! \name Jump Tables Support
//! \{
using EmitterExplicitT<Compiler>::br;
//! Adds a jump to the given `target` with the provided jump `annotation`.
ASMJIT_INLINE_NODEBUG Error br(const BaseReg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdBr, target, annotation); }
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_A64COMPILER_H_INCLUDED
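A compact sketch of how the pieces above (virtual registers, ret(), finalize()) fit together; not part of the diff, and FuncSignatureT/CallConvId::kCDecl are assumed to match this vendored asmjit revision:

// Sketch only: `int add(int a, int b)` built with a64::Compiler.
using namespace asmjit;

Error buildAdd(CodeHolder& code) {
  a64::Compiler cc(&code);
  FuncNode* func = cc.addFunc(FuncSignatureT<int, int, int>(CallConvId::kCDecl));

  a64::Gp a = cc.newInt32("a");  // Virtual registers; the register allocator
  a64::Gp b = cc.newInt32("b");  // assigns physical ones during finalize().
  func->setArg(0, a);
  func->setArg(1, b);

  cc.add(a, a, b);               // a = a + b
  cc.ret(a);                     // Maps to addRet() shown above.
  cc.endFunc();
  return cc.finalize();
}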


@@ -1,464 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::EmitHelper - Emit Operations
// =================================
ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment) {
Emitter* emitter = _emitter->as<Emitter>();
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(typeId) && !TypeUtils::isAbstract(typeId));
emitter->setInlineComment(comment);
if (dst_.isReg() && src_.isMem()) {
Reg dst(dst_.as<Reg>());
Mem src(src_.as<Mem>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->ldrb(dst.as<Gp>(), src);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->ldrh(dst.as<Gp>(), src);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->ldr(dst.as<Gp>().w(), src);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->ldr(dst.as<Gp>().x(), src);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->ldr(dst.as<Vec>().s(), src);
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->ldr(dst.as<Vec>().d(), src);
if (TypeUtils::isVec128(typeId))
return emitter->ldr(dst.as<Vec>().q(), src);
break;
}
}
}
if (dst_.isMem() && src_.isReg()) {
Mem dst(dst_.as<Mem>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->strb(src.as<Gp>(), dst);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->strh(src.as<Gp>(), dst);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->str(src.as<Gp>().w(), dst);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->str(src.as<Gp>().x(), dst);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->str(src.as<Vec>().s(), dst);
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->str(src.as<Vec>().d(), dst);
if (TypeUtils::isVec128(typeId))
return emitter->str(src.as<Vec>().q(), dst);
break;
}
}
}
if (dst_.isReg() && src_.isReg()) {
Reg dst(dst_.as<Reg>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
case TypeId::kInt16:
case TypeId::kUInt16:
case TypeId::kInt32:
case TypeId::kUInt32:
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
if (TypeUtils::isVec128(typeId))
return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
break;
}
}
}
emitter->setInlineComment(nullptr);
return DebugUtils::errored(kErrorInvalidState);
}
Error EmitHelper::emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
}
// TODO: [ARM] EmitArgMove is unfinished.
Error EmitHelper::emitArgMove(
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment) {
// Deduce optional `dstTypeId`, which may be `TypeId::kVoid` in some cases.
if (dstTypeId == TypeId::kVoid) {
const ArchTraits& archTraits = ArchTraits::byArch(_emitter->arch());
dstTypeId = archTraits.regTypeToTypeId(dst_.type());
}
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(dstTypeId) && !TypeUtils::isAbstract(dstTypeId));
ASMJIT_ASSERT(TypeUtils::isValid(srcTypeId) && !TypeUtils::isAbstract(srcTypeId));
Reg dst(dst_.as<Reg>());
Operand src(src_);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
if (TypeUtils::isInt(dstTypeId)) {
if (TypeUtils::isInt(srcTypeId)) {
uint32_t x = dstSize == 8;
dst.setSignature(OperandSignature{x ? uint32_t(GpX::kSignature) : uint32_t(GpW::kSignature)});
_emitter->setInlineComment(comment);
if (src.isReg()) {
src.setSignature(dst.signature());
return _emitter->emit(Inst::kIdMov, dst, src);
}
else if (src.isMem()) {
InstId instId = Inst::kIdNone;
switch (srcTypeId) {
case TypeId::kInt8: instId = Inst::kIdLdrsb; break;
case TypeId::kUInt8: instId = Inst::kIdLdrb; break;
case TypeId::kInt16: instId = Inst::kIdLdrsh; break;
case TypeId::kUInt16: instId = Inst::kIdLdrh; break;
case TypeId::kInt32: instId = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
case TypeId::kUInt32: instId = Inst::kIdLdr; x = 0; break;
case TypeId::kInt64: instId = Inst::kIdLdr; break;
case TypeId::kUInt64: instId = Inst::kIdLdr; break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
return _emitter->emit(instId, dst, src);
}
}
}
if (TypeUtils::isFloat(dstTypeId) || TypeUtils::isVec(dstTypeId)) {
if (TypeUtils::isFloat(srcTypeId) || TypeUtils::isVec(srcTypeId)) {
switch (srcSize) {
case 2: dst.as<Vec>().setSignature(OperandSignature{VecH::kSignature}); break;
case 4: dst.as<Vec>().setSignature(OperandSignature{VecS::kSignature}); break;
case 8: dst.as<Vec>().setSignature(OperandSignature{VecD::kSignature}); break;
case 16: dst.as<Vec>().setSignature(OperandSignature{VecV::kSignature}); break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
_emitter->setInlineComment(comment);
if (src.isReg()) {
InstId instId = srcSize <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
src.setSignature(dst.signature());
return _emitter->emit(instId, dst, src);
}
else if (src.isMem()) {
return _emitter->emit(Inst::kIdLdr_v, dst, src);
}
}
}
return DebugUtils::errored(kErrorInvalidState);
}
// a64::EmitHelper - Emit Prolog & Epilog
// ======================================
struct LoadStoreInstructions {
InstId singleInstId;
InstId pairInstId;
};
struct PrologEpilogInfo {
struct RegPair {
uint8_t ids[2];
uint16_t offset;
};
struct GroupData {
RegPair pairs[16];
uint32_t pairCount;
};
Support::Array<GroupData, 2> groups;
uint32_t sizeTotal;
Error init(const FuncFrame& frame) noexcept {
uint32_t offset = 0;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
GroupData& data = groups[group];
uint32_t n = 0;
uint32_t pairCount = 0;
RegPair* pairs = data.pairs;
uint32_t slotSize = frame.saveRestoreRegSize(group);
uint32_t savedRegs = frame.savedRegs(group);
if (group == RegGroup::kGp && frame.hasPreservedFP()) {
// Must be at the beginning of the push/pop sequence.
ASMJIT_ASSERT(pairCount == 0);
pairs[0].offset = uint16_t(offset);
pairs[0].ids[0] = Gp::kIdFp;
pairs[0].ids[1] = Gp::kIdLr;
offset += slotSize * 2;
pairCount++;
savedRegs &= ~Support::bitMask(Gp::kIdFp, Gp::kIdLr);
}
Support::BitWordIterator<uint32_t> it(savedRegs);
while (it.hasNext()) {
pairs[pairCount].ids[n] = uint8_t(it.next());
if (++n == 2) {
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
n = 0;
pairCount++;
}
}
if (n == 1) {
pairs[pairCount].ids[1] = uint8_t(BaseReg::kIdBad);
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
pairCount++;
}
data.pairCount = pairCount;
}
sizeTotal = offset;
return kErrorOk;
}
};
ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdStr , Inst::kIdStp },
{ Inst::kIdStr_v, Inst::kIdStp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (uint32_t i = 0; i < pairCount; i++) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(-int(adjustInitialOffset));
mem.makePreIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
mem.resetToFixedOffset();
if (i == 0 && frame.hasPreservedFP()) {
ASMJIT_PROPAGATE(emitter->mov(x29, sp));
}
}
}
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
// TODO: [ARM] Prolog - we must touch the pages otherwise it's undefined.
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
return kErrorOk;
}
// TODO: [ARM] Emit epilog.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdLdr , Inst::kIdLdp },
{ Inst::kIdLdr_v, Inst::kIdLdp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
for (int g = 1; g >= 0; g--) {
RegGroup group = RegGroup(g);
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (int i = int(pairCount) - 1; i >= 0; i--) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(int(adjustInitialOffset));
mem.makePostIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
mem.resetToFixedOffset();
}
}
ASMJIT_PROPAGATE(emitter->ret(x30));
return kErrorOk;
}
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitProlog(frame);
}
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitEpilog(frame);
}
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
EmitHelper emitHelper(emitter);
return emitHelper.emitArgsAssignment(frame, args);
}
void assignEmitterFuncs(BaseEmitter* emitter) {
emitter->_funcs.emitProlog = Emitter_emitProlog;
emitter->_funcs.emitEpilog = Emitter_emitEpilog;
emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;
#ifndef ASMJIT_NO_LOGGING
emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
#endif
#ifndef ASMJIT_NO_VALIDATION
emitter->_funcs.validate = InstInternal::validate;
#endif
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
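emitProlog()/emitEpilog() above are reached through the function pointers installed by assignEmitterFuncs(). A sketch of the public entry points, not part of the diff, assuming asmjit's FuncFrame workflow:

// Sketch only: BaseEmitter::emitProlog/emitEpilog dispatch into the
// EmitHelper implementations defined above.
using namespace asmjit;

Error emitWithFrame(a64::Assembler& a) {
  FuncDetail func;
  ASMJIT_PROPAGATE(func.init(FuncSignatureT<void>(CallConvId::kCDecl), a.environment()));

  FuncFrame frame;
  ASMJIT_PROPAGATE(frame.init(func));
  frame.addDirtyRegs(a64::x19, a64::x20);  // Callee-saved regs become STP/LDP pairs.
  ASMJIT_PROPAGATE(frame.finalize());

  ASMJIT_PROPAGATE(a.emitProlog(frame));
  // ... function body ...
  return a.emitEpilog(frame);
}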


@@ -1,50 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED
#define ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED
#include "../core/api-config.h"
#include "../core/emithelper_p.h"
#include "../core/func.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
class EmitHelper : public BaseEmitHelper {
public:
ASMJIT_INLINE_NODEBUG explicit EmitHelper(BaseEmitter* emitter = nullptr) noexcept
: BaseEmitHelper(emitter) {}
Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr) override;
Error emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment = nullptr) override;
Error emitArgMove(
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) override;
Error emitProlog(const FuncFrame& frame);
Error emitEpilog(const FuncFrame& frame);
};
void assignEmitterFuncs(BaseEmitter* emitter);
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64EMITHELPER_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,61 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_LOGGING)
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::FormatterInternal - Format Instruction
// ===========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
DebugUtils::unused(arch);
// Format instruction options and instruction mnemonic.
InstId instId = inst.realId();
if (instId < Inst::_kIdCount)
ASMJIT_PROPAGATE(InstInternal::instIdToString(arch, instId, sb));
else
ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
CondCode cc = inst.armCondCode();
if (cc != CondCode::kAL) {
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatCondCode(sb, cc));
}
for (uint32_t i = 0; i < opCount; i++) {
const Operand_& op = operands[i];
if (op.isNone())
break;
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, flags, emitter, arch, op));
}
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_LOGGING
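formatInstruction() above is normally driven by the logging layer rather than called directly. A minimal sketch, not part of the diff, assuming asmjit's FileLogger:

// Sketch only: attaching a logger routes every emitted instruction through
// FormatterInternal::formatInstruction defined above.
using namespace asmjit;

void logToStdout(CodeHolder& code, FileLogger& logger) {
  code.setLogger(&logger);  // The logger must outlive code emission.
  // Each instruction emitted into `code` is now formatted and printed,
  // including the condition-code suffix handled above (e.g. "b.eq ...").
}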


@@ -1,42 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armformatter_p.h"
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace FormatterInternal {
using namespace arm::FormatterInternal;
Error ASMJIT_CDECL formatInstruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED


@@ -1,189 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64func_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace FuncInternal {
static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
return ccId == CallConvId::kCDecl ||
ccId == CallConvId::kStdCall ||
ccId == CallConvId::kFastCall ||
ccId == CallConvId::kVectorCall ||
ccId == CallConvId::kThisCall ||
ccId == CallConvId::kRegParm1 ||
ccId == CallConvId::kRegParm2 ||
ccId == CallConvId::kRegParm3;
}
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
if (typeId == TypeId::kFloat32)
return RegType::kARM_VecS;
else if (typeId == TypeId::kFloat64)
return RegType::kARM_VecD;
else if (TypeUtils::isVec32(typeId))
return RegType::kARM_VecS;
else if (TypeUtils::isVec64(typeId))
return RegType::kARM_VecD;
else if (TypeUtils::isVec128(typeId))
return RegType::kARM_VecV;
else
return RegType::kNone;
}
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
cc.setArch(environment.arch());
cc.setSaveRestoreRegSize(RegGroup::kGp, 8);
cc.setSaveRestoreRegSize(RegGroup::kVec, 8);
cc.setSaveRestoreAlignment(RegGroup::kGp, 16);
cc.setSaveRestoreAlignment(RegGroup::kVec, 16);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt2, 1);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt3, 1);
cc.setPassedOrder(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setNaturalStackAlignment(16);
if (shouldTreatAsCDecl(ccId)) {
// ARM doesn't have as many calling conventions as we can find in the X86 world, so treat most conventions as __cdecl.
cc.setId(CallConvId::kCDecl);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(8, 9, 10, 11, 12, 13, 14, 15));
}
else {
cc.setId(ccId);
cc.setSaveRestoreRegSize(RegGroup::kVec, 16);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept {
DebugUtils::unused(signature);
const CallConv& cc = func.callConv();
uint32_t stackOffset = 0;
uint32_t i;
uint32_t argCount = func.argCount();
if (func.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
TypeId typeId = func._rets[valueIndex].typeId();
// Terminate at the first void type (end of the pack).
if (typeId == TypeId::kVoid)
break;
switch (typeId) {
case TypeId::kInt8:
case TypeId::kInt16:
case TypeId::kInt32: {
func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kInt32);
break;
}
case TypeId::kUInt8:
case TypeId::kUInt16:
case TypeId::kUInt32: {
func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kUInt32);
break;
}
case TypeId::kInt64:
case TypeId::kUInt64: {
func._rets[valueIndex].initReg(RegType::kARM_GpX, valueIndex, typeId);
break;
}
default: {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
return DebugUtils::errored(kErrorInvalidRegType);
func._rets[valueIndex].initReg(regType, valueIndex, typeId);
break;
}
}
}
}
switch (cc.strategy()) {
case CallConvStrategy::kDefault: {
uint32_t gpzPos = 0;
uint32_t vecPos = 0;
for (i = 0; i < argCount; i++) {
FuncValue& arg = func._args[i][0];
TypeId typeId = arg.typeId();
if (TypeUtils::isInt(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (gpzPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
if (regId != BaseReg::kIdBad) {
RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX;
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kGp, Support::bitMask(regId));
gpzPos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), registerSize);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (vecPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
if (regId != BaseReg::kIdBad) {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
return DebugUtils::errored(kErrorInvalidRegType);
arg.initTypeId(typeId);
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kVec, Support::bitMask(regId));
vecPos++;
}
else {
uint32_t size = TypeUtils::sizeOf(typeId);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
}
break;
}
default:
return DebugUtils::errored(kErrorInvalidState);
}
func._argStackSize = stackOffset;
return kErrorOk;
}
} // {FuncInternal}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
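The effect of initFuncDetail() can be inspected through FuncDetail. A short sketch, not part of the diff, assuming FuncSignatureT from the same revision:

// Sketch only: argument assignment for (int, double) on AArch64, per the
// default strategy implemented above.
using namespace asmjit;

void inspectArgs(const Environment& env) {
  FuncDetail func;
  if (func.init(FuncSignatureT<void, int, double>(CallConvId::kCDecl), env) != kErrorOk)
    return;
  // Arg 0 -> w0 (kARM_GpW via the integer path), arg 1 -> d0 (kARM_VecD via
  // regTypeFromFpOrVecTypeId); nothing is passed on the stack.
  const FuncValue& a0 = func.arg(0);
  const FuncValue& a1 = func.arg(1);
  (void)a0; (void)a1;
}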


@@ -1,33 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#define ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#include "../core/func.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! AArch64-specific function API (calling conventions and other utilities).
namespace FuncInternal {
//! Initialize `CallConv` structure (AArch64 specific).
Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept;
//! Initialize `FuncDetail` (AArch64 specific).
Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept;
} // {FuncInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64FUNC_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,279 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/cpuinfo.h"
#include "../core/misc_p.h"
#include "../core/support_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::InstInternal - Text
// ========================
#ifndef ASMJIT_NO_TEXT
Error InstInternal::instIdToString(Arch arch, InstId instId, String& output) noexcept {
uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
DebugUtils::unused(arch);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
return DebugUtils::errored(kErrorInvalidInstruction);
char nameData[32];
size_t nameSize = Support::decodeInstName(nameData, InstDB::_instNameIndexTable[realId], InstDB::_instNameStringTable);
return output.append(nameData, nameSize);
}
InstId InstInternal::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
DebugUtils::unused(arch);
if (ASMJIT_UNLIKELY(!s))
return Inst::kIdNone;
if (len == SIZE_MAX)
len = strlen(s);
if (ASMJIT_UNLIKELY(len == 0 || len > InstDB::kMaxNameSize))
return Inst::kIdNone;
uint32_t prefix = uint32_t(s[0]) - 'a';
if (ASMJIT_UNLIKELY(prefix > 'z' - 'a'))
return Inst::kIdNone;
size_t base = InstDB::instNameIndex[prefix].start;
size_t end = InstDB::instNameIndex[prefix].end;
if (ASMJIT_UNLIKELY(!base))
return Inst::kIdNone;
char nameData[32];
for (size_t lim = end - base; lim != 0; lim >>= 1) {
size_t instId = base + (lim >> 1);
size_t nameSize = Support::decodeInstName(nameData, InstDB::_instNameIndexTable[instId], InstDB::_instNameStringTable);
int result = Support::compareStringViews(s, len, nameData, nameSize);
if (result < 0)
continue;
if (result > 0) {
base = instId + 1;
lim--;
continue;
}
return InstId(instId);
}
return Inst::kIdNone;
}
#endif // !ASMJIT_NO_TEXT
// a64::InstInternal - Validate
// ============================
#ifndef ASMJIT_NO_VALIDATION
ASMJIT_FAVOR_SIZE Error InstInternal::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
// TODO:
DebugUtils::unused(arch, inst, operands, opCount, validationFlags);
return kErrorOk;
}
#endif // !ASMJIT_NO_VALIDATION
// a64::InstInternal - QueryRWInfo
// ===============================
#ifndef ASMJIT_NO_INTROSPECTION
struct InstRWInfoData {
uint8_t rwx[Globals::kMaxOpCount];
};
static const InstRWInfoData instRWInfoData[] = {
#define R uint8_t(OpRWFlags::kRead)
#define W uint8_t(OpRWFlags::kWrite)
#define X uint8_t(OpRWFlags::kRW)
{{ R, R, R, R, R, R }}, // kRWI_R
{{ R, W, R, R, R, R }}, // kRWI_RW
{{ R, X, R, R, R, R }}, // kRWI_RX
{{ R, R, W, R, R, R }}, // kRWI_RRW
{{ R, W, X, R, R, R }}, // kRWI_RWX
{{ W, R, R, R, R, R }}, // kRWI_W
{{ W, R, W, R, R, R }}, // kRWI_WRW
{{ W, R, X, R, R, R }}, // kRWI_WRX
{{ W, R, R, W, R, R }}, // kRWI_WRRW
{{ W, R, R, X, R, R }}, // kRWI_WRRX
{{ W, W, R, R, R, R }}, // kRWI_WW
{{ X, R, R, R, R, R }}, // kRWI_X
{{ X, R, X, R, R, R }}, // kRWI_XRX
{{ X, X, R, R, X, R }}, // kRWI_XXRRX
{{ W, R, R, R, R, R }}, // kRWI_LDn
{{ R, W, R, R, R, R }}, // kRWI_STn
{{ R, R, R, R, R, R }} // kRWI_TODO
#undef R
#undef W
#undef X
};
static const uint8_t elementTypeSize[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
// Unused in Release configuration as the assert is not compiled in.
DebugUtils::unused(arch);
// Only called when `arch` matches the ARM family.
ASMJIT_ASSERT(Environment::isFamilyARM(arch));
// Get the instruction data.
uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
return DebugUtils::errored(kErrorInvalidInstruction);
out->_instFlags = InstRWFlags::kNone;
out->_opCount = uint8_t(opCount);
out->_rmFeature = 0;
out->_extraReg.reset();
out->_readFlags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATE.
out->_writeFlags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATE.
const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[realId];
const InstRWInfoData& rwInfo = instRWInfoData[instInfo.rwInfoIndex()];
if (instInfo.hasFlag(InstDB::kInstFlagConsecutive) && opCount > 2) {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = i < opCount - 1 ? (OpRWFlags)rwInfo.rwx[0] : (OpRWFlags)rwInfo.rwx[1];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = BaseReg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (i == 0)
op._consecutiveLeadCount = uint8_t(opCount - 1);
else
op.addOpFlags(OpRWFlags::kConsecutive);
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
}
}
}
}
else {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = (OpRWFlags)rwInfo.rwx[i];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = BaseReg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (srcOp.as<Vec>().hasElementIndex()) {
// Only part of the vector is accessed if element index [] is used.
uint32_t elementType = srcOp.as<Vec>().elementType();
uint32_t elementIndex = srcOp.as<Vec>().elementIndex();
uint32_t elementSize = elementTypeSize[elementType];
uint64_t accessMask = uint64_t(Support::lsbMask<uint32_t>(elementSize)) << (elementIndex * elementSize);
op._readByteMask &= accessMask;
op._writeByteMask &= accessMask;
}
// TODO: [ARM] RW info is not finished.
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
}
}
}
}
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
// a64::InstInternal - QueryFeatures
// =================================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstInternal::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
// TODO: [ARM] QueryFeatures not implemented yet.
DebugUtils::unused(arch, inst, operands, opCount, out);
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
// a64::InstInternal - Unit
// ========================
#if defined(ASMJIT_TEST)
UNIT(arm_inst_api_text) {
// TODO:
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
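These InstInternal functions back asmjit's public InstAPI namespace. A sketch, not part of the diff, assuming the InstAPI wrappers:

// Sketch only: querying the RW info computed above for `add x0, x1, x2`.
using namespace asmjit;

void queryAdd() {
  BaseInst inst(a64::Inst::kIdAdd);
  Operand operands[] = { a64::x0, a64::x1, a64::x2 };

  InstRWInfo rw;
  if (InstAPI::queryRWInfo(Arch::kAArch64, inst, operands, 3, &rw) == kErrorOk) {
    // For a typical 3-operand ALU op the table above yields: operand 0
    // written, operands 1-2 read.
    bool w0 = rw.operand(0).isWrite();
    bool r1 = rw.operand(1).isRead();
    (void)w0; (void)r1;
  }
}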


@@ -1,41 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#define ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#include "../core/inst.h"
#include "../core/operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstInternal {
#ifndef ASMJIT_NO_TEXT
Error ASMJIT_CDECL instIdToString(Arch arch, InstId instId, String& output) noexcept;
InstId ASMJIT_CDECL stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
Error ASMJIT_CDECL validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
Error ASMJIT_CDECL queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
Error ASMJIT_CDECL queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED

File diff suppressed because it is too large.


@@ -1,72 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_INCLUDED
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! Instruction database (AArch64).
namespace InstDB {
//! Instruction flags.
enum InstFlags : uint32_t {
//! The instruction provides conditional execution.
kInstFlagCond = 0x00000001u,
//! SIMD instruction that processes elements in pairs.
kInstFlagPair = 0x00000002u,
//! SIMD instruction that does widening (Long).
kInstFlagLong = 0x00000004u,
//! SIMD instruction that does narrowing (Narrow).
kInstFlagNarrow = 0x00000008u,
//! SIMD element access of half-words can only be used with v0..15.
kInstFlagVH0_15 = 0x00000010u,
//! Instruction may use consecutive registers if the number of operands is greater than 2.
kInstFlagConsecutive = 0x00000080u
};
//! Instruction information (AArch64).
struct InstInfo {
//! Instruction encoding type.
uint32_t _encoding : 8;
//! Index to data specific to each encoding type.
uint32_t _encodingDataIndex : 8;
uint32_t _reserved : 16;
uint16_t _rwInfoIndex;
uint16_t _flags;
//! \name Accessors
//! \{
ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
//! \}
};
ASMJIT_VARAPI const InstInfo _instInfoTable[];
static inline const InstInfo& infoById(InstId instId) noexcept {
instId &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::isDefinedId(instId));
return _instInfoTable[instId];
}
} // {InstDB}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_INCLUDED
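infoById() plus the flags above is how the backend queries per-instruction properties. A tiny sketch, not part of the diff, grounded in the header shown above:

// Sketch only: testing an instruction flag through InstDB::infoById().
using namespace asmjit;

bool usesConsecutiveRegs(InstId instId) {
  const a64::InstDB::InstInfo& info = a64::InstDB::infoById(instId);
  return info.hasFlag(a64::InstDB::kInstFlagConsecutive);
}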


@@ -1,877 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#include "../core/codeholder.h"
#include "../arm/a64instdb.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstDB {
// a64::InstDB - Constants Used by Instructions
// ============================================
// GP register types supported by base instructions.
static constexpr uint32_t kW = 0x1;
static constexpr uint32_t kX = 0x2;
static constexpr uint32_t kWX = 0x3;
// GP high register IDs supported by the instruction.
static constexpr uint32_t kZR = Gp::kIdZr;
static constexpr uint32_t kSP = Gp::kIdSp;
// a64::InstDB - RWInfo
// ====================
enum RWInfoType : uint32_t {
kRWI_R,
kRWI_RW,
kRWI_RX,
kRWI_RRW,
kRWI_RWX,
kRWI_W,
kRWI_WRW,
kRWI_WRX,
kRWI_WRRW,
kRWI_WRRX,
kRWI_WW,
kRWI_X,
kRWI_XRX,
kRWI_XXRRX,
kRWI_LDn,
kRWI_STn,
kRWI_SpecialStart = kRWI_LDn
};
// a64::InstDB - ElementType
// =========================
enum ElementType : uint8_t {
kET_None = Vec::kElementTypeNone,
kET_B = Vec::kElementTypeB,
kET_H = Vec::kElementTypeH,
kET_S = Vec::kElementTypeS,
kET_D = Vec::kElementTypeD,
kET_2H = Vec::kElementTypeH2,
kET_4B = Vec::kElementTypeB4
};
// a64::InstDB - GpType
// ====================
enum GpType : uint8_t {
kGp_W,
kGp_X,
kGp_X_SP
};
// a64::InstDB - OPSig
// ===================
enum kOpSignature : uint32_t {
kOp_GpW = GpW::kSignature,
kOp_GpX = GpX::kSignature,
kOp_B = VecB::kSignature,
kOp_H = VecH::kSignature,
kOp_S = VecS::kSignature,
kOp_D = VecD::kSignature,
kOp_Q = VecV::kSignature,
kOp_V8B = VecD::kSignature | Vec::kSignatureElementB,
kOp_V4H = VecD::kSignature | Vec::kSignatureElementH,
kOp_V2S = VecD::kSignature | Vec::kSignatureElementS,
kOp_V16B = VecV::kSignature | Vec::kSignatureElementB,
kOp_V8H = VecV::kSignature | Vec::kSignatureElementH,
kOp_V4S = VecV::kSignature | Vec::kSignatureElementS,
kOp_V2D = VecV::kSignature | Vec::kSignatureElementD
};
// a64::InstDB - HFConv
// ====================
enum kHFConv : uint32_t {
//! FP16 version of the instruction is not available.
kHF_N,
//! Doesn't do any change to the opcode.
kHF_0,
kHF_A,
kHF_B,
kHF_C,
kHF_D,
kHF_Count
};
// a64::InstDB - VOType
// ====================
//! Vector operand type combinations used by FP&SIMD instructions.
enum VOType : uint32_t {
kVO_V_B,
kVO_V_BH,
kVO_V_BH_4S,
kVO_V_BHS,
kVO_V_BHS_D2,
kVO_V_HS,
kVO_V_S,
kVO_V_B8H4,
kVO_V_B8H4S2,
kVO_V_B8D1,
kVO_V_H4S2,
kVO_V_B16,
kVO_V_B16H8,
kVO_V_B16H8S4,
kVO_V_B16D2,
kVO_V_H8S4,
kVO_V_S4,
kVO_V_D2,
kVO_SV_BHS,
kVO_SV_B8H4S2,
kVO_SV_HS,
kVO_V_Any,
kVO_SV_Any,
kVO_Count
};
// a64::InstDB - EncodingId
// ========================
// ${EncodingId:Begin}
// ------------------- Automatically generated, do not edit -------------------
enum EncodingId : uint32_t {
kEncodingNone = 0,
kEncodingBaseAddSub,
kEncodingBaseAdr,
kEncodingBaseAtDcIcTlbi,
kEncodingBaseAtomicCasp,
kEncodingBaseAtomicOp,
kEncodingBaseAtomicSt,
kEncodingBaseBfc,
kEncodingBaseBfi,
kEncodingBaseBfm,
kEncodingBaseBfx,
kEncodingBaseBranchCmp,
kEncodingBaseBranchReg,
kEncodingBaseBranchRel,
kEncodingBaseBranchTst,
kEncodingBaseCCmp,
kEncodingBaseCInc,
kEncodingBaseCSel,
kEncodingBaseCSet,
kEncodingBaseCmpCmn,
kEncodingBaseExtend,
kEncodingBaseExtract,
kEncodingBaseLdSt,
kEncodingBaseLdpStp,
kEncodingBaseLdxp,
kEncodingBaseLogical,
kEncodingBaseMov,
kEncodingBaseMovKNZ,
kEncodingBaseMrs,
kEncodingBaseMsr,
kEncodingBaseMvnNeg,
kEncodingBaseOp,
kEncodingBaseOpImm,
kEncodingBaseR,
kEncodingBaseRM_NoImm,
kEncodingBaseRM_SImm10,
kEncodingBaseRM_SImm9,
kEncodingBaseRR,
kEncodingBaseRRII,
kEncodingBaseRRR,
kEncodingBaseRRRR,
kEncodingBaseRev,
kEncodingBaseShift,
kEncodingBaseStx,
kEncodingBaseStxp,
kEncodingBaseSys,
kEncodingBaseTst,
kEncodingFSimdPair,
kEncodingFSimdSV,
kEncodingFSimdVV,
kEncodingFSimdVVV,
kEncodingFSimdVVVV,
kEncodingFSimdVVVe,
kEncodingISimdPair,
kEncodingISimdSV,
kEncodingISimdVV,
kEncodingISimdVVV,
kEncodingISimdVVVI,
kEncodingISimdVVVV,
kEncodingISimdVVVVx,
kEncodingISimdVVVe,
kEncodingISimdVVVx,
kEncodingISimdVVx,
kEncodingISimdWWV,
kEncodingSimdBicOrr,
kEncodingSimdCmp,
kEncodingSimdDot,
kEncodingSimdDup,
kEncodingSimdFcadd,
kEncodingSimdFccmpFccmpe,
kEncodingSimdFcm,
kEncodingSimdFcmla,
kEncodingSimdFcmpFcmpe,
kEncodingSimdFcsel,
kEncodingSimdFcvt,
kEncodingSimdFcvtLN,
kEncodingSimdFcvtSV,
kEncodingSimdFmlal,
kEncodingSimdFmov,
kEncodingSimdIns,
kEncodingSimdLdNStN,
kEncodingSimdLdSt,
kEncodingSimdLdpStp,
kEncodingSimdLdurStur,
kEncodingSimdMov,
kEncodingSimdMoviMvni,
kEncodingSimdShift,
kEncodingSimdShiftES,
kEncodingSimdSm3tt,
kEncodingSimdSmovUmov,
kEncodingSimdSxtlUxtl,
kEncodingSimdTblTbx
};
// ----------------------------------------------------------------------------
// ${EncodingId:End}
// a64::InstDB::EncodingData
// =========================
namespace EncodingData {
#define M_OPCODE(field, bits) \
uint32_t _##field : bits; \
ASMJIT_INLINE_NODEBUG constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
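// For example, `M_OPCODE(opcode, 22)` expands (modulo ASMJIT_INLINE_NODEBUG) to:
//
// uint32_t _opcode : 22;
// constexpr uint32_t opcode() const noexcept { return uint32_t(_opcode) << 10; }
//
// i.e. the field stores the top 22 bits of the opcode and the accessor shifts
// them back into place.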
struct BaseOp {
uint32_t opcode;
};
struct BaseOpImm {
uint32_t opcode;
uint16_t immBits;
uint16_t immOffset;
};
struct BaseR {
uint32_t opcode;
uint32_t rType : 8;
uint32_t rHiId : 8;
uint32_t rShift : 8;
};
struct BaseRR {
uint32_t opcode;
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t aShift : 5;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t bShift : 5;
uint32_t uniform : 1;
};
struct BaseRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t dType : 2;
uint32_t dHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRII {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t aImmSize : 6;
uint32_t aImmDiscardLsb : 5;
uint32_t aImmOffset : 5;
uint32_t bImmSize : 6;
uint32_t bImmDiscardLsb : 5;
uint32_t bImmOffset : 5;
};
struct BaseAtDcIcTlbi {
uint32_t immVerifyMask : 14;
uint32_t immVerifyData : 14;
uint32_t mandatoryReg : 1;
};
struct BaseAdcSbc {
uint32_t opcode;
};
struct BaseAddSub {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
};
struct BaseAdr {
M_OPCODE(opcode, 22)
OffsetType offsetType : 8;
};
struct BaseBfm {
uint32_t opcode; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
};
struct BaseCmpCmn {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
};
struct BaseExtend {
M_OPCODE(opcode, 22) // sf|........|N|......|......|Rn|Rd|
uint32_t rType : 2;
uint32_t u : 1;
};
struct BaseLogical {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
uint32_t negateImm : 1; // True if this is an operation that must negate IMM.
};
struct BaseMvnNeg {
uint32_t opcode;
};
struct BaseShift {
M_OPCODE(registerOp, 22)
M_OPCODE(immediateOp, 22)
uint32_t ror : 2;
};
struct BaseTst {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
};
struct BaseRM_NoImm {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
};
struct BaseRM_SImm9 {
M_OPCODE(offsetOp, 22)
M_OPCODE(prePostOp, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BaseRM_SImm10 {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BaseLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t uOffsetShift : 3;
uint32_t uAltInstId : 14;
};
struct BaseLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t offsetShift : 3;
};
struct BaseStx {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseLdxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseStxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicOp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t zr : 1;
};
struct BaseAtomicSt {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicCasp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
typedef BaseOp BaseBranchReg;
typedef BaseOp BaseBranchRel;
typedef BaseOp BaseBranchCmp;
typedef BaseOp BaseBranchTst;
typedef BaseOp BaseExtract;
typedef BaseOp BaseBfc;
typedef BaseOp BaseBfi;
typedef BaseOp BaseBfx;
typedef BaseOp BaseCCmp;
typedef BaseOp BaseCInc;
typedef BaseOp BaseCSet;
typedef BaseOp BaseCSel;
typedef BaseOp BaseMovKNZ;
typedef BaseOp BaseMull;
struct FSimdGeneric {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp : 28;
uint32_t _vectorHf : 4;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
};
typedef FSimdGeneric FSimdVV;
typedef FSimdGeneric FSimdVVV;
typedef FSimdGeneric FSimdVVVV;
struct FSimdSV {
uint32_t opcode;
};
struct FSimdVVVe {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp;
uint32_t _elementOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t vectorHf() const noexcept { return kHF_C; }
constexpr uint32_t elementScalarOp() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
constexpr uint32_t elementVectorOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFcadd {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode << 10; }
};
struct SimdFcmla {
uint32_t _regularOp;
uint32_t _elementOp;
constexpr uint32_t regularOp() const noexcept { return uint32_t(_regularOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFccmpFccmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcm {
uint32_t _registerOp : 28;
uint32_t _registerHf : 4;
uint32_t _zeroOp : 28;
constexpr bool hasRegisterOp() const noexcept { return _registerOp != 0; }
constexpr bool hasZeroOp() const noexcept { return _zeroOp != 0; }
constexpr uint32_t registerScalarOp() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
constexpr uint32_t registerVectorOp() const noexcept { return uint32_t(_registerOp) << 10; }
constexpr uint32_t registerScalarHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t registerVectorHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t zeroScalarOp() const noexcept { return (uint32_t(_zeroOp) << 10) | (0x5u << 28); }
constexpr uint32_t zeroVectorOp() const noexcept { return (uint32_t(_zeroOp) << 10); }
};
struct SimdFcmpFcmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcvtLN {
uint32_t _opcode : 22;
uint32_t _isCvtxn : 1;
uint32_t _hasScalar : 1;
constexpr uint32_t scalarOp() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
constexpr uint32_t vectorOp() const noexcept { return (uint32_t(_opcode) << 10); }
constexpr uint32_t isCvtxn() const noexcept { return _isCvtxn; }
constexpr uint32_t hasScalar() const noexcept { return _hasScalar; }
};
struct SimdFcvtSV {
uint32_t _vectorIntOp;
uint32_t _vectorFpOp;
uint32_t _generalOp : 31;
uint32_t _isFloatToInt : 1;
constexpr uint32_t scalarIntOp() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorIntOp() const noexcept { return uint32_t(_vectorIntOp) << 10; }
constexpr uint32_t scalarFpOp() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorFpOp() const noexcept { return uint32_t(_vectorFpOp) << 10; }
constexpr uint32_t generalOp() const noexcept { return (uint32_t(_generalOp) << 10); }
constexpr uint32_t isFloatToInt() const noexcept { return _isFloatToInt; }
constexpr uint32_t isFixedPoint() const noexcept { return _vectorFpOp != 0; }
};
struct SimdFmlal {
uint32_t _vectorOp;
uint32_t _elementOp;
uint8_t _optionalQ;
uint8_t tA;
uint8_t tB;
uint8_t tElement;
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return uint32_t(_elementOp) << 10; }
constexpr uint32_t optionalQ() const noexcept { return _optionalQ; }
};
struct FSimdPair {
uint32_t _scalarOp;
uint32_t _vectorOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
};
struct ISimdVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
};
struct ISimdSV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
};
struct ISimdWWV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVe {
uint32_t regularOp : 26; // 22 bits used.
uint32_t regularVecType : 6;
uint32_t elementOp : 26; // 22 bits used.
uint32_t elementVecType : 6;
};
struct ISimdVVVI {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t immSize : 4;
uint32_t immShift : 4;
uint32_t imm64HasOneBitLess : 1;
};
struct ISimdVVVV {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct ISimdVVVVx {
uint32_t opcode;
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
uint32_t op3Signature;
};
struct SimdBicOrr {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp; // 22 bits used.
};
struct SimdCmp {
uint32_t regOp;
uint32_t zeroOp : 22;
uint32_t vecOpType : 6;
};
struct SimdDot {
uint32_t vectorOp; // 22 bits used.
uint32_t elementOp; // 22 bits used.
uint8_t tA; // Element-type of the first operand.
uint8_t tB; // Element-type of the second and third operands.
uint8_t tElement; // Element-type of the element index[] operand.
};
struct SimdMoviMvni {
uint32_t opcode : 31;
uint32_t inverted : 1;
};
struct SimdLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t uAltInstId : 16;
};
struct SimdLdNStN {
uint32_t singleOp;
uint32_t multipleOp : 22;
uint32_t n : 3;
uint32_t replicate : 1;
};
struct SimdLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
};
struct SimdLdurStur {
uint32_t opcode;
};
struct ISimdPair {
uint32_t opcode2; // 22 bits used.
uint32_t opcode3 : 26; // 22 bits used.
uint32_t opType3 : 6;
};
struct SimdShift {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp : 22; // 22 bits used.
uint32_t invertedImm : 1;
uint32_t vecOpType : 6;
};
struct SimdShiftES {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdSm3tt {
uint32_t opcode;
};
struct SimdSmovUmov {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t isSigned : 1;
};
struct SimdSxtlUxtl {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdTblTbx {
uint32_t opcode;
};
#undef M_OPCODE
// ${EncodingDataForward:Begin}
// ------------------- Automatically generated, do not edit -------------------
extern const BaseAddSub baseAddSub[4];
extern const BaseAdr baseAdr[2];
extern const BaseAtDcIcTlbi baseAtDcIcTlbi[4];
extern const BaseAtomicCasp baseAtomicCasp[4];
extern const BaseAtomicOp baseAtomicOp[123];
extern const BaseAtomicSt baseAtomicSt[48];
extern const BaseBfc baseBfc[1];
extern const BaseBfi baseBfi[3];
extern const BaseBfm baseBfm[3];
extern const BaseBfx baseBfx[3];
extern const BaseBranchCmp baseBranchCmp[2];
extern const BaseBranchReg baseBranchReg[3];
extern const BaseBranchRel baseBranchRel[2];
extern const BaseBranchTst baseBranchTst[2];
extern const BaseCCmp baseCCmp[2];
extern const BaseCInc baseCInc[3];
extern const BaseCSel baseCSel[4];
extern const BaseCSet baseCSet[2];
extern const BaseCmpCmn baseCmpCmn[2];
extern const BaseExtend baseExtend[5];
extern const BaseExtract baseExtract[1];
extern const BaseLdSt baseLdSt[9];
extern const BaseLdpStp baseLdpStp[6];
extern const BaseLdxp baseLdxp[2];
extern const BaseLogical baseLogical[8];
extern const BaseMovKNZ baseMovKNZ[3];
extern const BaseMvnNeg baseMvnNeg[3];
extern const BaseOp baseOp[23];
extern const BaseOpImm baseOpImm[14];
extern const BaseR baseR[10];
extern const BaseRM_NoImm baseRM_NoImm[21];
extern const BaseRM_SImm10 baseRM_SImm10[2];
extern const BaseRM_SImm9 baseRM_SImm9[23];
extern const BaseRR baseRR[15];
extern const BaseRRII baseRRII[2];
extern const BaseRRR baseRRR[26];
extern const BaseRRRR baseRRRR[6];
extern const BaseShift baseShift[8];
extern const BaseStx baseStx[3];
extern const BaseStxp baseStxp[2];
extern const BaseTst baseTst[1];
extern const FSimdPair fSimdPair[5];
extern const FSimdSV fSimdSV[4];
extern const FSimdVV fSimdVV[17];
extern const FSimdVVV fSimdVVV[13];
extern const FSimdVVVV fSimdVVVV[4];
extern const FSimdVVVe fSimdVVVe[4];
extern const ISimdPair iSimdPair[1];
extern const ISimdSV iSimdSV[7];
extern const ISimdVV iSimdVV[29];
extern const ISimdVVV iSimdVVV[65];
extern const ISimdVVVI iSimdVVVI[2];
extern const ISimdVVVV iSimdVVVV[2];
extern const ISimdVVVVx iSimdVVVVx[1];
extern const ISimdVVVe iSimdVVVe[25];
extern const ISimdVVVx iSimdVVVx[17];
extern const ISimdVVx iSimdVVx[13];
extern const ISimdWWV iSimdWWV[8];
extern const SimdBicOrr simdBicOrr[2];
extern const SimdCmp simdCmp[7];
extern const SimdDot simdDot[5];
extern const SimdFcadd simdFcadd[1];
extern const SimdFccmpFccmpe simdFccmpFccmpe[2];
extern const SimdFcm simdFcm[5];
extern const SimdFcmla simdFcmla[1];
extern const SimdFcmpFcmpe simdFcmpFcmpe[2];
extern const SimdFcvtLN simdFcvtLN[6];
extern const SimdFcvtSV simdFcvtSV[12];
extern const SimdFmlal simdFmlal[6];
extern const SimdLdNStN simdLdNStN[12];
extern const SimdLdSt simdLdSt[2];
extern const SimdLdpStp simdLdpStp[4];
extern const SimdLdurStur simdLdurStur[2];
extern const SimdMoviMvni simdMoviMvni[2];
extern const SimdShift simdShift[40];
extern const SimdShiftES simdShiftES[2];
extern const SimdSm3tt simdSm3tt[4];
extern const SimdSmovUmov simdSmovUmov[2];
extern const SimdSxtlUxtl simdSxtlUxtl[4];
extern const SimdTblTbx simdTblTbx[2];
// ----------------------------------------------------------------------------
// ${EncodingDataForward:End}
} // {EncodingData}
// a64::InstDB - InstNameIndex
// ===========================
// ${NameLimits:Begin}
// ------------------- Automatically generated, do not edit -------------------
enum : uint32_t { kMaxNameSize = 9 };
// ----------------------------------------------------------------------------
// ${NameLimits:End}
struct InstNameIndex {
uint16_t start;
uint16_t end;
};
// a64::InstDB - Tables
// ====================
#ifndef ASMJIT_NO_TEXT
extern const uint32_t _instNameIndexTable[];
extern const char _instNameStringTable[];
extern const InstNameIndex instNameIndex[26];
#endif // !ASMJIT_NO_TEXT
} // {InstDB}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_P_INCLUDED

View File

@@ -1,85 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/misc_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Operand - Tests
// ====================
#if defined(ASMJIT_TEST)
UNIT(a64_operand) {
INFO("Checking if a64::reg(...) matches built-in IDs");
EXPECT_EQ(w(5), w5);
EXPECT_EQ(x(5), x5);
INFO("Checking Gp register properties");
EXPECT_TRUE(Gp().isReg());
EXPECT_TRUE(w0.isReg());
EXPECT_TRUE(x0.isReg());
EXPECT_EQ(w0.id(), 0u);
EXPECT_EQ(x0.id(), 0u);
EXPECT_EQ(wzr.id(), Gp::kIdZr);
EXPECT_EQ(xzr.id(), Gp::kIdZr);
EXPECT_EQ(wsp.id(), Gp::kIdSp);
EXPECT_EQ(sp.id(), Gp::kIdSp);
EXPECT_EQ(w0.size(), 4u);
EXPECT_EQ(x0.size(), 8u);
EXPECT_EQ(w0.type(), RegType::kARM_GpW);
EXPECT_EQ(x0.type(), RegType::kARM_GpX);
EXPECT_EQ(w0.group(), RegGroup::kGp);
EXPECT_EQ(x0.group(), RegGroup::kGp);
INFO("Checking Vec register properties");
EXPECT_EQ(v0.type(), RegType::kARM_VecV);
EXPECT_EQ(d0.type(), RegType::kARM_VecD);
EXPECT_EQ(s0.type(), RegType::kARM_VecS);
EXPECT_EQ(h0.type(), RegType::kARM_VecH);
EXPECT_EQ(b0.type(), RegType::kARM_VecB);
EXPECT_EQ(v0.group(), RegGroup::kVec);
EXPECT_EQ(d0.group(), RegGroup::kVec);
EXPECT_EQ(s0.group(), RegGroup::kVec);
EXPECT_EQ(h0.group(), RegGroup::kVec);
EXPECT_EQ(b0.group(), RegGroup::kVec);
INFO("Checking Vec register element[] access");
Vec vd_1 = v15.d(1);
EXPECT_EQ(vd_1.type(), RegType::kARM_VecV);
EXPECT_EQ(vd_1.group(), RegGroup::kVec);
EXPECT_EQ(vd_1.id(), 15u);
EXPECT_TRUE(vd_1.isVecD2());
EXPECT_EQ(vd_1.elementType(), Vec::kElementTypeD);
EXPECT_TRUE(vd_1.hasElementIndex());
EXPECT_EQ(vd_1.elementIndex(), 1u);
Vec vs_3 = v15.s(3);
EXPECT_EQ(vs_3.type(), RegType::kARM_VecV);
EXPECT_EQ(vs_3.group(), RegGroup::kVec);
EXPECT_EQ(vs_3.id(), 15u);
EXPECT_TRUE(vs_3.isVecS4());
EXPECT_EQ(vs_3.elementType(), Vec::kElementTypeS);
EXPECT_TRUE(vs_3.hasElementIndex());
EXPECT_EQ(vs_3.elementIndex(), 3u);
Vec vb_4 = v15.b4(3);
EXPECT_EQ(vb_4.type(), RegType::kARM_VecV);
EXPECT_EQ(vb_4.group(), RegGroup::kVec);
EXPECT_EQ(vb_4.id(), 15u);
EXPECT_TRUE(vb_4.isVecB4x4());
EXPECT_EQ(vb_4.elementType(), Vec::kElementTypeB4);
EXPECT_TRUE(vb_4.hasElementIndex());
EXPECT_EQ(vb_4.elementIndex(), 3u);
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64

View File

@@ -1,312 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64OPERAND_H_INCLUDED
#define ASMJIT_ARM_A64OPERAND_H_INCLUDED
#include "../arm/armoperand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
using arm::Reg;
using arm::Mem;
using arm::Gp;
using arm::GpW;
using arm::GpX;
using arm::Vec;
using arm::VecB;
using arm::VecH;
using arm::VecS;
using arm::VecD;
using arm::VecV;
#ifndef _DOXYGEN
namespace regs {
#endif
using namespace ::asmjit::arm::regs;
static constexpr GpW w0 = GpW(0);
static constexpr GpW w1 = GpW(1);
static constexpr GpW w2 = GpW(2);
static constexpr GpW w3 = GpW(3);
static constexpr GpW w4 = GpW(4);
static constexpr GpW w5 = GpW(5);
static constexpr GpW w6 = GpW(6);
static constexpr GpW w7 = GpW(7);
static constexpr GpW w8 = GpW(8);
static constexpr GpW w9 = GpW(9);
static constexpr GpW w10 = GpW(10);
static constexpr GpW w11 = GpW(11);
static constexpr GpW w12 = GpW(12);
static constexpr GpW w13 = GpW(13);
static constexpr GpW w14 = GpW(14);
static constexpr GpW w15 = GpW(15);
static constexpr GpW w16 = GpW(16);
static constexpr GpW w17 = GpW(17);
static constexpr GpW w18 = GpW(18);
static constexpr GpW w19 = GpW(19);
static constexpr GpW w20 = GpW(20);
static constexpr GpW w21 = GpW(21);
static constexpr GpW w22 = GpW(22);
static constexpr GpW w23 = GpW(23);
static constexpr GpW w24 = GpW(24);
static constexpr GpW w25 = GpW(25);
static constexpr GpW w26 = GpW(26);
static constexpr GpW w27 = GpW(27);
static constexpr GpW w28 = GpW(28);
static constexpr GpW w29 = GpW(29);
static constexpr GpW w30 = GpW(30);
static constexpr GpW wzr = GpW(Gp::kIdZr);
static constexpr GpW wsp = GpW(Gp::kIdSp);
static constexpr GpX x0 = GpX(0);
static constexpr GpX x1 = GpX(1);
static constexpr GpX x2 = GpX(2);
static constexpr GpX x3 = GpX(3);
static constexpr GpX x4 = GpX(4);
static constexpr GpX x5 = GpX(5);
static constexpr GpX x6 = GpX(6);
static constexpr GpX x7 = GpX(7);
static constexpr GpX x8 = GpX(8);
static constexpr GpX x9 = GpX(9);
static constexpr GpX x10 = GpX(10);
static constexpr GpX x11 = GpX(11);
static constexpr GpX x12 = GpX(12);
static constexpr GpX x13 = GpX(13);
static constexpr GpX x14 = GpX(14);
static constexpr GpX x15 = GpX(15);
static constexpr GpX x16 = GpX(16);
static constexpr GpX x17 = GpX(17);
static constexpr GpX x18 = GpX(18);
static constexpr GpX x19 = GpX(19);
static constexpr GpX x20 = GpX(20);
static constexpr GpX x21 = GpX(21);
static constexpr GpX x22 = GpX(22);
static constexpr GpX x23 = GpX(23);
static constexpr GpX x24 = GpX(24);
static constexpr GpX x25 = GpX(25);
static constexpr GpX x26 = GpX(26);
static constexpr GpX x27 = GpX(27);
static constexpr GpX x28 = GpX(28);
static constexpr GpX x29 = GpX(29);
static constexpr GpX x30 = GpX(30);
static constexpr GpX xzr = GpX(Gp::kIdZr);
static constexpr GpX sp = GpX(Gp::kIdSp);
static constexpr VecB b0 = VecB(0);
static constexpr VecB b1 = VecB(1);
static constexpr VecB b2 = VecB(2);
static constexpr VecB b3 = VecB(3);
static constexpr VecB b4 = VecB(4);
static constexpr VecB b5 = VecB(5);
static constexpr VecB b6 = VecB(6);
static constexpr VecB b7 = VecB(7);
static constexpr VecB b8 = VecB(8);
static constexpr VecB b9 = VecB(9);
static constexpr VecB b10 = VecB(10);
static constexpr VecB b11 = VecB(11);
static constexpr VecB b12 = VecB(12);
static constexpr VecB b13 = VecB(13);
static constexpr VecB b14 = VecB(14);
static constexpr VecB b15 = VecB(15);
static constexpr VecB b16 = VecB(16);
static constexpr VecB b17 = VecB(17);
static constexpr VecB b18 = VecB(18);
static constexpr VecB b19 = VecB(19);
static constexpr VecB b20 = VecB(20);
static constexpr VecB b21 = VecB(21);
static constexpr VecB b22 = VecB(22);
static constexpr VecB b23 = VecB(23);
static constexpr VecB b24 = VecB(24);
static constexpr VecB b25 = VecB(25);
static constexpr VecB b26 = VecB(26);
static constexpr VecB b27 = VecB(27);
static constexpr VecB b28 = VecB(28);
static constexpr VecB b29 = VecB(29);
static constexpr VecB b30 = VecB(30);
static constexpr VecB b31 = VecB(31);
static constexpr VecH h0 = VecH(0);
static constexpr VecH h1 = VecH(1);
static constexpr VecH h2 = VecH(2);
static constexpr VecH h3 = VecH(3);
static constexpr VecH h4 = VecH(4);
static constexpr VecH h5 = VecH(5);
static constexpr VecH h6 = VecH(6);
static constexpr VecH h7 = VecH(7);
static constexpr VecH h8 = VecH(8);
static constexpr VecH h9 = VecH(9);
static constexpr VecH h10 = VecH(10);
static constexpr VecH h11 = VecH(11);
static constexpr VecH h12 = VecH(12);
static constexpr VecH h13 = VecH(13);
static constexpr VecH h14 = VecH(14);
static constexpr VecH h15 = VecH(15);
static constexpr VecH h16 = VecH(16);
static constexpr VecH h17 = VecH(17);
static constexpr VecH h18 = VecH(18);
static constexpr VecH h19 = VecH(19);
static constexpr VecH h20 = VecH(20);
static constexpr VecH h21 = VecH(21);
static constexpr VecH h22 = VecH(22);
static constexpr VecH h23 = VecH(23);
static constexpr VecH h24 = VecH(24);
static constexpr VecH h25 = VecH(25);
static constexpr VecH h26 = VecH(26);
static constexpr VecH h27 = VecH(27);
static constexpr VecH h28 = VecH(28);
static constexpr VecH h29 = VecH(29);
static constexpr VecH h30 = VecH(30);
static constexpr VecH h31 = VecH(31);
static constexpr VecS s0 = VecS(0);
static constexpr VecS s1 = VecS(1);
static constexpr VecS s2 = VecS(2);
static constexpr VecS s3 = VecS(3);
static constexpr VecS s4 = VecS(4);
static constexpr VecS s5 = VecS(5);
static constexpr VecS s6 = VecS(6);
static constexpr VecS s7 = VecS(7);
static constexpr VecS s8 = VecS(8);
static constexpr VecS s9 = VecS(9);
static constexpr VecS s10 = VecS(10);
static constexpr VecS s11 = VecS(11);
static constexpr VecS s12 = VecS(12);
static constexpr VecS s13 = VecS(13);
static constexpr VecS s14 = VecS(14);
static constexpr VecS s15 = VecS(15);
static constexpr VecS s16 = VecS(16);
static constexpr VecS s17 = VecS(17);
static constexpr VecS s18 = VecS(18);
static constexpr VecS s19 = VecS(19);
static constexpr VecS s20 = VecS(20);
static constexpr VecS s21 = VecS(21);
static constexpr VecS s22 = VecS(22);
static constexpr VecS s23 = VecS(23);
static constexpr VecS s24 = VecS(24);
static constexpr VecS s25 = VecS(25);
static constexpr VecS s26 = VecS(26);
static constexpr VecS s27 = VecS(27);
static constexpr VecS s28 = VecS(28);
static constexpr VecS s29 = VecS(29);
static constexpr VecS s30 = VecS(30);
static constexpr VecS s31 = VecS(31);
static constexpr VecD d0 = VecD(0);
static constexpr VecD d1 = VecD(1);
static constexpr VecD d2 = VecD(2);
static constexpr VecD d3 = VecD(3);
static constexpr VecD d4 = VecD(4);
static constexpr VecD d5 = VecD(5);
static constexpr VecD d6 = VecD(6);
static constexpr VecD d7 = VecD(7);
static constexpr VecD d8 = VecD(8);
static constexpr VecD d9 = VecD(9);
static constexpr VecD d10 = VecD(10);
static constexpr VecD d11 = VecD(11);
static constexpr VecD d12 = VecD(12);
static constexpr VecD d13 = VecD(13);
static constexpr VecD d14 = VecD(14);
static constexpr VecD d15 = VecD(15);
static constexpr VecD d16 = VecD(16);
static constexpr VecD d17 = VecD(17);
static constexpr VecD d18 = VecD(18);
static constexpr VecD d19 = VecD(19);
static constexpr VecD d20 = VecD(20);
static constexpr VecD d21 = VecD(21);
static constexpr VecD d22 = VecD(22);
static constexpr VecD d23 = VecD(23);
static constexpr VecD d24 = VecD(24);
static constexpr VecD d25 = VecD(25);
static constexpr VecD d26 = VecD(26);
static constexpr VecD d27 = VecD(27);
static constexpr VecD d28 = VecD(28);
static constexpr VecD d29 = VecD(29);
static constexpr VecD d30 = VecD(30);
static constexpr VecD d31 = VecD(31);
static constexpr VecV q0 = VecV(0);
static constexpr VecV q1 = VecV(1);
static constexpr VecV q2 = VecV(2);
static constexpr VecV q3 = VecV(3);
static constexpr VecV q4 = VecV(4);
static constexpr VecV q5 = VecV(5);
static constexpr VecV q6 = VecV(6);
static constexpr VecV q7 = VecV(7);
static constexpr VecV q8 = VecV(8);
static constexpr VecV q9 = VecV(9);
static constexpr VecV q10 = VecV(10);
static constexpr VecV q11 = VecV(11);
static constexpr VecV q12 = VecV(12);
static constexpr VecV q13 = VecV(13);
static constexpr VecV q14 = VecV(14);
static constexpr VecV q15 = VecV(15);
static constexpr VecV q16 = VecV(16);
static constexpr VecV q17 = VecV(17);
static constexpr VecV q18 = VecV(18);
static constexpr VecV q19 = VecV(19);
static constexpr VecV q20 = VecV(20);
static constexpr VecV q21 = VecV(21);
static constexpr VecV q22 = VecV(22);
static constexpr VecV q23 = VecV(23);
static constexpr VecV q24 = VecV(24);
static constexpr VecV q25 = VecV(25);
static constexpr VecV q26 = VecV(26);
static constexpr VecV q27 = VecV(27);
static constexpr VecV q28 = VecV(28);
static constexpr VecV q29 = VecV(29);
static constexpr VecV q30 = VecV(30);
static constexpr VecV q31 = VecV(31);
static constexpr VecV v0 = VecV(0);
static constexpr VecV v1 = VecV(1);
static constexpr VecV v2 = VecV(2);
static constexpr VecV v3 = VecV(3);
static constexpr VecV v4 = VecV(4);
static constexpr VecV v5 = VecV(5);
static constexpr VecV v6 = VecV(6);
static constexpr VecV v7 = VecV(7);
static constexpr VecV v8 = VecV(8);
static constexpr VecV v9 = VecV(9);
static constexpr VecV v10 = VecV(10);
static constexpr VecV v11 = VecV(11);
static constexpr VecV v12 = VecV(12);
static constexpr VecV v13 = VecV(13);
static constexpr VecV v14 = VecV(14);
static constexpr VecV v15 = VecV(15);
static constexpr VecV v16 = VecV(16);
static constexpr VecV v17 = VecV(17);
static constexpr VecV v18 = VecV(18);
static constexpr VecV v19 = VecV(19);
static constexpr VecV v20 = VecV(20);
static constexpr VecV v21 = VecV(21);
static constexpr VecV v22 = VecV(22);
static constexpr VecV v23 = VecV(23);
static constexpr VecV v24 = VecV(24);
static constexpr VecV v25 = VecV(25);
static constexpr VecV v26 = VecV(26);
static constexpr VecV v27 = VecV(27);
static constexpr VecV v28 = VecV(28);
static constexpr VecV v29 = VecV(29);
static constexpr VecV v30 = VecV(30);
static constexpr VecV v31 = VecV(31);
#ifndef _DOXYGEN
} // {regs}
// Make `a64::regs` accessible through `a64` namespace as well.
using namespace regs;
#endif
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64OPERAND_H_INCLUDED
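// A minimal usage sketch (assumes a standard asmjit setup with a JitRuntime
// `rt`; the snippet is illustrative and not part of this header):
//
// CodeHolder code;
// code.init(rt.environment());
// a64::Assembler a(&code);
// a.add(a64::w0, a64::w0, a64::w1);       // w0 += w1
// a.ldr(a64::x2, arm::ptr(a64::sp, 16));  // x2 = [sp + 16]
// a.ret(a64::x30);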

View File

@@ -1,852 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../core/cpuinfo.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::ARMRAPass - Helpers
// ========================
// TODO: [ARM] These should be shared with all backends.
ASMJIT_MAYBE_UNUSED
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
ASMJIT_ASSERT(size > 0 && size < 256);
static const uint64_t masks[] = {
0x00000000000000FFu, // 1
0x000000000000FFFFu, // 2
0x00000000FFFFFFFFu, // 4
0xFFFFFFFFFFFFFFFFu, // 8
0x0000000000000000u, // 16
0x0000000000000000u, // 32
0x0000000000000000u, // 64
0x0000000000000000u, // 128
0x0000000000000000u // 256
};
return masks[Support::ctz(size)];
}
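// For example, raImmMaskFromSize(4) == 0x00000000FFFFFFFFu (ctz(4) == 2), and
// sizes of 16 bytes and above map to an empty mask.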
static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
0xFFFFFFFFu, // [0] No consecutive.
0x00000000u, // [1] Invalid, never used.
0x7FFFFFFFu, // [2] 2 consecutive registers.
0x3FFFFFFFu, // [3] 3 consecutive registers.
0x1FFFFFFFu // [4] 4 consecutive registers.
};
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
static constexpr RATiedFlags map[] = {
RATiedFlags::kNone,
RATiedFlags::kRead | RATiedFlags::kUse, // kRead
RATiedFlags::kWrite | RATiedFlags::kOut, // kWrite
RATiedFlags::kRW | RATiedFlags::kUse, // kRW
};
return map[uint32_t(rwFlags & OpRWFlags::kRW)];
}
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
return raUseOutFlagsFromRWFlags(flags);
}
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
// a64::RACFGBuilder
// =================
class RACFGBuilder : public RACFGBuilderT<RACFGBuilder> {
public:
Arch _arch;
inline RACFGBuilder(ARMRAPass* pass) noexcept
: RACFGBuilderT<RACFGBuilder>(pass),
_arch(pass->cc()->arch()) {}
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept;
Error onBeforeRet(FuncRetNode* funcRet) noexcept;
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
};
// a64::RACFGBuilder - OnInst
// ==========================
// TODO: [ARM] This is just a workaround...
static InstControlFlow getControlFlowType(InstId instId) noexcept {
switch (BaseInst::extractRealId(instId)) {
case Inst::kIdB:
case Inst::kIdBr:
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL)
return InstControlFlow::kJump;
else
return InstControlFlow::kBranch;
case Inst::kIdBl:
case Inst::kIdBlr:
return InstControlFlow::kCall;
case Inst::kIdCbz:
case Inst::kIdCbnz:
case Inst::kIdTbz:
case Inst::kIdTbnz:
return InstControlFlow::kBranch;
case Inst::kIdRet:
return InstControlFlow::kReturn;
default:
return InstControlFlow::kRegular;
}
}
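// For example, `b` with the AL condition maps to kJump, a conditional `b.eq`
// maps to kBranch, `bl`/`blr` map to kCall, and `ret` maps to kReturn.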
Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept {
InstRWInfo rwInfo;
if (Inst::isDefinedId(inst->realId())) {
InstId instId = inst->id();
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
ASMJIT_PROPAGATE(InstInternal::queryRWInfo(_arch, inst->baseInst(), opArray, opCount, &rwInfo));
const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
uint32_t singleRegOps = 0;
ib.addInstRWFlags(rwInfo.instFlags());
if (opCount) {
uint32_t consecutiveOffset = 0xFFFFFFFFu;
uint32_t consecutiveParent = Globals::kInvalidId;
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
const OpRWInfo& opRwInfo = rwInfo.operand(i);
if (op.isReg()) {
// Register Operand
// ----------------
const Reg& reg = op.as<Reg>();
RATiedFlags flags = raRegRwFlags(opRwInfo.opFlags());
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
// Use RW instead of Write in case the whole register is not overwritten. This is important for
// liveness as we cannot kill a register that will still be used.
if ((flags & RATiedFlags::kRW) == RATiedFlags::kWrite) {
if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
// Not write-only operation.
flags = (flags & ~RATiedFlags::kOut) | (RATiedFlags::kRead | RATiedFlags::kUse);
}
}
RegGroup group = workReg->group();
RegMask useRegs = _pass->_availableRegs[group];
RegMask outRegs = useRegs;
uint32_t useId = BaseReg::kIdBad;
uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (opRwInfo.consecutiveLeadCount()) {
// There must be a single consecutive register lead, otherwise the RW data is invalid.
if (consecutiveOffset != 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
// A consecutive lead register cannot be used as a consecutive +1/+2/+3 register; the registers must be distinct.
if (RATiedReg::consecutiveDataFromFlags(flags) != 0)
return DebugUtils::errored(kErrorNotConsecutiveRegs);
flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
consecutiveOffset = 0;
RegMask filter = raConsecutiveLeadCountToRegMaskFilter[opRwInfo.consecutiveLeadCount()];
if (Support::test(flags, RATiedFlags::kUse)) {
flags |= RATiedFlags::kUseConsecutive;
useRegs &= filter;
}
else {
flags |= RATiedFlags::kOutConsecutive;
outRegs &= filter;
}
}
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
useId = opRwInfo.physId();
flags |= RATiedFlags::kUseFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
outId = opRwInfo.physId();
flags |= RATiedFlags::kOutFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
// Special cases regarding element access.
if (reg.as<Vec>().hasElementIndex()) {
// Only the first sixteen registers (v0..v15) can be used if the register uses an
// element accessor that accesses half-words (h[0..7] elements).
if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == Vec::kElementTypeH) {
if (Support::test(flags, RATiedFlags::kUse))
useRegs &= 0x0000FFFFu;
else
outRegs &= 0x0000FFFFu;
}
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
if (singleRegOps == i)
singleRegOps++;
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive))
consecutiveParent = workReg->workId();
}
}
else if (op.isMem()) {
// Memory Operand
// --------------
const Mem& mem = op.as<Mem>();
if (mem.isRegHome()) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
_pass->getOrCreateStackSlot(workReg);
}
else if (mem.hasBaseReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemBaseRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Base registers never have a fixed id on ARM.
const uint32_t useId = BaseReg::kIdBad;
const uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
else
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
if (mem.hasIndexReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemIndexRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Index registers never have a fixed id on ARM.
const uint32_t useId = BaseReg::kIdBad;
const uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
else
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
}
}
}
controlType = getControlFlowType(instId);
}
return kErrorOk;
}
// a64::RACFGBuilder - OnInvoke
// ============================
Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
const FuncDetail& fd = invokeNode->detail();
uint32_t argCount = invokeNode->argCount();
cc()->_setCursor(invokeNode->prev());
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
break;
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
continue;
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = Reg::groupOf(arg.regType());
if (regGroup != argGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
else {
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
}
}
else if (op.isImm()) {
if (arg.isReg()) {
BaseReg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, op.as<Imm>(), &reg));
invokeNode->_args[argIndex][valueIndex] = reg;
}
else {
ASMJIT_PROPAGATE(moveImmToStackArg(invokeNode, arg, op.as<Imm>()));
}
}
}
}
cc()->_setCursor(invokeNode);
if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex);
if (!ret)
break;
const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = Reg::groupOf(ret.regType());
if (regGroup != retGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
}
}
// This block has function call(s).
_curBlock->addFlags(RABlockFlags::kHasFuncCalls);
_pass->func()->frame().addAttributes(FuncAttributes::kHasFuncCalls);
_pass->func()->frame().updateCallStackSize(fd.argStackSize());
return kErrorOk;
}
Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept {
uint32_t argCount = invokeNode->argCount();
const FuncDetail& fd = invokeNode->detail();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
continue;
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
continue;
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isIndirect()) {
RegGroup regGroup = workReg->group();
if (regGroup != RegGroup::kGp)
return DebugUtils::errored(kErrorInvalidState);
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
else if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = Reg::groupOf(arg.regType());
if (regGroup == argGroup) {
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
}
}
}
}
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
if (!ret)
break;
const Operand& op = invokeNode->ret(retIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = Reg::groupOf(ret.regType());
if (regGroup == retGroup) {
ASMJIT_PROPAGATE(ib.addCallRet(workReg, ret.regId()));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
// Setup clobbered registers.
ib._clobbered[0] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(0)]) & ~fd.preservedRegs(RegGroup(0));
ib._clobbered[1] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(1)]) & ~fd.preservedRegs(RegGroup(1));
ib._clobbered[2] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(2)]) & ~fd.preservedRegs(RegGroup(2));
ib._clobbered[3] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(3)]) & ~fd.preservedRegs(RegGroup(3));
return kErrorOk;
}
// a64::RACFGBuilder - MoveImmToRegArg
// ===================================
Error RACFGBuilder::moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept {
DebugUtils::unused(invokeNode);
ASMJIT_ASSERT(arg.isReg());
Imm imm(imm_);
TypeId typeId = TypeId::kVoid;
switch (arg.typeId()) {
case TypeId::kInt8 : typeId = TypeId::kUInt64; imm.signExtend8Bits(); break;
case TypeId::kUInt8 : typeId = TypeId::kUInt64; imm.zeroExtend8Bits(); break;
case TypeId::kInt16 : typeId = TypeId::kUInt64; imm.signExtend16Bits(); break;
case TypeId::kUInt16: typeId = TypeId::kUInt64; imm.zeroExtend16Bits(); break;
case TypeId::kInt32 : typeId = TypeId::kUInt64; imm.signExtend32Bits(); break;
case TypeId::kUInt32: typeId = TypeId::kUInt64; imm.zeroExtend32Bits(); break;
case TypeId::kInt64 : typeId = TypeId::kUInt64; break;
case TypeId::kUInt64: typeId = TypeId::kUInt64; break;
default:
return DebugUtils::errored(kErrorInvalidAssignment);
}
ASMJIT_PROPAGATE(cc()->_newReg(out, typeId, nullptr));
cc()->virtRegById(out->id())->setWeight(BaseRAPass::kCallArgWeight);
return cc()->mov(out->as<Gp>(), imm);
}
// a64::RACFGBuilder - MoveImmToStackArg
// =====================================
Error RACFGBuilder::moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept {
BaseReg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, imm_, &reg));
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
return kErrorOk;
}
// a64::RACFGBuilder - MoveRegToStackArg
// =====================================
Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept {
DebugUtils::unused(invokeNode);
Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
if (reg.isGp())
return cc()->str(reg.as<Gp>(), stackPtr);
if (reg.isVec())
return cc()->str(reg.as<Vec>(), stackPtr);
return DebugUtils::errored(kErrorInvalidState);
}
// a64::RACFGBuilder - OnRet
// =========================
Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept {
DebugUtils::unused(funcRet);
return kErrorOk;
}
Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
const FuncDetail& funcDetail = _pass->func()->detail();
const Operand* opArray = funcRet->operands();
uint32_t opCount = funcRet->opCount();
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
if (op.isNone()) continue;
const FuncValue& ret = funcDetail.ret(i);
if (ASMJIT_UNLIKELY(!ret.isReg()))
return DebugUtils::errored(kErrorInvalidAssignment);
if (op.isReg()) {
// Register return value.
const Reg& reg = op.as<Reg>();
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, ret.regId(), 0, 0, BaseReg::kIdBad, 0));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
return kErrorOk;
}
// a64::ARMRAPass - Construction & Destruction
// ===========================================
ARMRAPass::ARMRAPass() noexcept
: BaseRAPass() { _iEmitHelper = &_emitHelper; }
ARMRAPass::~ARMRAPass() noexcept {}
// a64::ARMRAPass - OnInit / OnDone
// ================================
void ARMRAPass::onInit() noexcept {
Arch arch = cc()->arch();
_emitHelper._emitter = _cb;
_archTraits = &ArchTraits::byArch(arch);
_physRegCount.set(RegGroup::kGp, 32);
_physRegCount.set(RegGroup::kVec, 32);
_physRegCount.set(RegGroup::kExtraVirt2, 0);
_physRegCount.set(RegGroup::kExtraVirt3, 0);
_buildPhysIndex();
_availableRegCount = _physRegCount;
_availableRegs[RegGroup::kGp] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kGp));
_availableRegs[RegGroup::kVec] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kVec));
_availableRegs[RegGroup::kExtraVirt2] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt2));
_availableRegs[RegGroup::kExtraVirt3] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt3));
_scratchRegIndexes[0] = uint8_t(27);
_scratchRegIndexes[1] = uint8_t(28);
// The architecture-specific setup implicitly makes all registers available. So mark as
// unavailable all registers that are special and cannot be used in general.
bool hasFP = _func->frame().hasPreservedFP();
if (hasFP)
makeUnavailable(RegGroup::kGp, Gp::kIdFp);
makeUnavailable(RegGroup::kGp, Gp::kIdSp);
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
_sp = sp;
_fp = x29;
}
void ARMRAPass::onDone() noexcept {}
// a64::ARMRAPass - BuildCFG
// =========================
Error ARMRAPass::buildCFG() noexcept {
return RACFGBuilder(this).run();
}
// a64::ARMRAPass - Rewrite
// ========================
ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
uint32_t virtCount = cc()->_vRegArray.size();
BaseNode* node = first;
while (node != stop) {
BaseNode* next = node->next();
if (node->isInst()) {
InstNode* inst = node->as<InstNode>();
RAInst* raInst = node->passData<RAInst>();
Operand* operands = inst->operands();
uint32_t opCount = inst->opCount();
uint32_t i;
// Rewrite virtual registers into physical registers.
if (raInst) {
// If the instruction contains pass data (raInst) then it was a subject
// for register allocation and must be rewritten to use physical regs.
RATiedReg* tiedRegs = raInst->tiedRegs();
uint32_t tiedCount = raInst->tiedCount();
for (i = 0; i < tiedCount; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
uint32_t useId = tiedReg->useId();
while (useIt.hasNext())
inst->rewriteIdAtIndex(useIt.next(), useId);
Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
uint32_t outId = tiedReg->outId();
while (outIt.hasNext())
inst->rewriteIdAtIndex(outIt.next(), outId);
}
// This data is allocated by the Zone passed to `runOnFunction()`, which
// will be reset after the RA pass finishes. So reset this data to
// prevent having a dangling pointer after the RA pass is complete.
node->resetPassData();
if (ASMJIT_UNLIKELY(node->type() != NodeType::kInst)) {
// FuncRet terminates the flow; it must either be removed if the exit
// label is next to it (optimization) or patched to an architecture
// dependent jump instruction that jumps to the function's exit before
// the epilog.
if (node->type() == NodeType::kFuncRet) {
RABlock* block = raInst->block();
if (!isNextTo(node, _func->exitNode())) {
cc()->_setCursor(node->prev());
ASMJIT_PROPAGATE(emitJump(_func->exitNode()->label()));
}
BaseNode* prev = node->prev();
cc()->removeNode(node);
block->setLast(prev);
}
}
}
// Rewrite stack slot addresses.
for (i = 0; i < opCount; i++) {
Operand& op = operands[i];
if (op.isMem()) {
BaseMem& mem = op.as<BaseMem>();
if (mem.isRegHome()) {
uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
return DebugUtils::errored(kErrorInvalidVirtId);
VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
RAWorkReg* workReg = virtReg->workReg();
ASMJIT_ASSERT(workReg != nullptr);
RAStackSlot* slot = workReg->stackSlot();
int32_t offset = slot->offset();
mem._setBase(_sp.type(), slot->baseRegId());
mem.clearRegHome();
mem.addOffsetLo32(offset);
}
}
}
// Rewrite `loadAddressOf()` construct.
if (inst->realId() == Inst::kIdAdr && inst->opCount() == 2 && inst->op(1).isMem()) {
BaseMem mem = inst->op(1).as<BaseMem>();
int64_t offset = mem.offset();
if (!mem.hasBaseOrIndex()) {
inst->setId(Inst::kIdMov);
inst->setOp(1, Imm(offset));
}
else {
if (mem.hasIndex())
return DebugUtils::errored(kErrorInvalidAddressIndex);
GpX dst(inst->op(0).as<Gp>().id());
GpX base(mem.baseId());
InstId arithInstId = offset < 0 ? Inst::kIdSub : Inst::kIdAdd;
uint64_t absOffset = offset < 0 ? Support::neg(uint64_t(offset)) : uint64_t(offset);
inst->setId(arithInstId);
inst->setOpCount(3);
inst->setOp(1, base);
inst->setOp(2, Imm(absOffset));
// Use two operations if the offset cannot be encoded with ADD/SUB.
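// For example (illustrative values), absOffset == 0x12345 is emitted as:
//   add dst, base, #0x345     // low 12 bits, inserted before this node
//   add dst, dst,  #0x12000   // upper 12 bits (the shifted-by-12 form)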
if (absOffset > 0xFFFu && (absOffset & ~uint64_t(0xFFF000u)) != 0) {
if (absOffset <= 0xFFFFFFu) {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(arithInstId, dst, base, Imm(absOffset & 0xFFFu)));
inst->setOp(1, dst);
inst->setOp(2, Imm(absOffset & 0xFFF000u));
}
else {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, inst->op(0), Imm(absOffset)));
inst->setOp(1, base);
inst->setOp(2, dst);
}
}
}
}
}
node = next;
}
return kErrorOk;
}
// a64::ARMRAPass - Prolog & Epilog
// ================================
Error ARMRAPass::updateStackFrame() noexcept {
if (_func->frame().hasFuncCalls())
_func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));
return BaseRAPass::updateStackFrame();
}
// a64::ARMRAPass - OnEmit
// =======================
Error ARMRAPass::emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseReg dst(wReg->signature(), dstPhysId);
BaseReg src(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<MOVE> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dst, src, wReg->typeId(), comment);
}
Error ARMRAPass::emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
DebugUtils::unused(aWorkId, aPhysId, bWorkId, bPhysId);
return DebugUtils::errored(kErrorInvalidState);
}
Error ARMRAPass::emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseReg dstReg(wReg->signature(), dstPhysId);
BaseMem srcMem(workRegAsMem(wReg));
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<LOAD> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstReg, srcMem, wReg->typeId(), comment);
}
Error ARMRAPass::emitSave(uint32_t workId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseMem dstMem(workRegAsMem(wReg));
BaseReg srcReg(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<SAVE> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstMem, srcReg, wReg->typeId(), comment);
}
Error ARMRAPass::emitJump(const Label& label) noexcept {
return cc()->b(label);
}
Error ARMRAPass::emitPreCall(InvokeNode* invokeNode) noexcept {
DebugUtils::unused(invokeNode);
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER

View File

@@ -1,105 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#define ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/rabuilders_p.h"
#include "../core/rapass_p.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! ARM register allocation pass.
//!
//! Takes care of generating function prologs and epilogs, and also performs
//! register allocation.
class ARMRAPass : public BaseRAPass {
public:
ASMJIT_NONCOPYABLE(ARMRAPass)
typedef BaseRAPass Base;
EmitHelper _emitHelper;
//! \name Construction & Destruction
//! \{
ARMRAPass() noexcept;
~ARMRAPass() noexcept override;
//! \}
//! \name Accessors
//! \{
//! Returns the compiler cast to `arm::Compiler`.
ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
//! Returns emit helper.
ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; }
//! \}
//! \name Events
//! \{
void onInit() noexcept override;
void onDone() noexcept override;
//! \}
//! \name CFG
//! \{
Error buildCFG() noexcept override;
//! \}
//! \name Rewrite
//! \{
Error _rewrite(BaseNode* first, BaseNode* stop) noexcept override;
//! \}
//! \name Prolog & Epilog
//! \{
Error updateStackFrame() noexcept override;
//! \}
//! \name Emit Helpers
//! \{
Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;
Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;
Error emitJump(const Label& label) noexcept override;
Error emitPreCall(InvokeNode* invokeNode) noexcept override;
//! \}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_A64RAPASS_P_H_INCLUDED

View File

@@ -1,471 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/armformatter_p.h"
#include "../arm/armoperand.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
// arm::FormatterInternal - Format Feature
// =======================================
Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept {
// @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "sFeature", "strip": "k"}@
static const char sFeatureString[] =
"None\0"
"ARMv6\0"
"ARMv7\0"
"ARMv8a\0"
"THUMB\0"
"THUMBv2\0"
"AES\0"
"AFP\0"
"ASIMD\0"
"BF16\0"
"BTI\0"
"CCIDX\0"
"CHK\0"
"CLRBHB\0"
"CPUID\0"
"CRC32\0"
"CSSC\0"
"D128\0"
"DGH\0"
"DIT\0"
"DOTPROD\0"
"DPB\0"
"DPB2\0"
"EBF16\0"
"ECV\0"
"EDSP\0"
"FCMA\0"
"FGT\0"
"FGT2\0"
"FHM\0"
"FLAGM\0"
"FLAGM2\0"
"FMAC\0"
"FP\0"
"FP16\0"
"FP16CONV\0"
"FRINTTS\0"
"GCS\0"
"HBC\0"
"HCX\0"
"I8MM\0"
"IDIVA\0"
"IDIVT\0"
"JSCVT\0"
"LOR\0"
"LRCPC\0"
"LRCPC2\0"
"LRCPC3\0"
"LS64\0"
"LS64_ACCDATA\0"
"LS64_V\0"
"LSE\0"
"LSE128\0"
"LSE2\0"
"MOPS\0"
"MPAM\0"
"MTE\0"
"MTE2\0"
"MTE3\0"
"MTE4\0"
"NMI\0"
"NV\0"
"NV2\0"
"PAN\0"
"PAN2\0"
"PAN3\0"
"PAUTH\0"
"PMU\0"
"PMULL\0"
"PRFMSLC\0"
"RAS\0"
"RAS1_1\0"
"RAS2\0"
"RDM\0"
"RME\0"
"RNG\0"
"RNG_TRAP\0"
"RPRES\0"
"RPRFM\0"
"SB\0"
"SHA1\0"
"SHA256\0"
"SHA3\0"
"SHA512\0"
"SM3\0"
"SM4\0"
"SME\0"
"SME2\0"
"SME2_1\0"
"SME_B16B16\0"
"SME_B16F32\0"
"SME_BI32I32\0"
"SME_F16F16\0"
"SME_F16F32\0"
"SME_F32F32\0"
"SME_F64F64\0"
"SME_FA64\0"
"SME_I16I32\0"
"SME_I16I64\0"
"SME_I8I32\0"
"SPECRES\0"
"SPECRES2\0"
"SSBS\0"
"SSBS2\0"
"SVE\0"
"SVE2\0"
"SVE2_1\0"
"SVE_AES\0"
"SVE_B16B16\0"
"SVE_BF16\0"
"SVE_BITPERM\0"
"SVE_EBF16\0"
"SVE_F32MM\0"
"SVE_F64MM\0"
"SVE_I8MM\0"
"SVE_PMULL128\0"
"SVE_SHA3\0"
"SVE_SM4\0"
"SYSINSTR128\0"
"SYSREG128\0"
"THE\0"
"TME\0"
"TRF\0"
"UAO\0"
"VFP_D32\0"
"VHE\0"
"WFXT\0"
"XS\0"
"<Unknown>\0";
static const uint16_t sFeatureIndex[] = {
0, 5, 11, 17, 24, 30, 38, 42, 46, 52, 57, 61, 67, 71, 78, 84, 90, 95, 100,
104, 108, 116, 120, 125, 131, 135, 140, 145, 149, 154, 158, 164, 171, 176,
179, 184, 193, 201, 205, 209, 213, 218, 224, 230, 236, 240, 246, 253, 260,
265, 278, 285, 289, 296, 301, 306, 311, 315, 320, 325, 330, 334, 337, 341,
345, 350, 355, 361, 365, 371, 379, 383, 390, 395, 399, 403, 407, 416, 422,
428, 431, 436, 443, 448, 455, 459, 463, 467, 472, 479, 490, 501, 513, 524,
535, 546, 557, 566, 577, 588, 598, 606, 615, 620, 626, 630, 635, 642, 650,
661, 670, 682, 692, 702, 712, 721, 734, 743, 751, 763, 773, 777, 781, 785,
789, 797, 801, 806, 809
};
// @EnumStringEnd@
return sb.append(sFeatureString + sFeatureIndex[Support::min<uint32_t>(featureId, uint32_t(CpuFeatures::ARM::kMaxValue) + 1)]);
}
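The `sFeatureString` / `sFeatureIndex` pair above is a compact idiom: all names live in a single NUL-separated character array, a parallel offset table indexes into it, and out-of-range ids are clamped to the final `<Unknown>` entry. A minimal sketch of the same idiom (names invented for illustration):
#include <cstdint>
static const char kNames[] = "add\0sub\0mul\0<Unknown>\0";
static const uint16_t kNameIndex[] = { 0, 4, 8, 12 };
// Out-of-range ids map to the last entry ("<Unknown>"), exactly as
// formatFeature() clamps with Support::min() above.
static const char* nameOf(uint32_t id) {
  return kNames + kNameIndex[id < 3u ? id : 3u];
}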
// arm::FormatterInternal - Format Constants
// =========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatCondCode(String& sb, CondCode cc) noexcept {
static const char condCodeData[] =
"al\0" "na\0"
"eq\0" "ne\0"
"cs\0" "cc\0" "mi\0" "pl\0" "vs\0" "vc\0"
"hi\0" "ls\0" "ge\0" "lt\0" "gt\0" "le\0"
"<Unknown>";
return sb.append(condCodeData + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shiftOp) noexcept {
const char* str = "<Unknown>";
switch (shiftOp) {
case ShiftOp::kLSL: str = "lsl"; break;
case ShiftOp::kLSR: str = "lsr"; break;
case ShiftOp::kASR: str = "asr"; break;
case ShiftOp::kROR: str = "ror"; break;
case ShiftOp::kRRX: str = "rrx"; break;
case ShiftOp::kMSL: str = "msl"; break;
case ShiftOp::kUXTB: str = "uxtb"; break;
case ShiftOp::kUXTH: str = "uxth"; break;
case ShiftOp::kUXTW: str = "uxtw"; break;
case ShiftOp::kUXTX: str = "uxtx"; break;
case ShiftOp::kSXTB: str = "sxtb"; break;
case ShiftOp::kSXTH: str = "sxth"; break;
case ShiftOp::kSXTW: str = "sxtw"; break;
case ShiftOp::kSXTX: str = "sxtx"; break;
}
return sb.append(str);
}
// arm::FormatterInternal - Format Register
// ========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType,
uint32_t elementIndex) noexcept {
DebugUtils::unused(flags);
DebugUtils::unused(arch);
static const char bhsdq[] = "bhsdq";
bool virtRegFormatted = false;
#ifndef ASMJIT_NO_COMPILER
if (Operand::isVirtId(rId)) {
if (emitter && emitter->isCompiler()) {
const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
if (cc->isVirtIdValid(rId)) {
VirtReg* vReg = cc->virtRegById(rId);
ASMJIT_ASSERT(vReg != nullptr);
const char* name = vReg->name();
if (name && name[0] != '\0')
ASMJIT_PROPAGATE(sb.append(name));
else
ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));
virtRegFormatted = true;
}
}
}
#else
DebugUtils::unused(emitter, flags);
#endif
if (!virtRegFormatted) {
char letter = '\0';
switch (regType) {
case RegType::kARM_VecB:
case RegType::kARM_VecH:
case RegType::kARM_VecS:
case RegType::kARM_VecD:
case RegType::kARM_VecV:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)];
if (elementType)
letter = 'v';
break;
case RegType::kARM_GpW:
if (Environment::is64Bit(arch)) {
letter = 'w';
if (rId == Gp::kIdZr)
return sb.append("wzr", 3);
if (rId == Gp::kIdSp)
return sb.append("wsp", 3);
}
else {
letter = 'r';
if (rId == 13)
return sb.append("sp", 2);
if (rId == 14)
return sb.append("lr", 2);
if (rId == 15)
return sb.append("pc", 2);
}
break;
case RegType::kARM_GpX:
if (Environment::is64Bit(arch)) {
if (rId == Gp::kIdZr)
return sb.append("xzr", 3);
if (rId == Gp::kIdSp)
return sb.append("sp", 2);
letter = 'x';
break;
}
// X registers are undefined in 32-bit mode.
ASMJIT_FALLTHROUGH;
default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?$u", uint32_t(regType), rId));
break;
}
if (letter)
ASMJIT_PROPAGATE(sb.appendFormat("%c%u", letter, rId));
}
if (elementType) {
char elementLetter = '\0';
uint32_t elementCount = 0;
switch (elementType) {
case Vec::kElementTypeB:
elementLetter = 'b';
elementCount = 16;
break;
case Vec::kElementTypeH:
elementLetter = 'h';
elementCount = 8;
break;
case Vec::kElementTypeS:
elementLetter = 's';
elementCount = 4;
break;
case Vec::kElementTypeD:
elementLetter = 'd';
elementCount = 2;
break;
default:
return sb.append(".<Unknown>");
}
if (elementLetter) {
if (elementIndex == 0xFFFFFFFFu) {
if (regType == RegType::kARM_VecD)
elementCount /= 2u;
ASMJIT_PROPAGATE(sb.appendFormat(".%u%c", elementCount, elementLetter));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat(".%c[%u]", elementLetter, elementIndex));
}
}
}
else if (elementIndex != 0xFFFFFFFFu) {
    // This should only be used by AArch32; AArch64 requires an additional elementType in index[].
ASMJIT_PROPAGATE(sb.appendFormat("[%u]", elementIndex));
}
return kErrorOk;
}
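A few hand-derived examples of what the branching above produces (illustrative, not generated by running the code; all assume an AArch64 `arch`):
// (regType, id, element)             -> formatted text
// kARM_GpX,  id=3                    -> "x3"
// kARM_GpX,  id=31 (Gp::kIdSp)       -> "sp"
// kARM_GpW,  id=63 (Gp::kIdZr)       -> "wzr"
// kARM_VecV, id=0, type=S, no index  -> "v0.4s"   (letter forced to 'v')
// kARM_VecD, id=1, type=S, no index  -> "v1.2s"   (elementCount halved for D)
// kARM_VecV, id=0, type=S, index=2   -> "v0.s[2]"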
// a64::FormatterInternal - Format Operand
// =======================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
if (op.isReg()) {
const BaseReg& reg = op.as<BaseReg>();
uint32_t elementType = op.as<Vec>().elementType();
uint32_t elementIndex = op.as<Vec>().elementIndex();
if (!op.as<Vec>().hasElementIndex())
elementIndex = 0xFFFFFFFFu;
return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex);
}
if (op.isMem()) {
const Mem& m = op.as<Mem>();
ASMJIT_PROPAGATE(sb.append('['));
if (m.hasBase()) {
if (m.hasBaseLabel()) {
ASMJIT_PROPAGATE(Formatter::formatLabel(sb, flags, emitter, m.baseId()));
}
else {
FormatFlags modifiedFlags = flags;
if (m.isRegHome()) {
ASMJIT_PROPAGATE(sb.append('&'));
modifiedFlags &= ~FormatFlags::kRegCasts;
}
ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, arch, m.baseType(), m.baseId()));
}
}
else {
      // ARM addressing really requires a base register.
if (m.hasIndex() || m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append("<None>"));
}
}
    // The post index makes it look like there was another operand, but it's
    // still part of AsmJit's `arm::Mem` operand, so it's consistent with
    // other architectures.
if (m.isPostIndex())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.hasIndex()) {
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, m.indexType(), m.indexId()));
}
if (m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append(", "));
int64_t off = int64_t(m.offset());
uint32_t base = 10;
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9)
base = 16;
if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base));
}
else {
ASMJIT_PROPAGATE(sb.append("0x"));
ASMJIT_PROPAGATE(sb.appendUInt(uint64_t(off), base));
}
}
if (m.hasShift()) {
ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost())
ASMJIT_PROPAGATE(formatShiftOp(sb, (ShiftOp)m.predicate()));
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
}
if (!m.isPostIndex())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.isPreIndex())
ASMJIT_PROPAGATE(sb.append('!'));
return kErrorOk;
}
if (op.isImm()) {
const Imm& i = op.as<Imm>();
int64_t val = i.value();
if (Support::test(flags, FormatFlags::kHexImms) && uint64_t(val) > 9) {
ASMJIT_PROPAGATE(sb.append("0x"));
return sb.appendUInt(uint64_t(val), 16);
}
else {
return sb.appendInt(val, 10);
}
}
if (op.isLabel()) {
return Formatter::formatLabel(sb, flags, emitter, op.id());
}
return sb.append("<None>");
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
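A few hand-derived examples of what formatOperand() above produces for `arm::Mem` operands (illustrative only):
// [base, offset]          -> "[x0, 16]"
// pre-index, write-back   -> "[x0, 16]!"
// post-index, write-back  -> "[x0], 16"
// base + shifted index    -> "[x0, x1 lsl 3]"
// with kHexOffsets        -> "[x0, 0x10]"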

View File

@@ -1,61 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \cond INTERNAL
//! \addtogroup asmjit_arm
//! \{
namespace FormatterInternal {
Error ASMJIT_CDECL formatFeature(
String& sb,
uint32_t featureId) noexcept;
Error ASMJIT_CDECL formatCondCode(
String& sb,
CondCode cc) noexcept;
Error ASMJIT_CDECL formatShiftOp(
String& sb,
ShiftOp shiftOp) noexcept;
Error ASMJIT_CDECL formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType = 0,
uint32_t elementIndex = 0xFFFFFFFF) noexcept;
Error ASMJIT_CDECL formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED

View File

@@ -1,21 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#define ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#include "../core/archcommons.h"
#include "../core/inst.h"
//! \namespace asmjit::arm
//! \ingroup asmjit_arm
//!
//! API shared between AArch32 & AArch64 backends.
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_ARMGLOBALS_H_INCLUDED

View File

@@ -1,626 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMOPERAND_H_INCLUDED
#define ASMJIT_ARM_ARMOPERAND_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
class Reg;
class Mem;
class Gp;
class GpW;
class GpX;
class Vec;
class VecB;
class VecH;
class VecS;
class VecD;
class VecV;
//! Register traits (ARM/AArch64).
//!
//! Register traits contains information about a particular register type. It's used by asmjit to setup register
//! information on-the-fly and to populate tables that contain register information (this way it's possible to
//! change register types and groups without having to reorder these tables).
template<RegType kRegType>
struct RegTraits : public BaseRegTraits {};
//! \cond
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
// | Reg | Reg-Type | Reg-Group |Sz |Cnt| TypeId |
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
ASMJIT_DEFINE_REG_TRAITS(GpW , RegType::kARM_GpW , RegGroup::kGp , 4 , 32, TypeId::kInt32 );
ASMJIT_DEFINE_REG_TRAITS(GpX , RegType::kARM_GpX , RegGroup::kGp , 8 , 32, TypeId::kInt64 );
ASMJIT_DEFINE_REG_TRAITS(VecB , RegType::kARM_VecB , RegGroup::kVec , 1 , 32, TypeId::kVoid );
ASMJIT_DEFINE_REG_TRAITS(VecH , RegType::kARM_VecH , RegGroup::kVec , 2 , 32, TypeId::kVoid );
ASMJIT_DEFINE_REG_TRAITS(VecS , RegType::kARM_VecS , RegGroup::kVec , 4 , 32, TypeId::kInt32x1 );
ASMJIT_DEFINE_REG_TRAITS(VecD , RegType::kARM_VecD , RegGroup::kVec , 8 , 32, TypeId::kInt32x2 );
ASMJIT_DEFINE_REG_TRAITS(VecV , RegType::kARM_VecV , RegGroup::kVec , 16, 32, TypeId::kInt32x4 );
//! \endcond
//! Register (ARM).
class Reg : public BaseReg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)
//! Gets whether the register is an `R` or `W` register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is an `X` register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
//! Gets whether the register is a VEC-B register (8-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a VEC-H register (16-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a VEC-S register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a VEC-D register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a VEC-Q register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }
//! Gets whether the register is a VEC-V register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
template<RegType kRegType>
ASMJIT_INLINE_NODEBUG void setRegT(uint32_t id) noexcept {
setSignature(RegTraits<kRegType>::kSignature);
setId(id);
}
ASMJIT_INLINE_NODEBUG void setTypeAndId(RegType type, uint32_t id) noexcept {
setSignature(signatureOf(type));
setId(id);
}
static ASMJIT_INLINE_NODEBUG RegGroup groupOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToGroup(type); }
static ASMJIT_INLINE_NODEBUG TypeId typeIdOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToTypeId(type); }
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }
template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfT() noexcept { return RegTraits<kRegType>::kSignature; }
static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); }
static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); }
static ASMJIT_INLINE_NODEBUG bool isVecB(const Operand_& op) noexcept { return op.as<Reg>().isVecB(); }
static ASMJIT_INLINE_NODEBUG bool isVecH(const Operand_& op) noexcept { return op.as<Reg>().isVecH(); }
static ASMJIT_INLINE_NODEBUG bool isVecS(const Operand_& op) noexcept { return op.as<Reg>().isVecS(); }
static ASMJIT_INLINE_NODEBUG bool isVecD(const Operand_& op) noexcept { return op.as<Reg>().isVecD(); }
static ASMJIT_INLINE_NODEBUG bool isVecQ(const Operand_& op) noexcept { return op.as<Reg>().isVecQ(); }
static ASMJIT_INLINE_NODEBUG bool isVecV(const Operand_& op) noexcept { return op.as<Reg>().isVecV(); }
static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGpW(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGpX(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecB(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecB(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecH(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecH(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecS(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecS(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecD(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecD(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecQ(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecQ(op)) & unsigned(op.id() == id)); }
static ASMJIT_INLINE_NODEBUG bool isVecV(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecV(op)) & unsigned(op.id() == id)); }
};
//! General purpose register (ARM).
class Gp : public Reg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Gp, Reg)
//! Special register id.
enum Id : uint32_t {
    //! Register whose role depends on the OS; it can be used as a TLS offset.
kIdOs = 18,
//! Frame pointer.
kIdFp = 29,
//! Link register.
kIdLr = 30,
//! Stack register id.
kIdSp = 31,
//! Zero register id.
//!
    //! Although the zero register has the same id as the stack register it gets special treatment, because we need
    //! to be able to distinguish between the two at the API level. Some instructions were designed to be used with
    //! SP and others with ZR, so we need a way to tell them apart to make sure we emit the right thing.
    //!
    //! The number 63 is not random; `id & 31` always yields 31 for both SP and ZR inputs, which is the identifier
    //! the AArch64 ISA uses to encode either SP or ZR depending on the instruction.
kIdZr = 63
};
ASMJIT_INLINE_NODEBUG constexpr bool isZR() const noexcept { return id() == kIdZr; }
ASMJIT_INLINE_NODEBUG constexpr bool isSP() const noexcept { return id() == kIdSp; }
  //! Cast this register to a 32-bit R or W register.
ASMJIT_INLINE_NODEBUG GpW w() const noexcept;
//! Cast this register to a 64-bit X.
ASMJIT_INLINE_NODEBUG GpX x() const noexcept;
};
//! Vector register (ARM).
class Vec : public Reg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Vec, Reg)
//! Additional signature bits used by arm::Vec.
enum AdditionalBits : uint32_t {
// Register element type (3 bits).
// |........|........|.XXX....|........|
kSignatureRegElementTypeShift = 12,
kSignatureRegElementTypeMask = 0x07 << kSignatureRegElementTypeShift,
// Register has element index (1 bit).
// |........|........|X.......|........|
kSignatureRegElementFlagShift = 15,
kSignatureRegElementFlagMask = 0x01 << kSignatureRegElementFlagShift,
// Register element index (4 bits).
// |........|....XXXX|........|........|
kSignatureRegElementIndexShift = 16,
kSignatureRegElementIndexMask = 0x0F << kSignatureRegElementIndexShift
};
//! Element type (AArch64 only).
enum ElementType : uint32_t {
//! No element type specified.
kElementTypeNone = 0,
//! Byte elements (B8 or B16).
kElementTypeB,
//! Halfword elements (H4 or H8).
kElementTypeH,
//! Singleword elements (S2 or S4).
kElementTypeS,
//! Doubleword elements (D2).
kElementTypeD,
//! Byte elements grouped by 4 bytes (B4).
//!
    //! \note This element-type is only used by a few instructions.
kElementTypeB4,
//! Halfword elements grouped by 2 halfwords (H2).
//!
    //! \note This element-type is only used by a few instructions.
kElementTypeH2,
//! Count of element types.
kElementTypeCount
};
//! \cond
//! Shortcuts.
enum SignatureReg : uint32_t {
kSignatureElementB = kElementTypeB << kSignatureRegElementTypeShift,
kSignatureElementH = kElementTypeH << kSignatureRegElementTypeShift,
kSignatureElementS = kElementTypeS << kSignatureRegElementTypeShift,
kSignatureElementD = kElementTypeD << kSignatureRegElementTypeShift,
kSignatureElementB4 = kElementTypeB4 << kSignatureRegElementTypeShift,
kSignatureElementH2 = kElementTypeH2 << kSignatureRegElementTypeShift
};
//! \endcond
  //! Returns whether the register has an associated element type.
ASMJIT_INLINE_NODEBUG constexpr bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
  //! Returns whether the register has an element index (it's an element index access).
ASMJIT_INLINE_NODEBUG constexpr bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
  //! Returns whether the register has an element type or element index (or both).
ASMJIT_INLINE_NODEBUG constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
//! Returns element type of the register.
ASMJIT_INLINE_NODEBUG constexpr uint32_t elementType() const noexcept { return _signature.getField<kSignatureRegElementTypeMask>(); }
//! Sets element type of the register to `elementType`.
ASMJIT_INLINE_NODEBUG void setElementType(uint32_t elementType) noexcept { _signature.setField<kSignatureRegElementTypeMask>(elementType); }
//! Resets element type to none.
ASMJIT_INLINE_NODEBUG void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }
//! Returns element index of the register.
ASMJIT_INLINE_NODEBUG constexpr uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
  //! Sets element index of the register to `elementIndex`.
ASMJIT_INLINE_NODEBUG void setElementIndex(uint32_t elementIndex) noexcept {
_signature |= kSignatureRegElementFlagMask;
_signature.setField<kSignatureRegElementIndexMask>(elementIndex);
}
//! Resets element index of the register.
ASMJIT_INLINE_NODEBUG void resetElementIndex() noexcept {
_signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
}
ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4); }
ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2); }
//! Creates a cloned register with element access.
ASMJIT_INLINE_NODEBUG Vec at(uint32_t elementIndex) const noexcept {
return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
}
//! Cast this register to an 8-bit B register (AArch64 only).
ASMJIT_INLINE_NODEBUG VecB b() const noexcept;
//! Cast this register to a 16-bit H register (AArch64 only).
ASMJIT_INLINE_NODEBUG VecH h() const noexcept;
//! Cast this register to a 32-bit S register.
ASMJIT_INLINE_NODEBUG VecS s() const noexcept;
//! Cast this register to a 64-bit D register.
ASMJIT_INLINE_NODEBUG VecD d() const noexcept;
//! Cast this register to a 128-bit Q register.
ASMJIT_INLINE_NODEBUG VecV q() const noexcept;
//! Cast this register to a 128-bit V register.
ASMJIT_INLINE_NODEBUG VecV v() const noexcept;
//! Cast this register to a 128-bit V.B[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV b(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV h(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.S[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV s(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.D[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV d(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H2[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV h2(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.B4[elementIndex] register.
ASMJIT_INLINE_NODEBUG VecV b4(uint32_t elementIndex) const noexcept;
//! Cast this register to V.8B.
ASMJIT_INLINE_NODEBUG VecD b8() const noexcept;
//! Cast this register to V.16B.
ASMJIT_INLINE_NODEBUG VecV b16() const noexcept;
//! Cast this register to V.2H.
ASMJIT_INLINE_NODEBUG VecS h2() const noexcept;
//! Cast this register to V.4H.
ASMJIT_INLINE_NODEBUG VecD h4() const noexcept;
//! Cast this register to V.8H.
ASMJIT_INLINE_NODEBUG VecV h8() const noexcept;
//! Cast this register to V.2S.
ASMJIT_INLINE_NODEBUG VecD s2() const noexcept;
//! Cast this register to V.4S.
ASMJIT_INLINE_NODEBUG VecV s4() const noexcept;
//! Cast this register to V.2D.
ASMJIT_INLINE_NODEBUG VecV d2() const noexcept;
static ASMJIT_INLINE_NODEBUG constexpr OperandSignature _makeElementAccessSignature(uint32_t elementType, uint32_t elementIndex) noexcept {
return OperandSignature{
uint32_t(RegTraits<RegType::kARM_VecV>::kSignature) |
uint32_t(kSignatureRegElementFlagMask) |
uint32_t(elementType << kSignatureRegElementTypeShift) |
uint32_t(elementIndex << kSignatureRegElementIndexShift)};
}
};
//! 32-bit GPW (AArch64) and/or GPR (ARM/AArch32) register.
class GpW : public Gp { ASMJIT_DEFINE_FINAL_REG(GpW, Gp, RegTraits<RegType::kARM_GpW>) };
//! 64-bit GPX (AArch64) register.
class GpX : public Gp { ASMJIT_DEFINE_FINAL_REG(GpX, Gp, RegTraits<RegType::kARM_GpX>) };
//! 8-bit view (S) of VFP/SIMD register.
class VecB : public Vec { ASMJIT_DEFINE_FINAL_REG(VecB, Vec, RegTraits<RegType::kARM_VecB>) };
//! 16-bit view (S) of VFP/SIMD register.
class VecH : public Vec { ASMJIT_DEFINE_FINAL_REG(VecH, Vec, RegTraits<RegType::kARM_VecH>) };
//! 32-bit view (S) of VFP/SIMD register.
class VecS : public Vec { ASMJIT_DEFINE_FINAL_REG(VecS, Vec, RegTraits<RegType::kARM_VecS>) };
//! 64-bit view (D) of VFP/SIMD register.
class VecD : public Vec { ASMJIT_DEFINE_FINAL_REG(VecD, Vec, RegTraits<RegType::kARM_VecD>) };
//! 128-bit vector register (Q or V).
class VecV : public Vec { ASMJIT_DEFINE_FINAL_REG(VecV, Vec, RegTraits<RegType::kARM_VecV>) };
ASMJIT_INLINE_NODEBUG GpW Gp::w() const noexcept { return GpW(id()); }
ASMJIT_INLINE_NODEBUG GpX Gp::x() const noexcept { return GpX(id()); }
ASMJIT_INLINE_NODEBUG VecB Vec::b() const noexcept { return VecB(id()); }
ASMJIT_INLINE_NODEBUG VecH Vec::h() const noexcept { return VecH(id()); }
ASMJIT_INLINE_NODEBUG VecS Vec::s() const noexcept { return VecS(id()); }
ASMJIT_INLINE_NODEBUG VecD Vec::d() const noexcept { return VecD(id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::q() const noexcept { return VecV(id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::v() const noexcept { return VecV(id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::b(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::h(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::s(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeS, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::d(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeD, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::h2(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH2, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::b4(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB4, elementIndex), id()); }
ASMJIT_INLINE_NODEBUG VecD Vec::b8() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementB}, id()); }
ASMJIT_INLINE_NODEBUG VecS Vec::h2() const noexcept { return VecS(OperandSignature{VecS::kSignature | kSignatureElementH}, id()); }
ASMJIT_INLINE_NODEBUG VecD Vec::h4() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementH}, id()); }
ASMJIT_INLINE_NODEBUG VecD Vec::s2() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementS}, id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::b16() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementB}, id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::h8() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementH}, id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::s4() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementS}, id()); }
ASMJIT_INLINE_NODEBUG VecV Vec::d2() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementD}, id()); }
#ifndef _DOXYGEN
namespace regs {
#endif
//! Creates a 32-bit W register operand (ARM/AArch64).
static ASMJIT_INLINE_NODEBUG constexpr GpW w(uint32_t id) noexcept { return GpW(id); }
//! Creates a 64-bit X register operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr GpX x(uint32_t id) noexcept { return GpX(id); }
//! Creates a 32-bit S register operand (ARM/AArch64).
static ASMJIT_INLINE_NODEBUG constexpr VecS s(uint32_t id) noexcept { return VecS(id); }
//! Creates a 64-bit D register operand (ARM/AArch64).
static ASMJIT_INLINE_NODEBUG constexpr VecD d(uint32_t id) noexcept { return VecD(id); }
//! Creates a 128-bit V register operand (ARM/AArch64).
static ASMJIT_INLINE_NODEBUG constexpr VecV v(uint32_t id) noexcept { return VecV(id); }
#ifndef _DOXYGEN
} // {regs}
// Make `arm::regs` accessible through `arm` namespace as well.
using namespace regs;
#endif
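A brief usage sketch of the register types and factory functions above (illustrative; assumes the surrounding headers are included):
using namespace asmjit;
arm::GpX base = arm::x(0);   // 64-bit "x0".
arm::GpW lo   = base.w();    // Same id, 32-bit view "w0".
arm::VecV vec = arm::v(7);   // 128-bit vector "v7".
arm::VecV s4  = vec.s4();    // Arrangement view, formats as "v7.4s".
arm::VecV s2  = vec.s(2);    // Element access, formats as "v7.s[2]".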
//! Memory operand (ARM).
class Mem : public BaseMem {
public:
//! \cond INTERNAL
//! Additional bits of operand's signature used by `arm::Mem`.
enum AdditionalBits : uint32_t {
// Index shift value (5 bits).
// |........|.....XXX|XX......|........|
kSignatureMemShiftValueShift = 14,
kSignatureMemShiftValueMask = 0x1Fu << kSignatureMemShiftValueShift,
// Shift operation type (4 bits).
// |........|XXXX....|........|........|
kSignatureMemPredicateShift = 20,
kSignatureMemPredicateMask = 0x0Fu << kSignatureMemPredicateShift
};
//! \endcond
//! Memory offset mode.
//!
//! Additional constants that can be used with the `predicate`.
enum OffsetMode : uint32_t {
//! Pre-index "[BASE, #Offset {, <shift>}]!" with write-back.
kOffsetPreIndex = 0xE,
//! Post-index "[BASE], #Offset {, <shift>}" with write-back.
kOffsetPostIndex = 0xF
};
//! \name Construction & Destruction
//! \{
  //! Constructs a default `Mem` operand that points to [0].
ASMJIT_INLINE_NODEBUG constexpr Mem() noexcept
: BaseMem() {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Mem& other) noexcept
: BaseMem(other) {}
ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept
: BaseMem(Globals::NoInit) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
: BaseMem(signature, baseId, indexId, offset) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(RegType::kLabelTag) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
Signature::fromValue<kSignatureMemPredicateMask>(uint32_t(shift.op())) |
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(uint64_t base, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
//! \}
//! \name Overloaded Operators
//! \{
ASMJIT_INLINE_NODEBUG Mem& operator=(const Mem& other) noexcept = default;
//! \}
//! \name Clone
//! \{
//! Clones the memory operand.
ASMJIT_INLINE_NODEBUG constexpr Mem clone() const noexcept { return Mem(*this); }
  //! Gets a new memory operand adjusted by `off`.
ASMJIT_INLINE_NODEBUG Mem cloneAdjusted(int64_t off) const noexcept {
Mem result(*this);
result.addOffset(off);
return result;
}
//! Clones the memory operand and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre() const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPreIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre(int64_t off) const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPreIndex);
result.addOffset(off);
return result;
}
//! Clones the memory operand and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post() const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPostIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post(int64_t off) const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPostIndex);
result.addOffset(off);
return result;
}
//! \}
//! \name Base & Index
//! \{
  //! Converts memory `baseType` and `baseId` to an `arm::Reg` instance.
  //!
  //! The memory must have a valid base register, otherwise the result will be wrong.
  ASMJIT_INLINE_NODEBUG Reg baseReg() const noexcept { return Reg::fromTypeAndId(baseType(), baseId()); }
  //! Converts memory `indexType` and `indexId` to an `arm::Reg` instance.
  //!
  //! The memory must have a valid index register, otherwise the result will be wrong.
ASMJIT_INLINE_NODEBUG Reg indexReg() const noexcept { return Reg::fromTypeAndId(indexType(), indexId()); }
using BaseMem::setIndex;
ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, uint32_t shift) noexcept {
setIndex(index);
setShift(shift);
}
//! \}
//! \name ARM Specific Features
//! \{
  //! Gets whether the memory operand has a shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
//! Gets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
//! Sets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
//! Resets the memory operand's shift (aka scale) constant to zero.
ASMJIT_INLINE_NODEBUG void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
//! Gets memory predicate (shift mode or offset mode), see \ref ShiftOp and \ref OffsetMode.
ASMJIT_INLINE_NODEBUG constexpr uint32_t predicate() const noexcept { return _signature.getField<kSignatureMemPredicateMask>(); }
  //! Sets memory predicate to `predicate`, see \ref ShiftOp and \ref OffsetMode.
ASMJIT_INLINE_NODEBUG void setPredicate(uint32_t predicate) noexcept { _signature.setField<kSignatureMemPredicateMask>(predicate); }
//! Resets shift mode to LSL (default).
ASMJIT_INLINE_NODEBUG void resetPredicate() noexcept { _signature.setField<kSignatureMemPredicateMask>(0); }
ASMJIT_INLINE_NODEBUG constexpr bool isFixedOffset() const noexcept { return predicate() < kOffsetPreIndex; }
ASMJIT_INLINE_NODEBUG constexpr bool isPreOrPost() const noexcept { return predicate() >= kOffsetPreIndex; }
ASMJIT_INLINE_NODEBUG constexpr bool isPreIndex() const noexcept { return predicate() == kOffsetPreIndex; }
ASMJIT_INLINE_NODEBUG constexpr bool isPostIndex() const noexcept { return predicate() == kOffsetPostIndex; }
ASMJIT_INLINE_NODEBUG void resetToFixedOffset() noexcept { resetPredicate(); }
ASMJIT_INLINE_NODEBUG void makePreIndex() noexcept { setPredicate(kOffsetPreIndex); }
ASMJIT_INLINE_NODEBUG void makePostIndex() noexcept { setPredicate(kOffsetPostIndex); }
//! \}
};
//! Creates `[base, offset]` memory operand (offset mode).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
//! Creates `[base, offset]!` memory operand (pre-index mode).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPreIndex));
}
//! Creates `[base], offset` memory operand (post-index mode).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
}
//! Creates `[base, index]` memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index) noexcept {
return Mem(base, index);
}
//! Creates `[base, index]!` memory operand (pre-index mode).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPreIndex));
}
//! Creates `[base], index` memory operand (post-index mode).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
}
//! Creates `[base, index, SHIFT_OP #shift]` memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
return Mem(base, index, shift);
}
//! Creates `[base, offset]` memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
// TODO: [ARM] PC + offset address.
#if 0
//! Creates `[PC + offset]` (relative) memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
return Mem(pc, offset);
}
#endif
//! Creates `[base]` absolute memory operand.
//!
//! \note The concept of absolute memory operands doesn't exist on ARM; the ISA only provides PC-relative addressing.
//! Absolute memory operands can only be used if it's known that the PC-relative offset is encodable and within the
//! limits. Absolute addresses are also often output by disassemblers, so AsmJit supports them to make it possible
//! to assemble such output back.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); }
//! \}
ASMJIT_END_SUB_NAMESPACE
//! \cond INTERNAL
ASMJIT_BEGIN_NAMESPACE
ASMJIT_DEFINE_TYPE_ID(arm::GpW, TypeId::kInt32);
ASMJIT_DEFINE_TYPE_ID(arm::GpX, TypeId::kInt64);
ASMJIT_DEFINE_TYPE_ID(arm::VecS, TypeId::kFloat32x1);
ASMJIT_DEFINE_TYPE_ID(arm::VecD, TypeId::kFloat64x1);
ASMJIT_DEFINE_TYPE_ID(arm::VecV, TypeId::kInt32x4);
ASMJIT_END_NAMESPACE
//! \endcond
#endif // ASMJIT_ARM_ARMOPERAND_H_INCLUDED
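A short sketch of constructing memory operands with the factory functions above (illustrative usage):
using namespace asmjit;
arm::Mem m0 = arm::ptr(arm::x(0), 16);        // [x0, 16]
arm::Mem m1 = arm::ptr_pre(arm::x(29), -16);  // [x29, -16]!  (pre-index, write-back)
arm::Mem m2 = arm::ptr_post(arm::x(0), 8);    // [x0], 8      (post-index, write-back)
arm::Mem m3 = m0.cloneAdjusted(8);            // [x0, 24]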

View File

@@ -1,186 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMUTILS_H_INCLUDED
#define ASMJIT_ARM_ARMUTILS_H_INCLUDED
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Public utilities and helpers for targeting AArch32 and AArch64 architectures.
namespace Utils {
//! Decomposed fields of a logical immediate value.
struct LogicalImm {
uint32_t n;
uint32_t s;
uint32_t r;
};
//! Encodes the given `imm` value of the given `width` to a logical immediate value represented as N, S, and R fields
//! and writes these fields to `out`.
//!
//! Encoding Table:
//!
//! ```
//! +---+--------+--------+------+
//! | N | ImmS | ImmR | Size |
//! +---+--------+--------+------+
//! | 1 | ssssss | rrrrrr | 64 |
//! | 0 | 0sssss | .rrrrr | 32 |
//! | 0 | 10ssss | ..rrrr | 16 |
//! | 0 | 110sss | ...rrr | 8 |
//! | 0 | 1110ss | ....rr | 4 |
//! | 0 | 11110s | .....r | 2 |
//! +---+--------+--------+------+
//! ```
ASMJIT_MAYBE_UNUSED
static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do {
width /= 2;
uint64_t mask = (uint64_t(1) << width) - 1u;
if ((imm & mask) != ((imm >> width) & mask)) {
width *= 2;
break;
}
} while (width > 2);
// Patterns of all zeros and all ones are not encodable.
uint64_t lsbMask = Support::lsbMask<uint64_t>(width);
imm &= lsbMask;
if (imm == 0 || imm == lsbMask)
return false;
// Inspect the pattern and get the most important bit indexes.
//
// oIndex <-+ +-> zIndex
// | |
// |..zeros..|oCount|zCount|..ones..|
// |000000000|111111|000000|11111111|
uint32_t zIndex = Support::ctz(~imm);
uint64_t zImm = imm ^ ((uint64_t(1) << zIndex) - 1);
uint32_t zCount = (zImm ? Support::ctz(zImm) : width) - zIndex;
uint32_t oIndex = zIndex + zCount;
uint64_t oImm = ~(zImm ^ Support::lsbMask<uint64_t>(oIndex));
uint32_t oCount = (oImm ? Support::ctz(oImm) : width) - (oIndex);
// Verify whether the bit-pattern is encodable.
uint64_t mustBeZero = oImm ^ ~Support::lsbMask<uint64_t>(oIndex + oCount);
if (mustBeZero != 0 || (zIndex > 0 && width - (oIndex + oCount) != 0))
return false;
out->n = width == 64;
out->s = (oCount + zIndex - 1) | (Support::neg(width * 2) & 0x3F);
out->r = width - oIndex;
return true;
}
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instruction.
ASMJIT_MAYBE_UNUSED
static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy);
}
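Some hand-checked inputs for the encoder above; the expected results follow from the repeating-pattern rule in the encoding table, not from running the code:
// isLogicalImm(0x00FF00FF00FF00FFu, 64) -> true   (8 ones repeating every 16 bits)
// isLogicalImm(0x5555555555555555u, 64) -> true   (alternating bits, 2-bit pattern)
// isLogicalImm(0x0000000000000000u, 64) -> false  (all zeros are not encodable)
// isLogicalImm(0xFFFFFFFFFFFFFFFFu, 64) -> false  (all ones are not encodable)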
//! Returns true if the given `imm` value is a byte mask. A byte mask has each byte of the value set to either
//! 0x00 or 0xFF. Some ARM instructions accept immediates that form a byte mask, and this function can be used to
//! verify that the immediate is encodable before using the value.
template<typename T>
static ASMJIT_INLINE_NODEBUG bool isByteMaskImm8(const T& imm) noexcept {
constexpr T kMask = T(0x0101010101010101 & Support::allOnes<T>());
return imm == (imm & kMask) * T(255);
}
// [.......A|B.......|.......C|D.......|.......E|F.......|.......G|H.......]
static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) noexcept {
return uint32_t(((imm >> (7 - 0)) & 0b00000011) | // [.......G|H.......]
((imm >> (23 - 2)) & 0b00001100) | // [.......E|F.......]
((imm >> (39 - 4)) & 0b00110000) | // [.......C|D.......]
((imm >> (55 - 6)) & 0b11000000)); // [.......A|B.......]
}
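Hand-checked examples for the two byte-mask helpers above (illustrative only):
// isByteMaskImm8(0x00FF00FFu) -> true  (every byte is 0x00 or 0xFF)
// isByteMaskImm8(0x00FE00FFu) -> false (the 0xFE byte breaks the mask)
// encodeImm64ByteMaskToImm8(0xFF000000000000FFu) -> 0x81
//   (bit i of the result corresponds to byte i of the input)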
//! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_FORCE_INLINE bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;
T immZ = val & Support::lsbMask<T>(kNumZeroBits);
uint32_t immB = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;
// ImmZ must be all zeros and ImmB must either be B0 or B1 pattern.
return immZ == 0 && (immB == kB0Pattern || immB == kB1Pattern);
}
//! \endcond
//! Returns true if the given half precision floating point `val` can be encoded as an ARM IMM8 value, which
//! represents a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbcdef|gh000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP16Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 3, 6, 6>(val); }
//! Returns true if the given single precision floating point `val` can be encoded as an ARM IMM8 value, which
//! represents a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbc|defgh000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 6, 6, 19>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(float val) noexcept { return isFP32Imm8(Support::bitCast<uint32_t>(val)); }
//! Returns true if the given double precision floating point `val` can be encoded as an ARM IMM8 value, which
//! represents a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbb|bbcdefgh|00000000|00000000|00000000|00000000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(uint64_t val) noexcept { return isFPImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(double val) noexcept { return isFP64Imm8(Support::bitCast<uint64_t>(val)); }
//! \cond
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_INLINE_NODEBUG uint32_t encodeFPToImm8Generic(T val) noexcept {
uint32_t bits = uint32_t(val >> kNumZeroBits);
return ((bits >> (kNumBBits + kNumCDEFGHBits - 7)) & 0x80u) | (bits & 0x7F);
}
//! \endcond
//! Encodes a double precision floating point value into IMM8 format.
//!
//! \note This function expects that `isFP64Imm8(val) == true` so it doesn't perform any checks of the value and just
//! rearranges some bits into Imm8 order.
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(uint64_t val) noexcept { return encodeFPToImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(double val) noexcept { return encodeFP64ToImm8(Support::bitCast<uint64_t>(val)); }
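For reference, the FMOV immediate form checked by these helpers encodes values of the shape (-1)^s * (1 + m/16) * 2^e with m in [0, 15] and e in [-3, 4]; a few hand-checked examples:
// isFP64Imm8(2.0) -> true  (m=0,  e=1)
// isFP64Imm8(0.5) -> true  (m=0,  e=-1)
// isFP64Imm8(3.5) -> true  (m=12, e=1)
// isFP64Imm8(0.1) -> false (not representable in this form)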
} // {Utils}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_ARMUTILS_H_INCLUDED

View File

@@ -1,17 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma push_macro("min")
#pragma push_macro("max")
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif

View File

@@ -1,9 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma pop_macro("min")
#pragma pop_macro("max")
#endif

View File

@@ -1,33 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// SPDX-License-Identifier: Zlib
// Official GitHub Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2021 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED
#include "./core.h"
#ifndef ASMJIT_NO_X86
#include "./x86.h"
#endif
#endif // ASMJIT_ASMJIT_H_INCLUDED

File diff suppressed because it is too large

View File

@@ -1,55 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_EXPORTS
// Only turn off these warnings when building asmjit itself.
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#endif
// Dependencies only required for asmjit build, but never exposed through public headers.
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#endif
#include "./api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
#define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
#define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
#define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
#define ASMJIT_FAVOR_SPEED
#else
#define ASMJIT_FAVOR_SIZE
#define ASMJIT_FAVOR_SPEED
#endif
// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
#define ASMJIT_TEST
#endif
// Include a unit testing package if this is an `asmjit_test_unit` build.
#if defined(ASMJIT_TEST)
#include "../../../test/broken.h"
#endif
#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED

View File

@@ -1,622 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
// AsmJit Library & ABI Version
// ============================
//! \addtogroup asmjit_core
//! \{
//! Makes a 32-bit integer that represents AsmJit version in `(major << 16) | (minor << 8) | patch` form.
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 11, 0)
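A quick sanity check of the version macro above:
// For example, ASMJIT_LIBRARY_VERSION expands to
// (1 << 16) | (11 << 8) | 0 == 0x010B00 (68352 decimal).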
//! \def ASMJIT_ABI_NAMESPACE
//!
//! AsmJit ABI namespace is an inline namespace within \ref asmjit namespace.
//!
//! It's used to make sure that when a user links to an incompatible version of AsmJit, the link fails. It has
//! some additional properties as well: when `ASMJIT_ABI_NAMESPACE` is defined by the user it overrides the
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, fully
//! controlled by users. This is especially useful when some of these libraries come from a third party.
#if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE _abi_1_11
#endif // !ASMJIT_ABI_NAMESPACE
//! \}
// Global Dependencies
// ===================
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h> // We really want std types as globals, not under 'std' namespace.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// Build Options
// =============
// NOTE: Doxygen cannot document macros that are not defined, so we have to define them and then undefine them
// immediately to prevent Doxygen's own preprocessor from picking them up.
#ifdef _DOXYGEN
namespace asmjit {
//! \addtogroup asmjit_build
//! \{
//! AsmJit is embedded, implies \ref ASMJIT_STATIC.
#define ASMJIT_EMBED
//! Enables static-library build.
#define ASMJIT_STATIC
//! Defined when AsmJit's build configuration is 'Debug'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_DEBUG
//! Defined when AsmJit's build configuration is 'Release'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_RELEASE
//! Disables X86/X64 backends.
#define ASMJIT_NO_X86
//! Disables AArch64 backend.
#define ASMJIT_NO_AARCH64
//! Disables non-host backends entirely (useful for JIT compilers to minimize the library size).
#define ASMJIT_NO_FOREIGN
//! Disables deprecated API at compile time (deprecated API won't be available).
#define ASMJIT_NO_DEPRECATED
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
//! Disables JIT memory management and \ref asmjit::JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref asmjit::Logger and \ref asmjit::Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
// Avoid doxygen preprocessor using feature-selection definitions.
#undef ASMJIT_EMBED
#undef ASMJIT_STATIC
#undef ASMJIT_BUILD_DEBUG
#undef ASMJIT_BUILD_RELEASE
#undef ASMJIT_NO_X86
#undef ASMJIT_NO_AARCH64
#undef ASMJIT_NO_FOREIGN
// (keep ASMJIT_NO_DEPRECATED defined, we don't document deprecated APIs).
#undef ASMJIT_NO_BUILDER
#undef ASMJIT_NO_COMPILER
#undef ASMJIT_NO_JIT
#undef ASMJIT_NO_LOGGING
#undef ASMJIT_NO_TEXT
#undef ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_INTROSPECTION
//! \}
} // {asmjit}
#endif // _DOXYGEN
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
#endif
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
#pragma message("'ASMJIT_NO_TEXT' can only be defined when 'ASMJIT_NO_LOGGING' is defined.")
#undef ASMJIT_NO_TEXT
#endif
#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
#pragma message("'ASMJIT_NO_INTROSPECTION' can only be defined when 'ASMJIT_NO_COMPILER' is defined")
#undef ASMJIT_NO_INTROSPECTION
#endif
// Build Mode
// ==========
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
#if !defined(NDEBUG)
#define ASMJIT_BUILD_DEBUG
#else
#define ASMJIT_BUILD_RELEASE
#endif
#endif
// Target Architecture Detection
// =============================
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
#define ASMJIT_ARCH_X86 32
#else
#define ASMJIT_ARCH_X86 0
#endif
#if defined(_M_ARM64) || defined(__arm64__) || defined(__aarch64__)
#define ASMJIT_ARCH_ARM 64
#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
#define ASMJIT_ARCH_ARM 32
#else
#define ASMJIT_ARCH_ARM 0
#endif
#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
#define ASMJIT_ARCH_MIPS 64
#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
#define ASMJIT_ARCH_MIPS 32
#else
#define ASMJIT_ARCH_MIPS 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
#if ASMJIT_ARCH_BITS == 0
#undef ASMJIT_ARCH_BITS
#if defined(__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
#else
#define ASMJIT_ARCH_BITS 32
#endif
#endif
#if (defined(__ARMEB__)) || \
(defined(__MIPSEB__)) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define ASMJIT_ARCH_LE 0
#define ASMJIT_ARCH_BE 1
#else
#define ASMJIT_ARCH_LE 1
#define ASMJIT_ARCH_BE 0
#endif
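// Illustrative sketch (not part of the upstream header): a runtime probe that
// should agree with the compile-time ASMJIT_ARCH_LE/ASMJIT_ARCH_BE detection
// above; `asmjitDemoIsLittleEndian` is a hypothetical name.
static inline bool asmjitDemoIsLittleEndian() noexcept {
  uint32_t probe = 1u;
  // The first byte of a little-endian 1u is 1; on big-endian it's 0.
  return *reinterpret_cast<const unsigned char*>(&probe) == 1;
}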
#if defined(ASMJIT_NO_FOREIGN)
#if !ASMJIT_ARCH_X86 && !defined(ASMJIT_NO_X86)
#define ASMJIT_NO_X86
#endif
#if !ASMJIT_ARCH_ARM && !defined(ASMJIT_NO_AARCH64)
#define ASMJIT_NO_AARCH64
#endif
#endif
// C++ Compiler and Features Detection
// ===================================
#define ASMJIT_CXX_GNU 0
#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
#if defined(__INTEL_COMPILER)
// MSC Compiler:
// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
//
// Version List:
// - 16.00.0 == VS2010
// - 17.00.0 == VS2012
// - 18.00.0 == VS2013
// - 19.00.0 == VS2015
// - 19.10.0 == VS2017
#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
// Clang Compiler [Pretends to be GNU, so it must be checked before]:
// - https://clang.llvm.org/cxx_status.html
#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
// GNU Compiler:
// - https://gcc.gnu.org/projects/cxx-status.html
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#undef ASMJIT_CXX_GNU
#define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__)
#endif
// Compiler features detection macros.
#if defined(__clang__) && defined(__has_attribute)
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
#else
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
#endif
// API Decorators & C++ Extensions
// ===============================
//! \def ASMJIT_API
//!
//! A decorator that is used to decorate API that AsmJit exports when built as a shared library.
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
#if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __declspec(dllexport)
#else
#define ASMJIT_API __declspec(dllimport)
#endif
#elif defined(_WIN32) && defined(__GNUC__)
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __attribute__((__dllexport__))
#else
#define ASMJIT_API __attribute__((__dllimport__))
#endif
#elif defined(__GNUC__)
#define ASMJIT_API __attribute__((__visibility__("default")))
#endif
#endif
#if !defined(ASMJIT_API)
#define ASMJIT_API
#endif
#if !defined(ASMJIT_VARAPI)
#define ASMJIT_VARAPI extern ASMJIT_API
#endif
//! \def ASMJIT_VIRTAPI
//!
//! This is basically a workaround. When using MSVC and marking a class as DLL export, everything gets exported,
//! which is unwanted in most projects. MSVC automatically exports typeinfo and the vtable if at least one symbol
//! of the class is exported. GCC, however, doesn't export typeinfo even if one or more symbols are exported unless
//! the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
#if !defined(_WIN32) && defined(__GNUC__)
#define ASMJIT_VIRTAPI ASMJIT_API
#else
#define ASMJIT_VIRTAPI
#endif
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_FORCE_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_FORCE_INLINE __forceinline
#else
#define ASMJIT_FORCE_INLINE inline
#endif
#if defined(__clang__)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __nodebug__))
#elif defined(__GNUC__)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __artificial__))
#else
#define ASMJIT_INLINE_NODEBUG inline
#endif
#if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__))
#define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline)
#define ASMJIT_NORETURN __declspec(noreturn)
#else
#define ASMJIT_NOINLINE
#define ASMJIT_NORETURN
#endif
// Calling conventions.
#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
#define ASMJIT_CDECL __attribute__((__cdecl__))
#define ASMJIT_STDCALL __attribute__((__stdcall__))
#define ASMJIT_FASTCALL __attribute__((__fastcall__))
#define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
#define ASMJIT_CDECL __cdecl
#define ASMJIT_STDCALL __stdcall
#define ASMJIT_FASTCALL __fastcall
#define ASMJIT_REGPARM(N)
#else
#define ASMJIT_CDECL
#define ASMJIT_STDCALL
#define ASMJIT_FASTCALL
#define ASMJIT_REGPARM(N)
#endif
#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
#define ASMJIT_VECTORCALL __vectorcall
#elif ASMJIT_ARCH_X86 && defined(_WIN32)
#define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
#else
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++11 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
#elif defined(_MSC_VER)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
#else
#define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
#endif
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#if defined(__GNUC__)
#define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
#else
#define ASMJIT_MAY_ALIAS
#endif
//! \def ASMJIT_MAYBE_UNUSED
//!
//! Expands to `[[maybe_unused]]` if supported, otherwise to a compiler-specific attribute.
#if __cplusplus >= 201703L
#define ASMJIT_MAYBE_UNUSED [[maybe_unused]]
#elif defined(__GNUC__)
#define ASMJIT_MAYBE_UNUSED __attribute__((unused))
#else
#define ASMJIT_MAYBE_UNUSED
#endif
#if defined(__clang_major__) && __clang_major__ >= 4 && !defined(_DOXYGEN)
// NOTE: Clang allows applying this attribute to function arguments, which is what we want. Once GCC supports
// this use we will enable it for GCC as well. Until then it's Clang-only, which is sufficient for static
// analysis.
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__))
#else
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
#endif
//! \def ASMJIT_NOEXCEPT_TYPE
//!
//! Defined to `noexcept` in C++17 mode or nothing otherwise. Used by function typedefs.
#if __cplusplus >= 201703L
#define ASMJIT_NOEXCEPT_TYPE noexcept
#else
#define ASMJIT_NOEXCEPT_TYPE
#endif
//! \def ASMJIT_ASSUME(...)
//!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
//!
//! This macro has two purposes:
//!
//! 1. Enable optimizations that would not be possible without the assumption.
//! 2. Hint static analysis tools that a certain condition is true to prevent false positives.
#if defined(__clang__)
#define ASMJIT_ASSUME(...) __builtin_assume(__VA_ARGS__)
#elif defined(__GNUC__)
#define ASMJIT_ASSUME(...) do { if (!(__VA_ARGS__)) __builtin_unreachable(); } while (0)
#elif defined(_MSC_VER)
#define ASMJIT_ASSUME(...) __assume(__VA_ARGS__)
#else
#define ASMJIT_ASSUME(...) (void)0
#endif
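// Illustrative sketch (not part of the upstream header): a typical use of
// ASMJIT_ASSUME() that informs both the optimizer and static analyzers that
// `p` is non-null; `asmjitDemoFill` is a hypothetical name.
static inline void asmjitDemoFill(unsigned char* p, size_t n) noexcept {
  ASMJIT_ASSUME(p != nullptr);
  for (size_t i = 0; i < n; i++)
    p[i] = 0;
}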
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#if defined(__GNUC__)
#define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
#define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
#else
#define ASMJIT_LIKELY(...) (__VA_ARGS__)
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif
//! \def ASMJIT_FALLTHROUGH
//!
//! Portable [[fallthrough]] attribute.
#if defined(__clang__) && __cplusplus >= 201103L
#define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
#endif
//! \def ASMJIT_DEPRECATED
//!
//! Marks function, class, struct, enum, or anything else as deprecated.
#if defined(__GNUC__)
#define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#if defined(__clang__)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#else
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#endif
#elif defined(_MSC_VER)
#define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#else
#define ASMJIT_DEPRECATED(MESSAGE)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE)
#endif
// Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
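// Illustrative sketch (not part of the upstream header): ASMJIT_ARRAY_SIZE
// yields the element count at compile time; for a string literal this count
// includes the terminating NUL.
static_assert(ASMJIT_ARRAY_SIZE("1234") == 5, "4 characters + NUL terminator");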
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
// Begin-Namespace & End-Namespace Macros
// ======================================
#if defined _DOXYGEN
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
#define ASMJIT_END_NAMESPACE }
#elif defined(__clang__)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
_Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("clang diagnostic pop") \
}}
#elif defined(__GNUC__) && __GNUC__ == 4
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}}
#elif defined(__GNUC__) && __GNUC__ >= 8
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}}
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_NAMESPACE \
__pragma(warning(pop)) \
}}
#endif
#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE {
#define ASMJIT_END_NAMESPACE }}
#endif
#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
ASMJIT_BEGIN_NAMESPACE \
namespace NAMESPACE {
#define ASMJIT_END_SUB_NAMESPACE \
} \
ASMJIT_END_NAMESPACE
// C++ Utilities
// =============
#define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
#define ASMJIT_NONCONSTRUCTIBLE(Type) \
Type() = delete; \
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
//!
//! Defines bit operations for enumeration flags.
#ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_INLINE_NODEBUG constexpr T operator~(T a) noexcept { \
return T(~(std::underlying_type<T>::type)(a)); \
} \
\
static ASMJIT_INLINE_NODEBUG constexpr T operator|(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_INLINE_NODEBUG constexpr T operator&(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_INLINE_NODEBUG constexpr T operator^(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
} \
\
static ASMJIT_INLINE_NODEBUG T& operator|=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_INLINE_NODEBUG T& operator&=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_INLINE_NODEBUG T& operator^=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
return a; \
}
#endif
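// Illustrative sketch (not part of the upstream header): `DemoFlags` is a
// hypothetical enum showing what ASMJIT_DEFINE_ENUM_FLAGS provides - bitwise
// operators that keep the strong enum type.
enum class DemoFlags : uint32_t { kNone = 0, kA = 0x1, kB = 0x2 };
ASMJIT_DEFINE_ENUM_FLAGS(DemoFlags)
static_assert((DemoFlags::kA | DemoFlags::kB) == DemoFlags(0x3), "operator| combines flags");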
//! \def ASMJIT_DEFINE_ENUM_COMPARE(T)
//!
//! Defines comparison operations for enumeration flags.
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_INLINE_NODEBUG bool operator<(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) < (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator<=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) <= (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator>(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) > (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_INLINE_NODEBUG bool operator>=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) >= (std::underlying_type<T>::type)(b); \
}
#endif
// Cleanup Api-Config Specific Macros
// ==================================
#undef ASMJIT_CXX_GNU
#undef ASMJIT_CXX_MAKE_VER
#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED

View File

@@ -1,231 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
// This file provides architecture-specific classes that are required in the core library. For example the Imm
// operand can be created from arm::Shift in a constexpr way, so arm::Shift must be available. This header thus
// provides everything architecture-specific that is used by the Core API.
#include "../core/globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Condition code (both AArch32 & AArch64).
//!
//! \note This enumeration doesn't match condition code that is used in AArch32/AArch64 opcodes. In general this
//! condition code is encoded as `(cc - 2) & 0xF` so that `kAL` condition code is zero and encoded as 0xE in opcode.
//! This makes it easier to use a condition code as an instruction modifier that defaults to 'al'.
enum class CondCode : uint8_t {
kAL = 0x00u, //!< (no condition code) (always)
kNA = 0x01u, //!< (not available) (special)
kEQ = 0x02u, //!< Z==1 (any_sign ==)
kNE = 0x03u, //!< Z==0 (any_sign !=)
kCS = 0x04u, //!< C==1 (unsigned >=)
kHS = 0x04u, //!< C==1 (unsigned >=)
kCC = 0x05u, //!< C==0 (unsigned < )
kLO = 0x05u, //!< C==0 (unsigned < )
kMI = 0x06u, //!< N==1 (is negative)
kPL = 0x07u, //!< N==0 (is positive or zero)
kVS = 0x08u, //!< V==1 (is overflow)
kVC = 0x09u, //!< V==0 (no overflow)
kHI = 0x0Au, //!< C==1 & Z==0 (unsigned > )
kLS = 0x0Bu, //!< C==0 | Z==1 (unsigned <=)
kGE = 0x0Cu, //!< N==V (signed >=)
kLT = 0x0Du, //!< N!=V (signed < )
kGT = 0x0Eu, //!< Z==0 & N==V (signed > )
kLE = 0x0Fu, //!< Z==1 | N!=V (signed <=)
kSign = kMI, //!< Sign.
kNotSign = kPL, //!< Not sign.
kOverflow = kVS, //!< Signed overflow.
kNotOverflow = kVC, //!< Not signed overflow.
kEqual = kEQ, //!< Equal `a == b`.
kNotEqual = kNE, //!< Not Equal `a != b`.
kZero = kEQ, //!< Zero (alias to equal).
kNotZero = kNE, //!< Not Zero (alias to Not Equal).
kNegative = kMI, //!< Negative.
kPositive = kPL, //!< Positive or zero.
kSignedLT = kLT, //!< Signed `a < b`.
kSignedLE = kLE, //!< Signed `a <= b`.
kSignedGT = kGT, //!< Signed `a > b`.
kSignedGE = kGE, //!< Signed `a >= b`.
kUnsignedLT = kLO, //!< Unsigned `a < b`.
kUnsignedLE = kLS, //!< Unsigned `a <= b`.
kUnsignedGT = kHI, //!< Unsigned `a > b`.
kUnsignedGE = kHS, //!< Unsigned `a >= b`.
kAlways = kAL, //!< No condition code (always).
kMaxValue = 0x0Fu //!< Maximum value of `CondCode`.
};
//! Negates a condition code.
static ASMJIT_INLINE_NODEBUG constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
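// Illustrative sketch (not part of the upstream header): paired condition
// codes differ only in the lowest bit, so XOR with 1 negates them.
static_assert(negateCond(CondCode::kEQ) == CondCode::kNE, "EQ negates to NE");
static_assert(negateCond(CondCode::kSignedLT) == CondCode::kSignedGE, "signed LT negates to signed GE");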
//! Data type that can be encoded with the instruction (AArch32 only).
enum class DataType : uint32_t {
//! No data type specified (default for all general purpose instructions).
kNone = 0,
//! 8-bit signed integer, specified as `.s8` in assembly.
kS8 = 1,
//! 16-bit signed integer, specified as `.s16` in assembly.
kS16 = 2,
//! 32-bit signed integer, specified as `.s32` in assembly.
kS32 = 3,
//! 64-bit signed integer, specified as `.s64` in assembly.
kS64 = 4,
//! 8-bit unsigned integer, specified as `.u8` in assembly.
kU8 = 5,
//! 16-bit unsigned integer, specified as `.u16` in assembly.
kU16 = 6,
//! 32-bit unsigned integer, specified as `.u32` in assembly.
kU32 = 7,
//! 64-bit unsigned integer, specified as `.u64` in assembly.
kU64 = 8,
//! 16-bit floating point (half precision), specified as `.f16` in assembly.
kF16 = 10,
//! 32-bit floating point (single precision), specified as `.f32` in assembly.
kF32 = 11,
//! 64-bit floating point (double precision), specified as `.f64` in assembly.
kF64 = 12,
//! 8-bit polynomial.
kP8 = 13,
//! 16-bit BF16 floating point.
kBF16 = 14,
//! 64-bit polynomial.
kP64 = 15,
//! Maximum value of `DataType`.
kMaxValue = 15
};
//! Shift operation predicate (ARM) describes either SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real constants on AArch32 and AArch64
//! hardware; however, the additional constants that describe extend modes are specific to AsmJit and are
//! translated to the AArch64 specific constants by the assembler.
enum class ShiftOp : uint32_t {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kASR = 0x02u,
//! Rotate right operation (AArch32 only).
kROR = 0x03u,
//! Rotate right with carry operation (encoded as `ShiftOp::kROR` with zero) (AArch32 only).
kRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kMSL = 0x05u,
//! UXTN extend register operation (AArch64 only).
kUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Represents ARM immediate shift operation type and value.
class Shift {
public:
//! Shift operation.
ShiftOp _op;
//! Shift Value.
uint32_t _value;
//! Default constructed Shift is not initialized.
ASMJIT_INLINE_NODEBUG Shift() noexcept = default;
//! Copy constructor (default)
ASMJIT_INLINE_NODEBUG constexpr Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
ASMJIT_INLINE_NODEBUG constexpr Shift(ShiftOp op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
ASMJIT_INLINE_NODEBUG constexpr ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
ASMJIT_INLINE_NODEBUG constexpr uint32_t value() const noexcept { return _value; }
//! Sets shift amount to `value`.
ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; }
};
//! Constructs a `LSL #value` shift (logical shift left).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); }
//! Constructs a `ROR #value` shift (rotate right).
static ASMJIT_INLINE_NODEBUG constexpr Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1).
static ASMJIT_INLINE_NODEBUG constexpr Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones).
static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); }
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); }
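// Illustrative sketch (not part of the upstream header): the helpers above
// build Shift values usable in constexpr contexts.
static_assert(lsl(4).op() == ShiftOp::kLSL && lsl(4).value() == 4, "lsl(4) encodes LSL #4");
static_assert(rrx().op() == ShiftOp::kRRX && rrx().value() == 0, "rrx() has an implicit amount of 0");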
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED

View File

@@ -1,160 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86archtraits_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64archtraits_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
static const constexpr ArchTraits noArchTraits = {
// SP/FP/LR/PC.
0xFF, 0xFF, 0xFF, 0xFF,
// Reserved.
{ 0, 0, 0 },
// HW stack alignment.
0,
// Min/Max stack offset.
0, 0,
// ISA features [Gp, Vec, Other0, Other1].
{{
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegTypeToSignature.
#define V(index) OperandSignature{0}
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
#define V(index) TypeId::kVoid
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
#define V(index) RegType::kNone
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ArchTypeNameId::kByte,
ArchTypeNameId::kHalf,
ArchTypeNameId::kWord,
ArchTypeNameId::kQuad
}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1] = {
// No architecture.
noArchTraits,
// X86/X64 architectures.
#if !defined(ASMJIT_NO_X86)
x86::x86ArchTraits,
x86::x64ArchTraits,
#else
noArchTraits,
noArchTraits,
#endif
// RISCV32/RISCV64 architectures.
noArchTraits,
noArchTraits,
// ARM architecture
noArchTraits,
// AArch64 architecture.
#if !defined(ASMJIT_NO_AARCH64)
a64::a64ArchTraits,
#else
noArchTraits,
#endif
// ARM/Thumb architecture.
noArchTraits,
// Reserved.
noArchTraits,
// MIPS32/MIPS64
noArchTraits,
noArchTraits
};
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId?
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue))
typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId)));
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId)))
return DebugUtils::errored(kErrorInvalidTypeId);
// First normalize architecture dependent types.
if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == TypeId::kIntPtr)
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
else
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = TypeUtils::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80))
return DebugUtils::errored(kErrorInvalidUseOfF80);
RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64)
return DebugUtils::errored(kErrorInvalidUseOfGpq);
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
else {
if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid())
regType = RegType::kVec64;
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid())
regType = RegType::kVec128;
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid())
regType = RegType::kVec256;
else if (archTraits._regSignature[RegType::kVec512].isValid())
regType = RegType::kVec512;
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
*typeIdOut = typeId;
*regSignatureOut = archTraits.regTypeToSignature(regType);
return kErrorOk;
}
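// Illustrative usage sketch (comments only; not part of the upstream source,
// error handling elided). The function normalizes the TypeId and returns a
// signature describing a register able to hold the value - for example a
// 32-bit GP register for TypeId::kInt32 on X64:
//
//   TypeId normalized;
//   OperandSignature sig;
//   Error err = ArchUtils::typeIdToRegSignature(Arch::kX64, TypeId::kInt32, &normalized, &sig);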
ASMJIT_END_NAMESPACE

View File

@@ -1,290 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Instruction set architecture (ISA).
enum class Arch : uint8_t {
//! Unknown or uninitialized ISA.
kUnknown = 0,
//! 32-bit X86 ISA.
kX86 = 1,
//! 64-bit X86 ISA also known as X64, X86_64, and AMD64.
kX64 = 2,
//! 32-bit RISC-V ISA.
kRISCV32 = 3,
//! 64-bit RISC-V ISA.
kRISCV64 = 4,
//! 32-bit ARM ISA (little endian).
kARM = 5,
//! 64-bit ARM ISA (little endian).
kAArch64 = 6,
//! 32-bit ARM ISA in Thumb mode (little endian).
kThumb = 7,
// 8 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (little endian).
kMIPS32_LE = 9,
//! 64-bit MIPS ISA (little endian).
kMIPS64_LE = 10,
//! 32-bit ARM ISA (big endian).
kARM_BE = 11,
//! 64-bit ARM ISA (big endian).
kAArch64_BE = 12,
//! 32-bit ARM ISA in Thumb mode (big endian).
kThumb_BE = 13,
// 14 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (big endian).
kMIPS32_BE = 15,
//! 64-bit MIPS ISA (big endian).
kMIPS64_BE = 16,
//! Maximum value of `Arch`.
kMaxValue = kMIPS64_BE,
//! Mask used by 32-bit ISAs (odd are 32-bit, even are 64-bit).
k32BitMask = 0x01,
//! First big-endian architecture.
kBigEndian = kARM_BE,
//! ISA detected at compile-time (ISA of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
ASMJIT_ARCH_X86 == 32 ? kX86 :
ASMJIT_ARCH_X86 == 64 ? kX64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kMIPS64_BE :
kUnknown
#endif
};
//! Sub-architecture.
enum class SubArch : uint8_t {
//! Unknown or uninitialized architecture sub-type.
kUnknown = 0,
//! Maximum value of `SubArch`.
kMaxValue = kUnknown,
//! Sub-architecture detected at compile-time (sub-architecture of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Identifier used to represent names of different data types across architectures.
enum class ArchTypeNameId : uint8_t {
//! Describes 'db' (X86/X86_64 convention, always 8-bit quantity).
kDB = 0,
//! Describes 'dw' (X86/X86_64 convention, always 16-bit word).
kDW,
//! Describes 'dd' (X86/X86_64 convention, always 32-bit word).
kDD,
//! Describes 'dq' (X86/X86_64 convention, always 64-bit word).
kDQ,
//! Describes 'byte' (always 8-bit quantity).
kByte,
//! Describes 'half' (most likely 16-bit word).
kHalf,
//! Describes 'word' (either 16-bit or 32-bit word).
kWord,
//! Describes 'hword' (most likely 16-bit word).
kHWord,
//! Describes 'dword' (either 32-bit or 64-bit word).
kDWord,
//! Describes 'qword' (64-bit word).
kQWord,
//! Describes 'xword' (64-bit word).
kXWord,
//! Describes 'short' (always 16-bit word).
kShort,
//! Describes 'long' (most likely 32-bit word).
kLong,
//! Describes 'quad' (64-bit word).
kQuad,
//! Maximum value of `ArchTypeNameId`.
kMaxValue = kQuad
};
//! Instruction feature hints for each register group provided by \ref ArchTraits.
//!
//! Instruction feature hints describe miscellaneous instructions provided by the architecture that can be used by
//! the register allocator to make certain things simpler, like register swaps or emitting register push/pop sequences.
//!
//! \remarks Instruction feature hints are only defined for register groups that can be used with \ref
//! asmjit_compiler infrastructure. Register groups that are not managed by Compiler are not provided by
//! \ref ArchTraits and cannot be queried.
enum class InstHints : uint8_t {
//! No feature hints.
kNoHints = 0,
//! Architecture supports a register swap by using a single instruction.
kRegSwap = 0x01u,
//! Architecture provides push/pop instructions.
kPushPop = 0x02u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstHints)
//! Architecture traits used by Function API and Compiler's register allocator.
struct ArchTraits {
//! \name Members
//! \{
//! Stack pointer register id.
uint8_t _spRegId;
//! Frame pointer register id.
uint8_t _fpRegId;
//! Link register id.
uint8_t _linkRegId;
//! Instruction pointer (or program counter) register id, if accessible.
uint8_t _ipRegId;
// Reserved.
uint8_t _reserved[3];
//! Hardware stack alignment requirement.
uint8_t _hwStackAlignment;
//! Minimum addressable offset on stack guaranteed for all instructions.
uint32_t _minStackOffset;
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
//! Flags for each virtual register group.
Support::Array<InstHints, Globals::kNumVirtGroups> _instHints;
//! Maps a register type to a signature that provides its group and size, and can be used to construct register operands.
Support::Array<OperandSignature, uint32_t(RegType::kMaxValue) + 1> _regSignature;
//! Maps a register to type-id, see \ref TypeId.
Support::Array<TypeId, uint32_t(RegType::kMaxValue) + 1> _regTypeToTypeId;
//! Maps scalar TypeId values (from TypeId::_kIdBaseStart) to register types, see \ref TypeId.
Support::Array<RegType, 32> _typeIdToRegType;
//! Word name identifiers of 8-bit, 16-bit, 32-bit, and 64-bit quantities that appear in formatted text.
ArchTypeNameId _typeNameIdTable[4];
//! \}
//! \name Accessors
//! \{
//! Returns stack pointer register id.
ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id.
ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it.
ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it.
ASMJIT_INLINE_NODEBUG uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain it return the lowest alignment (1);
//! others may constrain it, for example AArch64 requires 16-byte stack alignment.
ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides link register, which is used across function calls. If the link
//! register is not provided then a function call pushes the return address on stack (X86/X64).
ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
//! Tests whether the given register `group` has the given `flag` set.
ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept {
return type <= RegType::kMaxValue && _regSignature[type].isValid();
}
//! Returns an operand signature from the given register `type` of this architecture.
ASMJIT_INLINE_NODEBUG OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; }
//! Returns a register from the given register `type` of this architecture.
ASMJIT_INLINE_NODEBUG RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); }
//! Returns a register size the given register `type` of this architecture.
ASMJIT_INLINE_NODEBUG uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); }
//! Returns a corresponding `TypeId` from the given register `type` of this architecture.
ASMJIT_INLINE_NODEBUG TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; }
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
//!
//! The index of this table is log2 of the size:
//! - [0] 8-bits
//! - [1] 16-bits
//! - [2] 32-bits
//! - [3] 64-bits
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
//! \}
//! \name Statics
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1];
//! \cond
ASMJIT_INLINE_NODEBUG const ArchTraits& ArchTraits::byArch(Arch arch) noexcept { return _archTraits[uint32_t(arch)]; }
//! \endcond
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept;
} // {ArchUtils}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARCHTRAITS_H_INCLUDED

View File

@@ -1,406 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/codewriter_p.h"
#include "../core/constpool.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// BaseAssembler - Construction & Destruction
// ==========================================
BaseAssembler::BaseAssembler() noexcept
: BaseEmitter(EmitterType::kAssembler) {}
BaseAssembler::~BaseAssembler() noexcept {}
// BaseAssembler - Buffer Management
// =================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
_bufferPtr = _bufferData + offset;
return kErrorOk;
}
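// Illustrative usage sketch (comments only; not part of the upstream source):
// setOffset() can rewind the cursor to patch bytes that were already emitted.
// `a` stands for any attached assembler, `realValue` for the patched data:
//
//   uint32_t placeholder = 0;
//   size_t fixup = a.offset();     // remember the patch position
//   a.embed(&placeholder, 4);      // emit 4 placeholder bytes
//   size_t end = a.offset();
//   a.setOffset(fixup);            // rewind into already-emitted code
//   a.embed(&realValue, 4);        // overwrite the placeholder
//   a.setOffset(end);              // restore the cursor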
// BaseAssembler - Section Management
// ==================================
static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
uint8_t* p = section->_buffer._data;
self->_section = section;
self->_bufferData = p;
self->_bufferPtr = p + section->_buffer._size;
self->_bufferEnd = p + section->_buffer._capacity;
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
return reportError(DebugUtils::errored(kErrorInvalidSection));
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logf(".section %s {#%u}\n", section->name(), section->id());
#endif
BaseAssembler_initSection(this, section);
return kErrorOk;
}
// BaseAssembler - Label Management
// ================================
Label BaseAssembler::newLabel() {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newLabelEntry(&le);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
Error err = _code->bindLabel(label, _section->id(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
EmitterUtils::logLabelBound(this, label);
#endif
resetInlineComment();
if (err)
return reportError(err);
return kErrorOk;
}
// BaseAssembler - Embed
// =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (dataSize == 0)
return kErrorOk;
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
writer.emitData(data, dataSize);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), TypeId::kUInt8, data, dataSize, 1);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (itemCount == 0 || repeatCount == 0)
return kErrorOk;
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
for (size_t i = 0; i < repeatCount; i++)
writer.emitData(data, dataSize);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), typeId, data, itemCount, repeatCount);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
static const TypeId dataTypeIdBySize[9] = {
TypeId::kVoid, // [0] (invalid)
TypeId::kUInt8, // [1] (uint8_t)
TypeId::kUInt16, // [2] (uint16_t)
TypeId::kVoid, // [3] (invalid)
TypeId::kUInt32, // [4] (uint32_t)
TypeId::kVoid, // [5] (invalid)
TypeId::kVoid, // [6] (invalid)
TypeId::kVoid, // [7] (invalid)
TypeId::kUInt64 // [8] (uint64_t)
};
#endif
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (ASMJIT_UNLIKELY(!isLabelValid(label)))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
if (!size)
return kErrorOk;
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
#ifndef ASMJIT_NO_LOGGING
uint8_t* data = writer.cursor();
#endif
pool.fill(writer.cursor());
writer.advance(size);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
uint32_t dataSizeLog2 = Support::min<uint32_t>(Support::ctz(pool.minItemSize()), 3);
uint32_t dataSize = 1 << dataSizeLog2;
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize], data, size >> dataSizeLog2);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
ASMJIT_ASSERT(_code != nullptr);
RelocEntry* re;
LabelEntry* le = _code->labelEntry(label);
if (ASMJIT_UNLIKELY(!le))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(' ');
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
if (le->isBound()) {
re->_targetSectionId = le->section()->id();
re->_payload = le->offset();
}
else {
OffsetFormat of;
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!link))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
link->relocId = re->id();
}
// Emit dummy DWORD/QWORD depending on the data size.
writer.emitZeros(dataSize);
writer.done(this);
return kErrorOk;
}
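// Illustrative usage sketch (comments only; not part of the upstream source):
// embedLabel() reserves a register-sized slot that relocation later fills
// with the absolute address of the label:
//
//   Label target = a.newLabel();
//   a.embedLabel(target);          // emits zeros now, patched at relocation
//   a.bind(target);                // binding may happen before or after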
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
LabelEntry* labelEntry = _code->labelEntry(label);
LabelEntry* baseEntry = _code->labelEntry(base);
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(" (");
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, FormatFlags::kNone, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
#endif
// If both labels are bound within the same section it means the delta can be calculated now.
if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
uint64_t delta = labelEntry->offset() - baseEntry->offset();
writer.emitValueLE(delta, dataSize);
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
exp->reset();
exp->opType = ExpressionOpType::kSub;
exp->setValueAsLabel(0, labelEntry);
exp->setValueAsLabel(1, baseEntry);
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, dataSize);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_payload = (uint64_t)(uintptr_t)exp;
writer.emitZeros(dataSize);
}
writer.done(this);
return kErrorOk;
}
// BaseAssembler - Comment
// =======================
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
// Logger cannot be NULL if `EmitterFlags::kLogComments` is set.
ASMJIT_ASSERT(_logger != nullptr);
_logger->log(data, size);
_logger->log("\n", 1);
return kErrorOk;
#else
DebugUtils::unused(data, size);
return kErrorOk;
#endif
}
// BaseAssembler - Events
// ======================
Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
// Attach to the end of the .text section.
BaseAssembler_initSection(this, code->_sections[0]);
return kErrorOk;
}
Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
}
ASMJIT_END_NAMESPACE

View File

@@ -1,129 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#include "../core/codeholder.h"
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
//! Base assembler.
//!
//! This is a base class that provides the interface used by architecture-specific
//! assembler implementations. An assembler doesn't hold any data itself; instead
//! it's attached to \ref CodeHolder, which provides all the data that the
//! assembler needs and which the assembler can alter.
//!
//! Check out architecture specific assemblers for more details and examples:
//!
//! - \ref x86::Assembler - X86/X64 assembler implementation.
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseAssembler)
typedef BaseEmitter Base;
//! Current section where the assembling happens.
Section* _section = nullptr;
//! Start of the CodeBuffer of the current section.
uint8_t* _bufferData = nullptr;
//! End (first invalid byte) of the current section.
uint8_t* _bufferEnd = nullptr;
//! Pointer in the CodeBuffer of the current section.
uint8_t* _bufferPtr = nullptr;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseAssembler` instance.
ASMJIT_API BaseAssembler() noexcept;
//! Destroys the `BaseAssembler` instance.
ASMJIT_API virtual ~BaseAssembler() noexcept;
//! \}
//! \name Code-Buffer Management
//! \{
//! Returns the capacity of the current CodeBuffer.
ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer.
ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer.
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`.
//!
//! \note The `offset` cannot be greater than buffer size even if it's
//! within the buffer's capacity.
ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section.
ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section.
ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \}
//! \name Section Management
//! \{
//! Returns the current section.
ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;
//! \}
//! \name Label Management
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
//! \name Embed
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
//! \}
//! \name Comment
//! \{
ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
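// Illustrative end-to-end sketch (comments only; not part of the upstream
// header). It assumes the X86 backend and JitRuntime from the wider library;
// see x86::Assembler for authoritative examples:
//
//   JitRuntime rt;
//   CodeHolder code;
//   code.init(rt.environment());
//   x86::Assembler a(&code);
//   a.mov(x86::eax, 1);
//   a.ret();
//   int (*fn)(void);
//   rt.add(&fn, &code);            // copy the code into executable memory
//   int result = fn();             // result == 1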
#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED

View File

@@ -1,897 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// PostponedErrorHandler (Internal)
// ================================
//! Postponed error handler that never throws. Used as a temporary error handler
//! while running passes. If an error occurs, the caller is notified and will call
//! the real error handler, which can throw.
class PostponedErrorHandler : public ErrorHandler {
public:
void handleError(Error err, const char* message, BaseEmitter* origin) override {
DebugUtils::unused(err, origin);
_message.assign(message);
}
StringTmp<128> _message;
};
// BaseBuilder - Utilities
// =======================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes)
pass->~Pass();
self->_passes.reset();
}
// BaseBuilder - Construction & Destruction
// ========================================
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(EmitterType::kBuilder),
_codeZone(32768 - Zone::kBlockOverhead),
_dataZone(16384 - Zone::kBlockOverhead),
_passZone(65536 - Zone::kBlockOverhead),
_allocator(&_codeZone) {}
BaseBuilder::~BaseBuilder() noexcept {
BaseBuilder_deletePasses(this);
}
// BaseBuilder - Node Management
// =============================
Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOptions, uint32_t opCount) {
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
*out = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
return kErrorOk;
}
Error BaseBuilder::newLabelNode(LabelNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::newAlignNode(AlignNode** out, AlignMode alignMode, uint32_t alignment) {
*out = nullptr;
return _newNodeT<AlignNode>(out, alignMode, alignment);
}
Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
*out = nullptr;
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
node->_embed._typeId = typeId;
node->_embed._typeSize = uint8_t(typeSize);
node->_itemCount = itemCount;
node->_repeatCount = repeatCount;
uint8_t* dstData = node->_inlineData;
if (dataSize > EmbedDataNode::kInlineBufferSize) {
dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
if (ASMJIT_UNLIKELY(!dstData))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node->_externalData = dstData;
}
if (data)
memcpy(dstData, data, dataSize);
*out = node;
return kErrorOk;
}
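// For example (sketch): embedding four uint32_t items stores 16 bytes; payloads
// up to EmbedDataNode::kInlineBufferSize live in the node's inline buffer, while
// larger ones are allocated from `_dataZone` and referenced via `_externalData`.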
Error BaseBuilder::newConstPoolNode(ConstPoolNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t size) {
*out = nullptr;
if (data) {
if (size == SIZE_MAX)
size = strlen(data);
if (size > 0) {
data = static_cast<char*>(_dataZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
}
return _newNodeT<CommentNode>(out, data);
}
BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
if (!_cursor) {
if (_nodeList.empty()) {
_nodeList.reset(node, node);
}
else {
node->_next = _nodeList.first();
_nodeList._first->_prev = node;
_nodeList._first = node;
}
}
else {
BaseNode* prev = _cursor;
BaseNode* next = _cursor->next();
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next)
next->_prev = node;
else
_nodeList._last = node;
}
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
_cursor = node;
return node;
}
BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
BaseNode* prev = ref;
BaseNode* next = ref->next();
node->_prev = prev;
node->_next = next;
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
prev->_next = node;
if (next)
next->_prev = node;
else
_nodeList._last = node;
return node;
}
BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
ASMJIT_ASSERT(ref->isActive());
BaseNode* prev = ref->prev();
BaseNode* next = ref;
node->_prev = prev;
node->_next = next;
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
next->_prev = node;
if (prev)
prev->_next = node;
else
_nodeList._first = node;
return node;
}
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive())
return node;
BaseNode* prev = node->prev();
BaseNode* next = node->next();
if (_nodeList._first == node)
_nodeList._first = next;
else
prev->_next = next;
if (_nodeList._last == node)
_nodeList._last = prev;
else
next->_prev = prev;
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
if (_cursor == node)
_cursor = prev;
return node;
}
void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
}
if (!first->isActive())
return;
BaseNode* prev = first->prev();
BaseNode* next = last->next();
if (_nodeList._first == first)
_nodeList._first = next;
else
prev->_next = next;
if (_nodeList._last == last)
_nodeList._last = prev;
else
next->_prev = prev;
BaseNode* node = first;
uint32_t didRemoveSection = false;
for (;;) {
next = node->next();
ASMJIT_ASSERT(next != nullptr);
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(NodeFlags::kIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node)
_cursor = prev;
if (node == last)
break;
node = next;
}
if (didRemoveSection)
_dirtySectionLinks = true;
}
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
BaseNode* old = _cursor;
_cursor = node;
return old;
}
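// Usage sketch (illustrative only): the cursor determines where addNode() links
// new nodes, so callers typically save and restore it around out-of-order emission:
//
//   BaseNode* saved = builder.setCursor(insertionPoint);
//   // ... nodes emitted here are linked after `insertionPoint` ...
//   builder.setCursor(saved);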
// BaseBuilder - Sections
// ======================
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
return reportError(DebugUtils::errored(kErrorInvalidSection));
if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk))
return reportError(err);
}
SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size())
node = _sectionNodes[sectionId];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
// We have already reserved enough space, so this cannot fail now.
if (sectionId >= _sectionNodes.size())
_sectionNodes.resize(&_allocator, sectionId + 1);
_sectionNodes[sectionId] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::section(Section* section) {
SectionNode* node;
ASMJIT_PROPAGATE(sectionNodeOf(&node, section->id()));
ASMJIT_ASSUME(node != nullptr);
if (!node->isActive()) {
// Insert the section at the end if it was not part of the code.
addAfter(node, lastNode());
_cursor = node;
}
else {
// This is a bit tricky. We cache section links to make sure that
// switching sections doesn't involve traversing the linked list unless
// the position of the section has changed.
if (hasDirtySectionLinks())
updateSectionLinks();
if (node->_nextSection)
_cursor = node->_nextSection->_prev;
else
_cursor = _nodeList.last();
}
return kErrorOk;
}
void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks)
return;
BaseNode* node_ = _nodeList.first();
SectionNode* currentSection = nullptr;
while (node_) {
if (node_->isSection()) {
if (currentSection)
currentSection->_nextSection = node_->as<SectionNode>();
currentSection = node_->as<SectionNode>();
}
node_ = node_->next();
}
if (currentSection)
currentSection->_nextSection = nullptr;
_dirtySectionLinks = false;
}
// BaseBuilder - Labels
// ====================
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
return DebugUtils::errored(kErrorInvalidLabel);
if (index >= _labelNodes.size())
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
LabelNode* node = _labelNodes[index];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
_labelNodes[index] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
LabelEntry* le;
ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
uint32_t labelId = le->id();
// We have just added one label, so it cannot be in `_labelNodes` yet.
ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err))
return self->reportError(err);
LabelNode* node;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
Label BaseBuilder::newLabel() {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newLabelEntry(&le) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newNamedLabelEntry(&le, name, nameSize, type, parentId) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Error BaseBuilder::bind(const Label& label) {
LabelNode* node;
ASMJIT_PROPAGATE(labelNodeOf(&node, label));
addNode(node);
return kErrorOk;
}
// BaseBuilder - Passes
// ====================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes)
if (strcmp(pass->name(), name) == 0)
return pass;
return nullptr;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is called directly by `addPassT()`, we treat a `null` argument
// as an out-of-memory condition. Otherwise it would be API misuse.
return DebugUtils::errored(kErrorOutOfMemory);
}
else if (ASMJIT_UNLIKELY(pass->_cb)) {
// Kinda weird, but okay...
if (pass->_cb == this)
return kErrorOk;
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
pass->_cb = this;
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr))
return DebugUtils::errored(kErrorInvalidArgument);
if (pass->_cb != nullptr) {
if (pass->_cb != this)
return DebugUtils::errored(kErrorInvalidState);
uint32_t index = _passes.indexOf(pass);
ASMJIT_ASSERT(index != Globals::kNotFound);
pass->_cb = nullptr;
_passes.removeAt(index);
}
pass->~Pass();
return kErrorOk;
}
Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (_passes.empty())
return kErrorOk;
ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed;
Error err = kErrorOk;
setErrorHandler(&postponed);
for (Pass* pass : _passes) {
_passZone.reset();
err = pass->run(&_passZone, _logger);
if (err)
break;
}
_passZone.reset();
setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err))
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
return kErrorOk;
}
// BaseBuilder - Emit
// ==================
Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
InstOptions options = instOptions() | forcedInstOptions();
if (Support::test(options, InstOptions::kReserved)) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
if (hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate)) {
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
ValidationFlags validationFlags = isCompiler() ? ValidationFlags::kEnableVirtRegs : ValidationFlags::kNone;
Error err = _funcs.validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount, validationFlags);
if (ASMJIT_UNLIKELY(err)) {
#ifndef ASMJIT_NO_LOGGING
return EmitterUtils::logInstructionFailed(this, err, instId, options, o0, o1, o2, opExt);
#else
resetState();
return reportError(err);
#endif
}
}
#endif
// Clear instruction options that should never be part of a regular instruction.
options &= ~InstOptions::kReserved;
}
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
if (ASMJIT_UNLIKELY(!node)) {
resetExtraReg();
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node = new(node) InstNode(this, instId, options, opCount, opCapacity);
node->setExtraReg(extraReg());
node->setOp(0, o0);
node->setOp(1, o1);
node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++)
node->setOp(i, opExt[i - 3]);
node->resetOpRange(opCount, opCapacity);
if (comment)
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
addNode(node);
resetExtraReg();
return kErrorOk;
}
// BaseBuilder - Align
// ===================
Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
AlignNode* node;
ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
// BaseBuilder - Embed
// ===================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!isLabelValid(label))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, nullptr, pool.size()));
ASMJIT_ASSUME(node != nullptr);
pool.fill(node->data());
addNode(node);
return kErrorOk;
}
// BaseBuilder - EmbedLabel & EmbedLabelDelta
// ==========================================
//
// If dataSize is zero it means that the size is the same as the target register width; however,
// if it's provided we want to validate that it's within the possible range.
static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
return !dataSize || (Support::isPowerOf2(dataSize) && dataSize <= 8);
}
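// For example: 0 (native register width), 1, 2, 4, and 8 are accepted, while
// 3 or 16 would be rejected as invalid embedded-label sizes.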
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelDeltaNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
// BaseBuilder - Comment
// =====================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
CommentNode* node;
ASMJIT_PROPAGATE(newCommentNode(&node, data, size));
ASMJIT_ASSUME(node != nullptr);
addNode(node);
return kErrorOk;
}
// BaseBuilder - SerializeTo
// =========================
Error BaseBuilder::serializeTo(BaseEmitter* dst) {
Error err = kErrorOk;
BaseNode* node_ = _nodeList.first();
Operand_ opArray[Globals::kMaxOpCount];
do {
dst->setInlineComment(node_->inlineComment());
if (node_->isInst()) {
InstNode* node = node_->as<InstNode>();
// NOTE: Inlined to remove one additional call per instruction.
dst->setInstOptions(node->options());
dst->setExtraReg(node->extraReg());
const Operand_* op = node->operands();
const Operand_* opExt = EmitterUtils::noExt;
uint32_t opCount = node->opCount();
if (opCount > 3) {
uint32_t i = 4;
opArray[3] = op[3];
while (i < opCount) {
opArray[i].copyFrom(op[i]);
i++;
}
while (i < Globals::kMaxOpCount) {
opArray[i].reset();
i++;
}
opExt = opArray + 3;
}
err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
}
else if (node_->isLabel()) {
if (node_->isConstPool()) {
ConstPoolNode* node = node_->as<ConstPoolNode>();
err = dst->embedConstPool(node->label(), node->constPool());
}
else {
LabelNode* node = node_->as<LabelNode>();
err = dst->bind(node->label());
}
}
else if (node_->isAlign()) {
AlignNode* node = node_->as<AlignNode>();
err = dst->align(node->alignMode(), node->alignment());
}
else if (node_->isEmbedData()) {
EmbedDataNode* node = node_->as<EmbedDataNode>();
err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
}
else if (node_->isEmbedLabel()) {
EmbedLabelNode* node = node_->as<EmbedLabelNode>();
err = dst->embedLabel(node->label(), node->dataSize());
}
else if (node_->isEmbedLabelDelta()) {
EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
}
else if (node_->isSection()) {
SectionNode* node = node_->as<SectionNode>();
err = dst->section(_code->sectionById(node->id()));
}
else if (node_->isComment()) {
CommentNode* node = node_->as<CommentNode>();
err = dst->comment(node->inlineComment());
}
if (err) break;
node_ = node_->next();
} while (node_);
return err;
}
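// Typical use (sketch, assuming both emitters share the same CodeHolder): record
// nodes with a Builder, then serialize them into an Assembler to produce bytes:
//
//   x86::Builder builder(&code);
//   // ... emit instructions, labels, and data ...
//   x86::Assembler assembler(&code);
//   builder.serializeTo(&assembler);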
// BaseBuilder - Events
// ====================
Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
SectionNode* initialSection;
Error err = sectionNodeOf(&initialSection, 0);
if (!err)
err = _passes.willGrow(&_allocator, 8);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
ASMJIT_ASSUME(initialSection != nullptr);
_cursor = initialSection;
_nodeList.reset(initialSection, initialSection);
initialSection->setFlags(NodeFlags::kIsActive);
return kErrorOk;
}
Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
BaseBuilder_deletePasses(this);
_sectionNodes.reset();
_labelNodes.reset();
_allocator.reset(&_codeZone);
_codeZone.reset();
_dataZone.reset();
_passZone.reset();
_nodeFlags = NodeFlags::kNone;
_cursor = nullptr;
_nodeList.reset();
return Base::onDetach(code);
}
// Pass - Construction & Destruction
// =================================
Pass::Pass(const char* name) noexcept
: _name(name) {}
Pass::~Pass() noexcept {}
// Pass - Interface
// ================
// [[pure virtual]]
Error Pass::run(Zone* zone, Logger* logger) {
DebugUtils::unused(zone, logger);
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER

File diff suppressed because it is too large

View File

@@ -1,35 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_BUILDER_P_H_INCLUDED
#define ASMJIT_CORE_BUILDER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_builder
//! \{
static inline void BaseBuilder_assignInlineComment(BaseBuilder* self, BaseNode* node, const char* comment) noexcept {
if (comment)
node->setInlineComment(static_cast<char*>(self->_dataZone.dup(comment, strlen(comment), true)));
}
static inline void BaseBuilder_assignInstState(BaseBuilder* self, InstNode* node, const BaseEmitter::State& state) noexcept {
node->setOptions(state.options);
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(self, node, state.comment);
}
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
#endif // ASMJIT_CORE_BUILDER_P_H_INCLUDED

View File

@@ -1,113 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Flags used by \ref CodeBuffer.
enum class CodeBufferFlags : uint32_t {
//! No flags.
kNone = 0,
//! Buffer is external (not allocated by asmjit).
kIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kIsFixed = 0x00000002u
};
ASMJIT_DEFINE_ENUM_FLAGS(CodeBufferFlags)
//! Code or data buffer.
struct CodeBuffer {
//! \name Members
//! \{
//! The content of the buffer (data).
uint8_t* _data;
//! Number of bytes of `data` used.
size_t _size;
//! Buffer capacity (in bytes).
size_t _capacity;
//! Buffer flags.
CodeBufferFlags _flags;
//! \}
//! \name Overloaded Operators
//! \{
//! Returns a reference to the byte at the given `index`.
inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \overload
inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \}
//! \name Accessors
//! \{
//! Returns code buffer flags.
ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer is fixed and cannot grow.
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users, it's never used by AsmJit.
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; }
//! Returns the size of the data.
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the capacity of the data.
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references.
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; }
//! \overload
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; }
//! \}
//! \name Iterators
//! \{
ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; }
ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; }
ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; }
ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; }
//! \}
};
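// A short sketch of inspecting a section's buffer through the CodeHolder API:
//
//   CodeBuffer& buf = code.sectionById(0)->buffer();
//   size_t used = buf.size();              // Bytes emitted so far.
//   size_t room = buf.capacity() - used;   // Remaining bytes before a grow.
//   for (uint8_t b : buf) { /* begin()/end() span only the used bytes. */ }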
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,175 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
#include "../core/codewriter_p.h"
ASMJIT_BEGIN_NAMESPACE
bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t bitShift = format.immBitShift();
uint32_t discardLsb = format.immDiscardLsb();
// Invalid offset (should not happen).
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
uint32_t value;
// First handle all unsigned offset types.
if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
if (value != offset64)
return false;
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isInt32(offset64))
return false;
value = uint32_t(int32_t(offset64));
if (!Support::isEncodableOffset32(int32_t(value), bitCount))
return false;
}
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
return true;
}
case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
return false;
uint32_t immLo = value & 0x3u;
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
*dst = (immLo << 29) | (immHi << 5);
return true;
}
default:
return false;
}
}
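// Worked example (AArch64 ADR): the 21-bit value is split into immLo (2 bits at
// bit 29) and immHi (19 bits at bit 5). For offset64 == 0x1234 this yields
// ((0x1234 & 0x3) << 29) | ((0x1234 >> 2) << 5), i.e. immLo == 0, immHi == 0x48D.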
bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
uint64_t value;
// First handle all unsigned offset types.
if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
}
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
if (value != uint64_t(offset64))
return false;
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount))
return false;
value = uint64_t(offset64);
}
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
// Offset the destination by `valueOffset()` so `dst` points to the
// patched word instead of the beginning of the patched region.
dst = static_cast<char*>(dst) + format.valueOffset();
switch (format.valueSize()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask));
return true;
}
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask));
return true;
}
case 4: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
return false;
}
Support::writeU32uLE(dst, Support::readU32uLE(dst) | mask);
return true;
}
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format))
return false;
Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
return true;
}
default:
return false;
}
}
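// Sketch of patching a previously emitted 32-bit field (names per the OffsetFormat
// API; the `resetToSimpleValue()` setup helper is an assumption):
//
//   OffsetFormat format;
//   format.resetToSimpleValue(OffsetType::kSignedOffset, 4);
//   CodeWriterUtils::writeOffset(bufferPtr, target - source, format);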
ASMJIT_END_NAMESPACE

View File

@@ -1,179 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#include "../core/assembler.h"
#include "../core/codebuffer.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_assembler
//! \{
struct OffsetFormat;
//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_FORCE_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
ASMJIT_FORCE_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
Error err = a->_code->growBuffer(&buffer, n);
if (ASMJIT_UNLIKELY(err))
return a->reportError(err);
_cursor = a->_bufferPtr;
}
return kErrorOk;
}
ASMJIT_FORCE_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_FORCE_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_FORCE_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_FORCE_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_FORCE_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_FORCE_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor += size_t(cond);
}
template<typename T>
ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_FORCE_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_FORCE_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_FORCE_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t(v & 0xFFu);
v >>= 8;
}
_cursor += size;
}
template<typename T>
ASMJIT_FORCE_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
// Emit the `size` least significant bytes of `value`, most significant byte first.
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t((v >> ((size - 1 - i) * 8u)) & 0xFFu);
}
_cursor += size;
}
ASMJIT_FORCE_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
ASMJIT_FORCE_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
while (++p != _cursor)
p[-1] = p[0];
_cursor--;
}
template<typename T>
ASMJIT_FORCE_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
p[0] = p[-1];
p--;
}
*p = uint8_t(val & 0xFF);
_cursor++;
}
ASMJIT_FORCE_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
a->_bufferPtr = _cursor;
buffer._size = Support::max(buffer._size, newSize);
}
};
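// Emitters use CodeWriter roughly like this (sketch):
//
//   CodeWriter writer(assembler);
//   ASMJIT_PROPAGATE(writer.ensureSpace(assembler, 16)); // May grow the buffer.
//   writer.emit8(0xC3);                                  // One-byte opcode (x86 `ret`).
//   writer.done(assembler);                              // Publish the new buffer size.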
//! Code writer utilities.
namespace CodeWriterUtils {
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED

View File

@@ -1,598 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder_p.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
// GlobalConstPoolPass
// ===================
class GlobalConstPoolPass : public Pass {
public:
typedef Pass Base;
public:
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
DebugUtils::unused(zone, logger);
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
ConstPoolNode* globalConstPool = compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)];
if (globalConstPool) {
compiler->addAfter(globalConstPool, compiler->lastNode());
compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
}
return kErrorOk;
}
};
// BaseCompiler - Construction & Destruction
// =========================================
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegZone(4096 - Zone::kBlockOverhead),
_vRegArray(),
_constPools { nullptr, nullptr } {
_emitterType = EmitterType::kCompiler;
_validationFlags = ValidationFlags::kEnableVirtRegs;
}
BaseCompiler::~BaseCompiler() noexcept {}
// BaseCompiler - Function Management
// ==================================
Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelType::kFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// If the Target guarantees greater stack alignment than required by the calling convention,
// then override it, as this avoids having to perform dynamic stack alignment.
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
}
ASMJIT_PROPAGATE(registerLabelNode(funcNode));
*out = funcNode;
return kErrorOk;
}
Error BaseCompiler::addFuncNode(FuncNode** out, const FuncSignature& signature) {
State state = _grabState();
ASMJIT_PROPAGATE(newFuncNode(out, signature));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addFunc(*out);
return kErrorOk;
}
Error BaseCompiler::newFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
FuncRetNode* node;
ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
ASMJIT_ASSUME(node != nullptr);
node->setOpCount(opCount);
node->setOp(0, o0);
node->setOp(1, o1);
node->resetOpRange(2, node->opCapacity());
*out = node;
return kErrorOk;
}
Error BaseCompiler::addFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
State state = _grabState();
ASMJIT_PROPAGATE(newFuncRetNode(out, o0, o1));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addNode(*out);
return kErrorOk;
}
FuncNode* BaseCompiler::addFunc(FuncNode* func) {
_func = func;
addNode(func); // Function node.
BaseNode* prev = cursor(); // {CURSOR}.
addNode(func->exitNode()); // Function exit label.
addNode(func->endNode()); // Function end sentinel.
_setCursor(prev);
return func;
}
Error BaseCompiler::endFunc() {
FuncNode* func = _func;
resetState();
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
// Add the local constant pool at the end of the function (if exists).
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
if (localConstPool) {
setCursor(func->endNode()->prev());
addNode(localConstPool);
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
}
// Mark as finished.
_func = nullptr;
SentinelNode* end = func->endNode();
setCursor(end);
return kErrorOk;
}
// BaseCompiler - Function Invocation
// ==================================
Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, InstOptions::kNone));
node->setOpCount(1);
node->setOp(0, o0);
node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
if (!node->_args)
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
}
*out = node;
return kErrorOk;
}
Error BaseCompiler::addInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
State state = _grabState();
ASMJIT_PROPAGATE(newInvokeNode(out, instId, o0, signature));
ASMJIT_ASSUME(*out != nullptr);
BaseBuilder_assignInstState(this, *out, state);
addNode(*out);
return kErrorOk;
}
// BaseCompiler - Virtual Registers
// ================================
static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
char buf[64];
int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}
Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
if (ASMJIT_UNLIKELY(!vReg))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
vReg = new(vReg) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0')
vReg->_name.setData(&_dataZone, name, SIZE_MAX);
else
BaseCompiler_assignGenericName(this, vReg);
#else
DebugUtils::unused(name);
#endif
_vRegArray.appendUnsafe(vReg);
*out = vReg;
return kErrorOk;
}
Error BaseCompiler::_newReg(BaseReg* out, TypeId typeId, const char* name) {
OperandSignature regSignature;
out->reset();
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, TypeId typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, typeId, sb.data());
}
Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
out->reset();
OperandSignature regSignature;
TypeId typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
// NOTE: It's possible to cast one register type to another if it's the same register group. However, VirtReg
// always contains the TypeId that was used to create the register. This means that in some cases we may end
// up with `ref` and `vRef` having different sizes. In such a case we adjust the TypeId to match the `ref` register
// type instead of the original register type, which should be the expected behavior.
uint32_t typeSize = TypeUtils::sizeOf(typeId);
uint32_t refSize = ref.size();
if (typeSize != refSize) {
if (TypeUtils::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = TypeId(uint32_t(TypeId::kInt8 ) | (uint32_t(typeId) & 1)); break;
case 2: typeId = TypeId(uint32_t(TypeId::kInt16) | (uint32_t(typeId) & 1)); break;
case 4: typeId = TypeId(uint32_t(TypeId::kInt32) | (uint32_t(typeId) & 1)); break;
case 8: typeId = TypeId(uint32_t(TypeId::kInt64) | (uint32_t(typeId) & 1)); break;
default: typeId = TypeId::kVoid; break;
}
}
else if (TypeUtils::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = TypeId::kMmx64;
}
else if (TypeUtils::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = TypeId::kMask8; break;
case 2: typeId = TypeId::kMask16; break;
case 4: typeId = TypeId::kMask32; break;
case 8: typeId = TypeId::kMask64; break;
default: typeId = TypeId::kVoid; break;
}
}
else {
// Vector register - change TypeId to match `ref` size, keep vector metadata.
TypeId scalarTypeId = TypeUtils::scalarOf(typeId);
switch (refSize) {
case 16: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec128Start); break;
case 32: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec256Start); break;
case 64: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec512Start); break;
default: typeId = TypeId::kVoid; break;
}
}
if (typeId == TypeId::kVoid)
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
else {
typeId = ArchTraits::byArch(arch()).regTypeToTypeId(ref.type());
}
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, ref, sb.data());
}
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (size == 0)
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment == 0)
alignment = 1;
if (!Support::isPowerOf2(alignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment > 64)
alignment = 64;
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name));
ASMJIT_ASSUME(vReg != nullptr);
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_gpSignature.regType()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
vReg->id(), 0, 0);
return kErrorOk;
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId))
return DebugUtils::errored(kErrorInvalidVirtId);
if (newAlignment && !Support::isPowerOf2(newAlignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (newAlignment > 64)
newAlignment = 64;
VirtReg* vReg = virtRegById(virtId);
if (newSize)
vReg->_virtSize = newSize;
if (newAlignment)
vReg->_alignment = uint8_t(newAlignment);
// This is required if the RAPass is already running. There is a chance that a stack slot has already been
// allocated, and in that case it has to be updated as well, otherwise we would allocate the wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
workReg->_stackSlot->_alignment = vReg->_alignment;
}
return kErrorOk;
}
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
out->reset();
if (uint32_t(scope) > 1)
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (!_constPools[uint32_t(scope)])
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
ConstPoolNode* pool = _constPools[uint32_t(scope)];
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) |
OperandSignature::fromSize(uint32_t(size)),
pool->labelId(), 0, int32_t(off));
return kErrorOk;
}
void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) return;
if (fmt && fmt[0] != '\0') {
char buf[128];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
va_end(ap);
vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
}
else {
BaseCompiler_assignGenericName(this, vReg);
}
}
// BaseCompiler - Jump Annotations
// ===============================
Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _allocator.allocT<JumpNode>();
uint32_t opCount = 1;
*out = node;
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
node->setOp(0, o0);
node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
return kErrorOk;
}
Error BaseCompiler::emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation) {
State state = _grabState();
JumpNode* node;
ASMJIT_PROPAGATE(newJumpNode(&node, instId, state.options, o0, annotation));
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(this, node, state.comment);
addNode(node);
return kErrorOk;
}
JumpAnnotation* BaseCompiler::newJumpAnnotation() {
if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
uint32_t id = _jumpAnnotations.size();
JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
if (!jumpAnnotation) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
_jumpAnnotations.appendUnsafe(jumpAnnotation);
return jumpAnnotation;
}
// BaseCompiler - Events
// =====================
Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
RegType nativeRegType = Environment::is32Bit(code->arch()) ? RegType::kGp32 : RegType::kGp64;
_gpSignature = archTraits.regTypeToSignature(nativeRegType);
Error err = addPassT<GlobalConstPoolPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
_func = nullptr;
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
_vRegArray.reset();
_vRegZone.reset();
return Base::onDetach(code);
}
// FuncPass - Construction & Destruction
// =====================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
// FuncPass - Run
// ==============
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
if (!node) return kErrorOk;
do {
if (node->type() == NodeType::kFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `NodeType::kFunc`.
do {
node = node->next();
} while (node && node->type() != NodeType::kFunc);
} while (node);
return kErrorOk;
}
// [[pure virtual]]
Error FuncPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) {
DebugUtils::unused(zone, logger, func);
return DebugUtils::errored(kErrorInvalidState);
}
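// A custom pass (illustrative sketch): subclass FuncPass, override runOnFunction(),
// and register it via BaseCompiler::addPassT<>():
//
//   class CountFuncsPass : public FuncPass {
//   public:
//     CountFuncsPass() noexcept : FuncPass("CountFuncsPass") {}
//     Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override {
//       DebugUtils::unused(zone, logger);
//       // Inspect or rewrite nodes between `func` and `func->endNode()` here.
//       return kErrorOk;
//     }
//   };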
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER

View File

@@ -1,737 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
#define ASMJIT_CORE_COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder.h"
#include "../core/constpool.h"
#include "../core/compilerdefs.h"
#include "../core/func.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
class JumpAnnotation;
class JumpNode;
class FuncNode;
class FuncRetNode;
class InvokeNode;
//! \addtogroup asmjit_compiler
//! \{
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register allocation and automatic handling of function
//! calling conventions. It was primarily designed for merging multiple parts of code into a function without worrying
//! about registers and function calling conventions.
//!
//! BaseCompiler can be used, with minimal effort, to handle 32-bit and 64-bit code generation within a single code
//! base.
//!
//! BaseCompiler is based on BaseBuilder and provides all of its features. This means that the code it stores
//! can be modified (removed, added, injected) and analyzed. When the code is finalized, the compiler can emit it
//! into an Assembler to translate the abstract representation into machine code.
//!
//! Check out architecture-specific compilers for more details and examples:
//!
//! - \ref x86::Compiler - X86/X64 compiler implementation.
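//!
//! A minimal workflow sketch (x86 shown; CodeHolder setup is assumed and helper
//! names follow the public AsmJit API):
//!
//! ```
//! x86::Compiler cc(&code);
//! cc.addFunc(FuncSignatureT<int>());     // Begin a function returning int.
//! x86::Gp result = cc.newGpd("result");  // Virtual register, allocated later.
//! cc.mov(result, 42);
//! cc.ret(result);
//! cc.endFunc();
//! cc.finalize();                         // Runs passes and serializes the code.
//! ```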
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public:
ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base;
//! \name Members
//! \{
//! Current function.
FuncNode* _func;
//! Allocates `VirtReg` objects.
Zone _vRegZone;
//! Stores array of `VirtReg` pointers.
ZoneVector<VirtReg*> _vRegArray;
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
//! Local and global constant pools.
//!
//! The local constant pool is flushed with each function, while the global constant pool is flushed only by \ref finalize().
ConstPoolNode* _constPools[2];
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseCompiler` instance.
ASMJIT_API BaseCompiler() noexcept;
//! Destroys the `BaseCompiler` instance.
ASMJIT_API virtual ~BaseCompiler() noexcept;
//! \}
//! \name Function Management
//! \{
//! Creates a new \ref FuncNode.
ASMJIT_API Error newFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the instruction stream.
ASMJIT_API Error addFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error newFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the instruction stream.
ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Returns the current function.
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
FuncNode* node;
newFuncNode(&node, signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the instruction stream by using
//! the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
FuncNode* node;
addFuncNode(&node, signature);
return node;
}
//! Adds a function `node` to the instruction stream.
ASMJIT_API FuncNode* addFunc(FuncNode* ASMJIT_NONNULL(func));
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
#if !defined(ASMJIT_NO_DEPRECATED)
inline Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
//! Sets a function argument at `argIndex` to `reg`.
ASMJIT_DEPRECATED("Setting arguments through Compiler is deprecated, use FuncNode->setArg() instead")
inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
//! Sets a function argument at `argIndex` at `valueIndex` to `reg`.
ASMJIT_DEPRECATED("Setting arguments through Compiler is deprecated, use FuncNode->setArg() instead")
inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }
#endif
inline Error addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
return addFuncRetNode(&node, o0, o1);
}
//! \}
//! \name Function Invocation
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error newInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to the instruction stream.
ASMJIT_API Error addInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
//! \}
//! \name Virtual Registers
//! \{
//! Creates a new virtual register representing the given `typeId` and `signature`.
//!
//! \note This function is public, but its direct use is not generally recommended for AsmJit users; use the
//! architecture-specific `newReg()` functionality instead, or functions like \ref _newReg() and \ref _newRegFmt().
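//!
//! For example, with the x86 backend virtual registers are typically created like this (a sketch; the names are
//! illustrative):
//!
//! \code
//! x86::Gp tmp = cc.newUInt32("tmp"); // 32-bit general purpose virtual register.
//! x86::Xmm vec = cc.newXmm("vec");   // 128-bit SIMD virtual register.
//! \endcode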
ASMJIT_API Error newVirtReg(VirtReg** ASMJIT_NONNULL(out), TypeId typeId, OperandSignature signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(BaseReg* ASMJIT_NONNULL(out), TypeId typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), TypeId typeId, const char* fmt, ...);
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* name = nullptr);
//! Creates a new virtual register compatible with the provided reference register `ref`.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const BaseReg& reg) const noexcept {
return isVirtIdValid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \}
//! \name Stack
//! \{
//! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
//!
//! \note `name` can be used to give the stack a name, for debugging purposes.
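//!
//! Architecture-specific compilers wrap this in `newStack()`, e.g. (a sketch, assuming the x86 backend):
//!
//! \code
//! // Allocates 256 bytes of stack memory aligned to 16 bytes.
//! x86::Mem scratch = cc.newStack(256, 16, "scratch");
//! \endcode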
ASMJIT_API Error _newStack(BaseMem* ASMJIT_NONNULL(out), uint32_t size, uint32_t alignment, const char* name = nullptr);
//! Updates the stack size of a stack created by `_newStack()`, identified by `virtId`.
ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
//! Updates the stack size of a stack created by `_newStack()`.
ASMJIT_INLINE_NODEBUG Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
return setStackSize(mem.id(), newSize, newAlignment);
}
//! \}
//! \name Constants
//! \{
//! Creates a new constant of the given `scope` (see \ref ConstPoolScope).
//!
//! This function adds a constant of the given `size` to the built-in \ref ConstPool and stores the reference to that
//! constant to the `out` operand.
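//!
//! Architecture-specific compilers wrap this in a friendlier `newConst()` API, e.g. (a sketch, assuming the x86
//! backend):
//!
//! \code
//! uint64_t data = 0x1122334455667788u;
//! // Embeds the constant into the local constant pool and returns a memory operand pointing at it.
//! x86::Mem c = cc.newConst(ConstPoolScope::kLocal, &data, sizeof(data));
//! \endcode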
ASMJIT_API Error _newConst(BaseMem* ASMJIT_NONNULL(out), ConstPoolScope scope, const void* data, size_t size);
//! \}
//! \name Miscellaneous
//! \{
//! Renames the given virtual register `reg` to a formatted string `fmt`.
ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
//! \}
//! \name Jump Annotations
//! \{
ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
ASMJIT_API Error newJumpNode(JumpNode** ASMJIT_NONNULL(out), InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables.
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! Jump annotation used to annotate jumps.
//!
//! \ref BaseCompiler allows emitting jumps where the target is either a register or a memory operand. Such jumps
//! cannot be trivially inspected, so instead of using heuristics AsmJit allows annotating such jumps with their
//! possible targets. The register allocator then uses the annotation to construct the control-flow, which is then
//! used by liveness analysis and other tools to prepare ground for register allocation.
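//!
//! A minimal sketch (the x86 backend and the `caseA` / `caseB` / `target` names are illustrative):
//!
//! \code
//! JumpAnnotation* annotation = cc.newJumpAnnotation();
//! annotation->addLabel(caseA); // The jump may land at caseA...
//! annotation->addLabel(caseB); // ...or at caseB.
//!
//! // `target` is a register holding the computed address, e.g. loaded from a jump table.
//! cc.emitAnnotatedJump(x86::Inst::kIdJmp, target, annotation);
//! \endcode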
class JumpAnnotation {
public:
ASMJIT_NONCOPYABLE(JumpAnnotation)
//! \name Members
//! \{
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
uint32_t _annotationId;
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG JumpAnnotation(BaseCompiler* ASMJIT_NONNULL(compiler), uint32_t annotationId) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
//! \}
//! \name Accessors
//! \{
//! Returns the compiler that owns this JumpAnnotation.
ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump.
ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! \}
//! \name Annotation Building API
//! \{
//! Adds the `label` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
//! \}
};
//! Jump instruction with \ref JumpAnnotation.
//!
//! \note This node should only be used to represent a jump whose target cannot be deduced by examining the
//! instruction operands, for example when the jump target is a register or a memory location. This pattern is
//! often used to perform indirect jumps through a jump table, e.g. to implement a `switch{}` statement.
class JumpNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(JumpNode)
//! \name Members
//! \{
JumpAnnotation* _annotation;
//! \}
//! \name Construction & Destruction
//! \{
inline JumpNode(BaseCompiler* ASMJIT_NONNULL(cc), InstId instId, InstOptions options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNode(cc, instId, options, opCount, kBaseOpCapacity),
_annotation(annotation) {
setType(NodeType::kJump);
}
//! \}
//! \name Accessors
//! \{
//! Tests whether this JumpNode has an associated \ref JumpAnnotation.
ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
//! \}
};
//! Function node represents a function used by \ref BaseCompiler.
//!
//! A function is composed of the following:
//!
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit. To get the entry, simply use
//! \ref FuncNode::label(), which is the same as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A helper function
//! \ref FuncNode::exitLabel() exists and returns an exit label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of a function - there should be no
//! code that belongs to the function after this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::argPacks().
//!
//! In a node list, the function and its body looks like the following:
//!
//! \code{.unparsed}
//! [...] - Anything before the function.
//!
//! [FuncNode] - Entry point of the function, acts as a label as well.
//! <Prolog> - Prolog inserted by the register allocator.
//! {...} - Function body - user code basically.
//! [ExitLabel] - Exit label
//! <Epilog> - Epilog inserted by the register allocator.
//! <Return> - Return inserted by the register allocator.
//! {...} - Can contain data or user code (error handling, special cases, ...).
//! [FuncEnd] - End sentinel
//!
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the instruction stream by \ref BaseCompiler::addFunc() it actually inserts 3 nodes
//! (FuncNode, ExitLabel, and FuncEnd) and sets the current cursor to be FuncNode. When \ref BaseCompiler::endFunc()
//! is called the cursor is set to FuncEnd. This guarantees that the user can use ExitLabel as a marker after
//! which additional code or data can be placed, which is a common practice.
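//!
//! A common use of the exit label is to jump straight to the function's epilog (a sketch; the x86 backend and
//! the `funcNode` / `x` names are illustrative):
//!
//! \code
//! Label exit = funcNode->exitLabel();
//!
//! // Somewhere in the function body - jumping to `exit` skips straight to the epilog.
//! cc.test(x, x);
//! cc.jz(exit);
//! \endcode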
class FuncNode : public LabelNode {
public:
ASMJIT_NONCOPYABLE(FuncNode)
//! Arguments pack.
struct ArgPack {
RegOnly _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
inline RegOnly& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline const RegOnly& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function frame.
FuncFrame _frame;
//! Function exit label.
LabelNode* _exitNode;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
ArgPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create a new `FuncNode`.
inline FuncNode(BaseBuilder* ASMJIT_NONNULL(cb)) noexcept
: LabelNode(cb),
_funcDetail(),
_frame(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr) {
setType(NodeType::kFunc);
}
//! \}
//! \name Accessors
//! \{
//! Returns function exit `LabelNode`.
ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label.
ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel node.
ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; }
//! Returns function detail.
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function detail.
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame.
ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; }
//! Returns function attributes.
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
//! Returns arguments count.
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; }
//! Tests whether the function has a return value.
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns argument pack at `argIndex`.
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, const BaseReg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, size_t valueIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex].reset();
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].reset();
}
//! \}
};
//! Function return, used by \ref BaseCompiler.
class FuncRetNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(FuncRetNode)
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode(BaseBuilder* ASMJIT_NONNULL(cb)) noexcept : InstNode(cb, BaseInst::kIdAbstract, InstOptions::kNone, 0) {
_any._nodeType = NodeType::kFuncRet;
}
//! \}
};
//! Function invocation, used by \ref BaseCompiler.
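//!
//! A typical use (a sketch; the x86 backend, the signature-builder API, `calledFn`, and the virtual registers
//! are illustrative):
//!
//! \code
//! InvokeNode* invokeNode;
//! cc.invoke(&invokeNode, imm((void*)calledFn), FuncSignature::build<int, int, int>());
//! invokeNode->setArg(0, a);      // First argument.
//! invokeNode->setArg(1, b);      // Second argument.
//! invokeNode->setRet(0, result); // Return value.
//! \endcode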
class InvokeNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(InvokeNode)
//! Operand pack provides multiple operands that can be associated with a single return value or function
//! argument. Sometimes this is necessary to express an argument or return value that requires multiple
//! registers, for example a 64-bit value in 32-bit mode or passing / returning homogeneous data structures.
struct OperandPack {
//! Operands.
Operand_ _data[Globals::kMaxValuePack];
//! Reset the pack by resetting all operands in the pack.
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
//! Returns an operand at the given `valueIndex`.
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
inline const Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function return value(s).
OperandPack _rets;
//! Function arguments.
OperandPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(BaseBuilder* ASMJIT_NONNULL(cb), InstId instId, InstOptions options) noexcept
: InstNode(cb, instId, options, kBaseOpCapacity),
_funcDetail(),
_args(nullptr) {
setType(NodeType::kInvoke);
_resetOps();
_rets.reset();
addFlags(NodeFlags::kIsRemovable);
}
//! \}
//! \name Accessors
//! \{
//! Sets the function signature.
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
}
//! Returns the function detail.
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail.
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand.
ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return _opArray[0].as<Operand>(); }
//! \overload
ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
//! Tests whether the function has a return value.
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments.
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s).
ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns an operand pack representing function argument(s) at the given `argIndex`.
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! \overload
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Returns a function argument at the given `argIndex`.
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! \overload
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! Sets the function return value at `valueIndex` to `op`.
inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
//! Sets the function argument at `argIndex` and `valueIndex` to `op`.
inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = op;
}
//! Sets the function return value at `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setRet(size_t valueIndex, const BaseReg& reg) noexcept { _setRet(valueIndex, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const BaseReg& reg) noexcept { _setArg(argIndex, 0, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
//! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
//! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
//! \}
};
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
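//!
//! A custom pass can be sketched as follows (`MyFuncPass` is illustrative; the overridden method matches
//! \ref FuncPass::runOnFunction() declared below):
//!
//! \code
//! class MyFuncPass : public FuncPass {
//! public:
//!   MyFuncPass() noexcept : FuncPass("MyFuncPass") {}
//!
//!   Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override {
//!     // Inspect or transform nodes that belong to `func` here.
//!     return kErrorOk;
//!   }
//! };
//! \endcode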
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
typedef Pass Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API FuncPass(const char* name) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the associated `BaseCompiler`.
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \}
//! \name Pass Interface
//! \{
//! Calls `runOnFunction()` on each `FuncNode` found.
ASMJIT_API Error run(Zone* zone, Logger* logger) override;
//! Called once per `FuncNode`.
ASMJIT_API virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) = 0;
//! \}
};
#if !defined(ASMJIT_NO_DEPRECATED)
inline Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
func->setArg(argIndex, valueIndex, reg);
return kErrorOk;
}
#endif
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_COMPILER_H_INCLUDED


@@ -1,171 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
//! Virtual register data, managed by \ref BaseCompiler.
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! \name Members
//! \{
//! Virtual register signature.
OperandSignature _signature {};
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register size (can be smaller than `_signature._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
TypeId _typeId = TypeId::kVoid;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
uint8_t _isFixed : 1;
//! True if the virtual register is only used as a stack (never accessed as register).
uint8_t _isStack : 1;
//! True if this virtual register has assigned stack offset (can be only valid after register allocation pass).
uint8_t _hasStackSlot : 1;
uint8_t _reservedBits : 5;
//! Stack offset assigned by the register allocator relative to stack pointer (can be negative as well).
int32_t _stackOffset = 0;
//! Reserved for future use (padding).
uint32_t _reservedU32 = 0;
//! Virtual register name (user provided or automatically generated).
ZoneString<16> _name {};
// The following members are used exclusively by RAPass. They are initialized to null pointers when the VirtReg
// is created and then changed during RAPass execution. RAPass sets them back to null before it returns.
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG VirtReg(OperandSignature signature, uint32_t id, uint32_t virtSize, uint32_t alignment, TypeId typeId) noexcept
: _signature(signature),
_id(id),
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(typeId),
_isFixed(0),
_isStack(0),
_hasStackSlot(0),
_reservedBits(0) {}
//! \}
//! \name Accessors
//! \{
//! Returns the virtual register id.
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register signature of this virtual register.
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well).
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
//! Returns a virtual register group (maps to the physical register group as well).
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example if this is a 128-bit SIMD register used for a scalar single-precision floating point value, its
//! virtSize would be 4; however, the `regSize` would still say 16 (128 bits), because it's the smallest size
//! of that register type.
ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`.
ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id.
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it as explicit hint for alloc/spill
//! decisions.
ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as explicit hint for
//! alloc/spill decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); }
//! Tests whether the virtual register is in fact a stack that only uses the virtual register id.
//!
//! \note It's an error if a stack is accessed as a register.
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); }
//! Tests whether this virtual register (or stack) has assigned a stack offset.
//!
//! If this is a virtual register that was never allocated on the stack, the returned value is false; otherwise,
//! if it's a virtual register that was spilled or explicitly allocated on the stack, the returned value is true.
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); }
//! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true.
ASMJIT_INLINE_NODEBUG void assignStackSlot(int32_t stackOffset) noexcept {
_hasStackSlot = 1;
_stackOffset = stackOffset;
}
//! Returns a stack offset associated with a virtual register or explicit stack allocation.
//!
//! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return
//! value will be zero when the stack offset was not assigned.
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; }
//! Returns an associated RAWorkReg with this virtual register (only valid during register allocation).
ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; }
//! Associates a RAWorkReg with this virtual register (used by register allocator).
ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
//! Reset the RAWorkReg association (used by register allocator).
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED


@@ -1,370 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/constpool.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ConstPool - Construction & Destruction
// ======================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ConstPool - Reset
// =================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
_minItemSize = 0;
}
// ConstPool - Operations
// ======================
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap)
return self->_zone->allocT<ConstPool::Gap>();
self->_gapPool = gap->_next;
return gap;
}
static inline void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
ASMJIT_ASSERT(size > 0);
while (size > 0) {
size_t gapIndex;
size_t gapSize;
if (size >= 32 && Support::isAligned<size_t>(offset, 32)) {
gapIndex = ConstPool::kIndex32;
gapSize = 32;
}
else if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
}
else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapSize = 8;
}
else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapSize = 4;
}
else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapSize = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapSize = 1;
}
// We don't have to check for errors here, if this failed nothing really happened (just the gap won't be
// visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap)
return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_size = gapSize;
offset += gapSize;
size -= gapSize;
}
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 64)
treeIndex = kIndex64;
else if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return DebugUtils::errored(kErrorInvalidArgument);
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) {
dstOffset = node->_offset;
return kErrorOk;
}
// Before incrementing the current offset try if there is a gap that can be used for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapSize = gap->_size;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0)
ConstPool_addGap(this, gapOffset, gapSize);
}
gapIndex++;
}
if (offset == ~size_t(0)) {
// Get how many bytes have to be skipped so the address is aligned according to the 'size'.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
_size += diff;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorOutOfMemory);
_tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
dstOffset = offset;
// Now create a bunch of shared constants that are based on the data pattern. We stop at size 4,
// it probably doesn't make sense to split constants down to 1 byte.
size_t pCount = 1;
size_t smallerSize = size;
while (smallerSize > 4) {
pCount <<= 1;
smallerSize >>= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += smallerSize) {
node = _tree[treeIndex].get(pData);
if (node) continue;
node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true);
_tree[treeIndex].insert(node);
}
}
if (_minItemSize == 0)
_minItemSize = size;
else
_minItemSize = Support::min(_minItemSize, size);
return kErrorOk;
}
// ConstPool - Reset
// =================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared)
memcpy(_dst + node->_offset, node->data(), _dataSize);
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) const noexcept {
// Clear possible gaps; asmjit should never emit garbage to the output.
memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].forEach(filler);
filler._dataSize <<= 1;
}
}
// ConstPool - Tests
// =================
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32384 - Zone::kBlockOverhead);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
INFO("Adding %u constants to the pool", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = 0x0101010101010101u;
EXPECT_EQ(pool.add(&c, 8, prevOffset), kErrorOk);
EXPECT_EQ(prevOffset, 0u);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 8, curOffset), kErrorOk);
EXPECT_EQ(prevOffset + 8, curOffset);
EXPECT_EQ(pool.size(), (i + 1) * 8);
prevOffset = curOffset;
}
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Retrieving %u constants from the pool", kCount);
{
uint64_t c = 0x0101010101010101u;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(offset, i * 8);
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns");
{
uint32_t c = 0x01010101u;
size_t offset;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(offset, 0u);
// NOTE: We have to adjust the offset to successfully test this on big endian architectures.
size_t baseOffset = size_t(ASMJIT_ARCH_BE ? 4 : 0);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(offset, baseOffset + i * 8);
}
}
INFO("Adding 2 byte constant to misalign the current offset");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8);
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Adding 8 byte constant to check if pool gets aligned again");
{
uint64_t c = 0xFFFFFFFFFFFFFFFFu;
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8 + 8u);
}
INFO("Adding 2 byte constant to verify the gap is filled");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(offset, kCount * 8 + 2);
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Checking reset functionality");
{
pool.reset(&zone);
zone.reset();
EXPECT_EQ(pool.size(), 0u);
EXPECT_EQ(pool.alignment(), 0u);
}
INFO("Checking pool alignment when combined constants are added");
{
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
EXPECT_EQ(pool.size(), 1u);
EXPECT_EQ(pool.alignment(), 1u);
EXPECT_EQ(offset, 0u);
pool.add(bytes, 2, offset);
EXPECT_EQ(pool.size(), 4u);
EXPECT_EQ(pool.alignment(), 2u);
EXPECT_EQ(offset, 2u);
pool.add(bytes, 4, offset);
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 4, offset);
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 32, offset);
EXPECT_EQ(pool.size(), 64u);
EXPECT_EQ(pool.alignment(), 32u);
EXPECT_EQ(offset, 32u);
}
}
#endif
ASMJIT_END_NAMESPACE


@@ -1,250 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
//! Constant pool scope.
enum class ConstPoolScope : uint32_t {
//! Local constant, always embedded right after the current function.
kLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kGlobal = 1,
//! Maximum value of `ConstPoolScope`.
kMaxValue = kGlobal
};
//! Constant pool.
class ConstPool {
public:
ASMJIT_NONCOPYABLE(ConstPool)
//! \cond INTERNAL
//! Index of a given size in const-pool table.
enum Index : uint32_t {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndex64 = 6,
kIndexCount = 7
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
struct Gap {
//! Pointer to the next gap.
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _size;
};
//! Zone-allocated const-pool node.
class Node : public ZoneTreeNodeT<Node> {
public:
ASMJIT_NONCOPYABLE(Node)
//! If this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
ASMJIT_INLINE_NODEBUG Node(size_t offset, bool shared) noexcept
: ZoneTreeNodeT<Node>(),
_shared(shared),
_offset(uint32_t(offset)) {}
ASMJIT_INLINE_NODEBUG void* data() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
};
//! Data comparer used internally.
class Compare {
public:
size_t _dataSize;
ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
}
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
}
};
//! Zone-allocated const-pool tree.
struct Tree {
//! RB tree.
ZoneTree<Node> _tree;
//! Size of the tree (number of nodes).
size_t _size;
//! Size of the data.
size_t _dataSize;
ASMJIT_INLINE_NODEBUG explicit Tree(size_t dataSize = 0) noexcept
: _tree(),
_size(0),
_dataSize(dataSize) {}
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_tree.reset();
_size = 0;
}
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(empty());
_dataSize = dataSize;
}
ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
return _tree.get(data, cmp);
}
ASMJIT_INLINE_NODEBUG void insert(Node* node) noexcept {
Compare cmp(_dataSize);
_tree.insert(node, cmp);
_size++;
}
template<typename Visitor>
inline void forEach(Visitor& visitor) const noexcept {
Node* node = _tree.root();
if (!node) return;
Node* stack[Globals::kMaxTreeHeight];
size_t top = 0;
for (;;) {
Node* left = node->left();
if (left != nullptr) {
ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
stack[top++] = node;
node = left;
continue;
}
for (;;) {
visitor(node);
node = node->right();
if (node != nullptr)
break;
if (top == 0)
return;
node = stack[--top];
}
}
}
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (ASMJIT_UNLIKELY(!node)) return nullptr;
node = new(node) Node(offset, shared);
memcpy(node->data(), data, size);
return node;
}
};
//! \endcond
//! \name Members
//! \{
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gap pool.
Gap* _gapPool;
//! Size of the pool (in bytes).
size_t _size;
//! Required pool alignment.
size_t _alignment;
//! Minimum item size in the pool.
size_t _minItemSize;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_API ConstPool(Zone* zone) noexcept;
ASMJIT_API ~ConstPool() noexcept;
ASMJIT_API void reset(Zone* zone) noexcept;
//! \}
//! \name Accessors
//! \{
//! Tests whether the constant-pool is empty.
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns minimum alignment.
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; }
//! Returns the minimum size of all items added to the constant pool.
ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; }
//! \}
//! \name Utilities
//! \{
//! Adds a constant to the constant pool.
//!
//! The constant must have a known size, which is 1, 2, 4, 8, 16, 32 or 64 bytes. The constant is added to the pool
//! only if it doesn't exist already; otherwise the offset of the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add 8-byte constant 0x1122334455667788 it
//! will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used frequently. However, AsmJit is not
//! able to reallocate a constant that has already been added. For example if you try to add a 4-byte constant and
//! then an 8-byte constant having the same 4-byte pattern as the previous one, two independent slots will be used.
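//!
//! A minimal usage sketch (the zone block size is illustrative):
//!
//! \code
//! Zone zone(4096);
//! ConstPool pool(&zone);
//!
//! uint64_t c = 0x1122334455667788u;
//! size_t offset;
//! pool.add(&c, sizeof(c), offset); // `offset` receives the constant's offset within the pool.
//! \endcode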
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
//! Fills the destination with the content of this constant pool.
ASMJIT_API void fill(void* dst) const noexcept;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,341 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/emithelper_p.h"
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/radefs_p.h"
// Can be used for debugging...
// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
ASMJIT_BEGIN_NAMESPACE
// BaseEmitHelper - Formatting
// ===========================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
if (value.isIndirect())
sb.append('[');
if (value.isReg())
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
else if (value.isStack())
sb.appendFormat("[%d]", value.stackOffset());
else
sb.append("<none>");
if (value.isIndirect())
sb.append(']');
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var;
Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount();
for (uint32_t i = 0; i < varCount; i++) {
const Var& var = ctx.var(i);
const FuncValue& dst = var.out;
const FuncValue& cur = var.cur;
sb.appendFormat("Var%u: ", i);
dumpFuncValue(sb, arch, dst);
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
if (var.isDone())
sb.append(" {Done}");
sb.append('\n');
}
}
#endif
// BaseEmitHelper - Abstract
// =========================
Error BaseEmitHelper::emitRegMove(const Operand_& dst_, const Operand_& src_, TypeId typeId, const char* comment) {
DebugUtils::unused(dst_, src_, typeId, comment);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitHelper::emitRegSwap(const BaseReg& a, const BaseReg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitHelper::emitArgMove(const BaseReg& dst_, TypeId dstTypeId, const Operand_& src_, TypeId srcTypeId, const char* comment) {
DebugUtils::unused(dst_, dstTypeId, src_, srcTypeId, comment);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitHelper - EmitArgsAssignment
// ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var;
typedef FuncArgsContext::WorkData WorkData;
enum WorkFlags : uint32_t {
kWorkNone = 0x00,
kWorkDidSome = 0x01,
kWorkPending = 0x02,
kWorkPostponed = 0x04
};
Arch arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
RAConstraints constraints;
FuncArgsContext ctx;
ASMJIT_PROPAGATE(constraints.init(arch));
ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
{
String sb;
dumpAssignment(sb, ctx);
printf("%s\n", sb.data());
}
#endif
auto& workData = ctx._workData;
uint32_t varCount = ctx._varCount;
uint32_t saVarId = ctx._saVarId;
BaseReg sp = BaseReg(_emitter->_gpSignature, archTraits.spRegId());
BaseReg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP())
sa.setId(archTraits.fpRegId());
else
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// Register to stack and stack to stack moves must be done first, as this is when we have the best chance of
// having as many unassigned registers as possible.
if (ctx._stackDstMask) {
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem baseStackPtr(sp, 0);
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (!var.out.isStack())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
ASMJIT_ASSERT(cur.isReg() || cur.isStack());
BaseReg reg;
BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
if (cur.isIndirect()) {
if (cur.isStack()) {
// TODO: Indirect stack.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
srcStackPtr.setBaseId(cur.regId());
}
}
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[archTraits.regTypeToGroup(cur.regType())];
uint32_t regId = cur.regId();
reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), regId);
wd.unassign(varId, regId);
}
else {
// Stack to reg move (as part of a stack to stack move) - tricky, but since we move stack to stack we can
// decide which register to use. In general we follow the rule that IntToInt moves will use GP regs with the
// possibility to sign or zero extend, and all other moves will either use GP or VEC regs depending on the size
// of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid()))
return DebugUtils::errored(kErrorInvalidState);
WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorInvalidState);
uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg())
workData[RegGroup::kGp].unassign(varId, cur.regId());
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
var.markDone();
}
}
// Shuffle all registers that are currently assigned accordingly to target assignment.
uint32_t workFlags = kWorkNone;
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
RegGroup curGroup = archTraits.regTypeToGroup(cur.regType());
RegGroup outGroup = archTraits.regTypeToGroup(out.regType());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
if (curGroup != outGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
WorkData& wd = workData[outGroup];
if (!wd.isAssigned(outId)) {
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
BaseReg(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
wd.reassign(varId, outId, curId);
cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId())
var.markDone();
workFlags |= kWorkDidSome | kWorkPending;
}
else {
uint32_t altId = wd._physToVarId[outId];
Var& altVar = ctx._vars[altId];
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
// Only a few architectures provide swap operations, and only for a few register groups.
if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16))
highestType = RegType::kGp32;
OperandSignature signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE(
emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
if (altVar.out.isInitialized())
altVar.markDone();
workFlags |= kWorkDidSome;
}
else {
// If there is a scratch register it can be used to perform the swap.
RegMask availableRegs = wd.availableRegs();
if (availableRegs) {
RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs)
availableRegs &= ~inOutRegs;
outId = Support::ctz(availableRegs);
goto EmitMove;
}
else {
workFlags |= kWorkPending;
}
}
}
else {
workFlags |= kWorkPending;
}
}
}
}
if (!(workFlags & kWorkPending))
break;
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
return DebugUtils::errored(kErrorInvalidState);
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// Load arguments passed by stack into registers. This is pretty simple and, unlike the previous phase, it needs
// at most two passes (a second pass happens only when the register holding the stack-arguments base address is
// itself a destination).
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone())
continue;
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());
uint32_t outId = var.out.regId();
RegType outType = var.out.regType();
RegGroup group = archTraits.regTypeToGroup(outType);
WorkData& wd = workData[group];
if (outId == sa.id() && group == RegGroup::kGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
continue;
}
wd.unassign(wd._physToVarId[outId], outId);
}
BaseReg dstReg = BaseReg(archTraits.regTypeToSignature(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
ASMJIT_PROPAGATE(emitArgMove(
dstReg, var.out.typeId(),
srcMem, var.cur.typeId()));
wd.assign(varId, outId);
var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
}
}
}
}
return kErrorOk;
}
ASMJIT_END_NAMESPACE


@@ -1,58 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
BaseEmitter* _emitter;
ASMJIT_INLINE_NODEBUG explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
: _emitter(emitter) {}
ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; }
ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers of the same type or between a register and its home
//! slot. This function does not handle register conversion.
virtual Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr) = 0;
//! Emits a swap between two registers.
virtual Error emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment = nullptr) = 0;
//! Emits a move from a function argument (either register or stack) to a register.
//!
//! This function can handle the necessary conversion from one argument to another, and from one register type
//! to another, if it's possible. Any attempt at a conversion that requires a third register of a different group
//! (for example conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) = 0;
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITHELPER_P_H_INCLUDED


@@ -1,407 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// BaseEmitter - Construction & Destruction
// ========================================
BaseEmitter::BaseEmitter(EmitterType emitterType) noexcept
: _emitterType(emitterType) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(EmitterFlags::kDestroyed);
_code->detach(this);
}
}
// BaseEmitter - Finalize
// ======================
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
}
// BaseEmitter - Internals
// =======================
static constexpr EmitterFlags kEmitterPreservedFlags = EmitterFlags::kOwnLogger | EmitterFlags::kOwnErrorHandler;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool emitComments = false;
bool hasDiagnosticOptions = false;
if (self->emitterType() == EmitterType::kAssembler) {
// Assembler: Don't emit comments if logger is not attached.
emitComments = self->_code != nullptr && self->_logger != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateAssembler);
}
else {
// Builder/Compiler: Always emit comments; we cannot assume they won't be used.
emitComments = self->_code != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
}
if (emitComments)
self->_addEmitterFlags(EmitterFlags::kLogComments);
else
self->_clearEmitterFlags(EmitterFlags::kLogComments);
// The reserved option tells the emitter (Assembler/Builder/Compiler) that there may be either a border
// case (CodeHolder not attached, for example) or that logging or validation is required.
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions)
self->_forcedInstOptions |= InstOptions::kReserved;
else
self->_forcedInstOptions &= ~InstOptions::kReserved;
}
// BaseEmitter - Diagnostic Options
// ================================
void BaseEmitter::addDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions |= options;
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions &= ~options;
BaseEmitter_updateForcedOptions(this);
}
// BaseEmitter - Logging
// =====================
void BaseEmitter::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(EmitterFlags::kOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnLogger);
if (_code)
_logger = _code->logger();
}
BaseEmitter_updateForcedOptions(this);
#else
DebugUtils::unused(logger);
#endif
}
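// Typical usage attaches a logger before anything is emitted - a minimal sketch (`emitter` is illustrative;
// FileLogger writes formatted output to a C stream):
//
//   FileLogger logger(stdout);
//   emitter->setLogger(&logger);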
// BaseEmitter - Error Handling
// ============================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(EmitterFlags::kOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
if (_code)
_errorHandler = _code->errorHandler();
}
}
Error BaseEmitter::reportError(Error err, const char* message) {
ErrorHandler* eh = _errorHandler;
if (eh) {
if (!message)
message = DebugUtils::errorAsString(err);
eh->handleError(err, message, this);
}
return err;
}
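// A user-provided ErrorHandler receives these reports. A minimal sketch (`MyErrorHandler` and the printf-based
// reporting are illustrative only):
//
//   class MyErrorHandler : public ErrorHandler {
//   public:
//     void handleError(Error err, const char* message, BaseEmitter* origin) override {
//       DebugUtils::unused(err, origin);
//       printf("AsmJit error: %s\n", message);
//     }
//   };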
// BaseEmitter - Sections
// ======================
// [[pure virtual]]
Error BaseEmitter::section(Section* section) {
DebugUtils::unused(section);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Labels
// ====================
// [[pure virtual]]
Label BaseEmitter::newLabel() {
return Label(Globals::kInvalidId);
}
// [[pure virtual]]
Label BaseEmitter::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
DebugUtils::unused(name, nameSize, type, parentId);
return Label(Globals::kInvalidId);
}
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : Globals::kInvalidId);
}
// [[pure virtual]]
Error BaseEmitter::bind(const Label& label) {
DebugUtils::unused(label);
return DebugUtils::errored(kErrorInvalidState);
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
}
// BaseEmitter - Emit (Low-Level)
// ==============================
using EmitterUtils::noExt;
Error BaseEmitter::_emitI(InstId instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
}
// [[pure virtual]]
Error BaseEmitter::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) {
DebugUtils::unused(instId, o0, o1, o2, oExt);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitter::_emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
const Operand_* op = operands;
Operand_ opExt[3];
switch (opCount) {
case 0:
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
case 1:
return _emit(instId, op[0], noExt[1], noExt[2], noExt);
case 2:
return _emit(instId, op[0], op[1], noExt[2], noExt);
case 3:
return _emit(instId, op[0], op[1], op[2], noExt);
case 4:
opExt[0] = op[3];
opExt[1].reset();
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 5:
opExt[0] = op[3];
opExt[1] = op[4];
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 6:
return _emit(instId, op[0], op[1], op[2], op + 3);
default:
return DebugUtils::errored(kErrorInvalidArgument);
}
}
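// A minimal sketch of calling the dispatcher above through the public emitOpArray() wrapper; the
// instruction id and operands are placeholders:
//
//   Operand ops[2] = { dstOperand, srcOperand };
//   emitter->emitOpArray(movInstId, ops, 2);  // Routed to the 2-operand case above.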
// BaseEmitter - Emit Utilities
// ============================
Error BaseEmitter::emitProlog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitProlog(this, frame);
}
Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitEpilog(this, frame);
}
Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return _funcs.emitArgsAssignment(this, frame, args);
}
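// A sketch of the typical call sequence for the three utilities above, assuming the FuncSignature::build()
// API of recent AsmJit releases:
//
//   FuncDetail detail;
//   detail.init(FuncSignature::build<int, int, int>(), code.environment());
//   FuncFrame frame;
//   frame.init(detail);
//   frame.finalize();
//   a.emitProlog(frame);
//   // ... function body ...
//   a.emitEpilog(frame);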
// BaseEmitter - Align
// ===================
// [[pure virtual]]
Error BaseEmitter::align(AlignMode alignMode, uint32_t alignment) {
DebugUtils::unused(alignMode, alignment);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Embed
// ===================
// [[pure virtual]]
Error BaseEmitter::embed(const void* data, size_t dataSize) {
DebugUtils::unused(data, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
DebugUtils::unused(typeId, data, itemCount, repeatCount);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabel(const Label& label, size_t dataSize) {
DebugUtils::unused(label, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
DebugUtils::unused(label, base, dataSize);
return DebugUtils::errored(kErrorInvalidState);
}
// BaseEmitter - Comment
// =====================
// [[pure virtual]]
Error BaseEmitter::comment(const char* data, size_t size) {
DebugUtils::unused(data, size);
return DebugUtils::errored(kErrorInvalidState);
}
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
va_list ap;
va_start(ap, fmt);
Error err = sb.appendVFormat(fmt, ap);
va_end(ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt);
return kErrorOk;
#endif
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
Error err = sb.appendVFormat(fmt, ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt, ap);
return kErrorOk;
#endif
}
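// A minimal usage sketch; formatted comments are only produced when the emitter logs comments
// (EmitterFlags::kLogComments), otherwise the call returns early as shown above:
//
//   a.commentf("unrolled iteration %u", i);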
// BaseEmitter - Events
// ====================
Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
_code = code;
_environment = code->environment();
_addEmitterFlags(EmitterFlags::kAttached);
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
RegType nativeRegType = Environment::is32Bit(code->arch()) ? RegType::kGp32 : RegType::kGp64;
_gpSignature = archTraits.regTypeToSignature(nativeRegType);
onSettingsUpdated();
return kErrorOk;
}
Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
DebugUtils::unused(code);
if (!hasOwnLogger())
_logger = nullptr;
if (!hasOwnErrorHandler())
_errorHandler = nullptr;
_clearEmitterFlags(~kEmitterPreservedFlags);
_forcedInstOptions = InstOptions::kReserved;
_privateData = 0;
_environment.reset();
_gpSignature.reset();
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;
return kErrorOk;
}
void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger())
_logger = _code->logger();
if (!hasOwnErrorHandler())
_errorHandler = _code->errorHandler();
BaseEmitter_updateForcedOptions(this);
}
ASMJIT_END_NAMESPACE

View File

@@ -1,765 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
#define ASMJIT_CORE_EMITTER_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/codeholder.h"
#include "../core/formatter.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
class ConstPool;
class FuncFrame;
class FuncArgsAssignment;
//! Align mode, used by \ref BaseEmitter::align().
enum class AlignMode : uint8_t {
//! Align executable code.
kCode = 0,
//! Align non-executable data.
kData = 1,
//! Align by a sequence of zeros.
kZero = 2,
//! Maximum value of `AlignMode`.
kMaxValue = kZero
};
//! Emitter type used by \ref BaseEmitter.
enum class EmitterType : uint8_t {
//! Unknown or uninitialized.
kNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kCompiler = 3,
//! Maximum value of `EmitterType`.
kMaxValue = kCompiler
};
//! Emitter flags, used by \ref BaseEmitter.
enum class EmitterFlags : uint8_t {
//! No flags.
kNone = 0u,
//! Emitter is attached to CodeHolder.
kAttached = 0x01u,
//! The emitter must emit comments.
kLogComments = 0x08u,
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFinalized = 0x40u,
//! The emitter was destroyed.
//!
//! This flag is used for a very short time when an emitter is being destroyed by
//! CodeHolder.
kDestroyed = 0x80u
};
ASMJIT_DEFINE_ENUM_FLAGS(EmitterFlags)
//! Encoding options.
enum class EncodingOptions : uint32_t {
//! No encoding options.
kNone = 0,
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try, if possible, to rewrite instructions into operationally
//! equivalent instructions that take fewer bytes, by taking advantage of implicit zero extension. For example,
//! instructions like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm` and `and r32, imm`
//! when the immediate constant is less than `2^31`.
kOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! The default align sequence used by the X86 architecture is a one-byte (0x90) opcode that is often shown by
//! disassemblers as NOP. However, there are more optimized align sequences for 2-11 bytes that may execute
//! faster on certain CPUs. If this feature is enabled, AsmJit will generate specialized sequences for alignment
//! of 2 to 11 bytes.
kOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the jump is backward it is usually
//! predicted as taken, and if the jump is forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps. However, this behavior can be
//! overridden by using instruction prefixes. If this option is enabled, these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that used to take prediction hints into
//! consideration was P4. Newer processors implement heuristics for branch prediction and ignore static hints.
//! This means that this feature can only be used for annotation purposes.
kPredictedJumps = 0x00000010u
};
ASMJIT_DEFINE_ENUM_FLAGS(EncodingOptions)
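// A minimal usage sketch; the flags combine via the operators defined above and are consumed by
// BaseEmitter::addEncodingOptions() declared later in this file:
//
//   a.addEncodingOptions(EncodingOptions::kOptimizeForSize |
//                        EncodingOptions::kOptimizedAlign);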
//! Diagnostic options are used to tell emitters and their passes to perform diagnostics when emitting or processing
//! user code. These options control validation and extra diagnostics that can be performed by higher level emitters.
//!
//! Instruction Validation
//! ----------------------
//!
//! \ref BaseAssembler implementations perform, by default, only basic checks that are necessary to identify all
//! variations of an instruction so the correct encoding can be selected. This is fine for production-ready code
//! as the assembler doesn't have to perform checks that would slow it down. However, sometimes these checks are
//! beneficial, especially when the project that uses AsmJit is in a development phase, in which mistakes happen
//! often. To make the experience of using AsmJit seamless, it offers validation features that can be controlled
//! by \ref DiagnosticOptions.
//!
//! Compiler Diagnostics
//! --------------------
//!
//! Diagnostic options work with \ref BaseCompiler passes (precisely with its register allocation pass). These options
//! can be used to enable logging of all operations that the Compiler does.
enum class DiagnosticOptions : uint32_t {
//! No validation options.
kNone = 0,
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded into a binary representation. This flag
//! is only relevant for \ref BaseAssembler implementations, but can be set in any other emitter type as well; in
//! that case, if such an emitter needs to create an assembler on its own for the purpose of
//! \ref BaseEmitter::finalize(), it would propagate this flag to that assembler so all instructions passed to it
//! are explicitly validated.
//!
//! Default: false.
kValidateAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref InstNode representing the instruction is
//! created by \ref BaseBuilder or \ref BaseCompiler. This option can be more useful than \ref kValidateAssembler
//! in cases in which an invalid instruction passed to an assembler became invalid much earlier, most likely at
//! the time it was passed to the Builder/Compiler.
//!
//! This is a separate option, introduced because it's possible to manipulate the instruction stream emitted by
//! \ref BaseBuilder and \ref BaseCompiler - this means that it's allowed to emit invalid instructions (for
//! example with missing operands) that will be fixed later, before finalizing.
//!
//! Default: false.
kValidateIntermediate = 0x00000002u,
//! Annotate all nodes processed by register allocator (Compiler/RA).
//!
//! \note Annotations don't require debug options; however, some debug options like `kRADebugLiveness` may
//! influence their output (for example, the mentioned option would add liveness information to per-instruction
//! annotations).
kRAAnnotate = 0x00000080u,
//! Debug CFG generation and other related algorithms / operations (Compiler/RA).
kRADebugCFG = 0x00000100u,
//! Debug liveness analysis (Compiler/RA).
kRADebugLiveness = 0x00000200u,
//! Debug register allocation assignment (Compiler/RA).
kRADebugAssignment = 0x00000400u,
//! Debug the removal of code part of unreachable blocks.
kRADebugUnreachable = 0x00000800u,
//! Enable all debug options (Compiler/RA).
kRADebugAll = 0x0000FF00u,
};
ASMJIT_DEFINE_ENUM_FLAGS(DiagnosticOptions)
//! Provides a base foundation to emitting code - specialized by \ref BaseAssembler and \ref BaseBuilder.
class ASMJIT_VIRTAPI BaseEmitter {
public:
ASMJIT_BASE_CLASS(BaseEmitter)
//! \name Members
//! \{
//! See \ref EmitterType.
EmitterType _emitterType = EmitterType::kNone;
//! See \ref EmitterFlags.
EmitterFlags _emitterFlags = EmitterFlags::kNone;
//! Validation flags in case validation is used.
//!
//! \note Validation flags are specific to the emitter and they are setup at construction time and then never
//! changed.
ValidationFlags _validationFlags = ValidationFlags::kNone;
//! Validation options.
DiagnosticOptions _diagnosticOptions = DiagnosticOptions::kNone;
//! All supported architectures in a bit-mask, where LSB is the bit with a zero index.
uint64_t _archMask = 0;
//! Encoding options.
EncodingOptions _encodingOptions = EncodingOptions::kNone;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
InstOptions _forcedInstOptions = InstOptions::kReserved;
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
//! CodeHolder the emitter is attached to.
CodeHolder* _code = nullptr;
//! Attached \ref Logger.
Logger* _logger = nullptr;
//! Attached \ref ErrorHandler.
ErrorHandler* _errorHandler = nullptr;
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature and signature related information.
OperandSignature _gpSignature {};
//! Emitter state that can be used to specify options and inline comment of a next node or instruction.
struct State {
InstOptions options;
RegOnly extraReg;
const char* comment;
};
//! Next instruction options (affects the next instruction).
InstOptions _instOptions = InstOptions::kNone;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
//! Function callbacks used by emitter implementation.
//!
//! These are typically shared between Assembler/Builder/Compiler of a single backend.
struct Funcs {
typedef Error (ASMJIT_CDECL* EmitProlog)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitEpilog)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitArgsAssignment)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
typedef Error (ASMJIT_CDECL* FormatInstruction)(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) ASMJIT_NOEXCEPT_TYPE;
typedef Error (ASMJIT_CDECL* ValidateFunc)(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) ASMJIT_NOEXCEPT_TYPE;
//! Emit prolog implementation.
EmitProlog emitProlog;
//! Emit epilog implementation.
EmitEpilog emitEpilog;
//! Emit arguments assignment implementation.
EmitArgsAssignment emitArgsAssignment;
//! Instruction formatter implementation.
FormatInstruction formatInstruction;
//! Instruction validation implementation.
ValidateFunc validate;
//! Resets all functions to nullptr.
ASMJIT_INLINE_NODEBUG void reset() noexcept {
emitProlog = nullptr;
emitEpilog = nullptr;
emitArgsAssignment = nullptr;
formatInstruction = nullptr;
validate = nullptr;
}
};
Funcs _funcs {};
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(EmitterType emitterType) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
//! \name Cast
//! \{
template<typename T>
ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T>
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \}
//! \name Emitter Type & Flags
//! \{
//! Returns the type of this emitter, see `EmitterType`.
ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags, see \ref EmitterFlags.
ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`.
ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
//! Tests whether the emitter is finalized.
ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
ASMJIT_INLINE_NODEBUG void _addEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags |= flags; }
ASMJIT_INLINE_NODEBUG void _clearEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags &= ~flags; }
//! \}
//! \name Target Information
//! \{
//! Returns the CodeHolder this emitter is attached to.
ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit.
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit.
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! \}
//! \name Initialization & Finalization
//! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
//!
//! Materializes the content of the emitter by serializing it to the attached \ref CodeHolder through an architecture
//! specific \ref BaseAssembler. This function won't do anything if the emitter inherits from \ref BaseAssembler as
//! assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder. However, if this is an emitter that
//! inherits from \ref BaseBuilder or \ref BaseCompiler then these emitters need the materialization phase as they
//! store their content in a representation not visible to \ref CodeHolder.
ASMJIT_API virtual Error finalize();
//! \}
//! \name Logging
//! \{
//! Tests whether the emitter has a logger.
ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to.
ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or the logger used by \ref CodeHolder this emitter
//! is attached to.
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered emitter's own logger, see \ref
//! hasOwnLogger() for more details. If the given `logger` is null then the emitter will automatically use logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will fall back to using the logger attached to \ref CodeHolder this emitter is attached to, or
//! no logger at all if \ref CodeHolder doesn't have one.
ASMJIT_INLINE_NODEBUG void resetLogger() noexcept { return setLogger(nullptr); }
//! \}
//! \name Error Handling
//! \{
//! Tests whether the emitter has an error handler attached.
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to.
ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or the error handler used by
//! \ref CodeHolder this emitter is attached to.
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
//! Resets the error handler.
ASMJIT_INLINE_NODEBUG void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its \ref ErrorHandler::handleError() member function
//! first, and then returns the error. The `handleError()` function may throw.
//! 2. If the emitter doesn't have an \ref ErrorHandler, the error is simply returned.
ASMJIT_API Error reportError(Error err, const char* message = nullptr);
//! \}
//! \name Encoding Options
//! \{
//! Returns encoding options.
ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
//! Enables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void addEncodingOptions(EncodingOptions options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void clearEncodingOptions(EncodingOptions options) noexcept { _encodingOptions &= ~options; }
//! \}
//! \name Diagnostic Options
//! \{
//! Returns the emitter's diagnostic options.
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
//! Tests whether the given `option` is present in the emitter's diagnostic options.
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Activates the given diagnostic `options`.
//!
//! This function is used to activate explicit validation options that will then be used by all emitter
//! implementations. There are in general two possibilities:
//!
//! - Architecture specific assembler is used. In this case \ref DiagnosticOptions::kValidateAssembler can be
//! used to turn on explicit validation that is performed before an instruction is emitted. This means that
//! internally an extra step will be performed to make sure that the instruction is correct. This is needed,
//! because by default assemblers prefer speed over strictness.
//!
//! This option should be used in debug builds as it's pretty expensive.
//!
//! - Architecture specific builder or compiler is used. In this case the user can turn on the
//! \ref DiagnosticOptions::kValidateIntermediate option that adds an explicit validation step before the
//! Builder or Compiler creates an \ref InstNode to represent an emitted instruction. An error will be returned
//! if the instruction is ill-formed. In addition, \ref DiagnosticOptions::kValidateAssembler can also be used;
//! it would not be consumed by the Builder / Compiler directly, but it would be propagated to the architecture
//! specific \ref BaseAssembler implementation it creates during \ref BaseEmitter::finalize().
ASMJIT_API void addDiagnosticOptions(DiagnosticOptions options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addDiagnosticOptions() and \ref DiagnosticOptions for more details.
ASMJIT_API void clearDiagnosticOptions(DiagnosticOptions options) noexcept;
//! \}
//! \name Instruction Options
//! \{
//! Returns forced instruction options.
//!
//! Forced instruction options are merged with the next instruction options before the instruction is encoded.
//! These options have some bits reserved that are used for error handling, logging, and instruction validation
//! purposes. Other options are global and affect each instruction.
ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction.
ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
//! Tests whether the extra register operand is valid.
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction.
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until then it has to remain valid as the emitter is
//! not required to make a copy of it (it would be slow to do that for each instruction).
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; }
//! \}
//! \name Emitter State
//! \{
ASMJIT_INLINE_NODEBUG void resetState() noexcept {
resetInstOptions();
resetExtraReg();
resetInlineComment();
}
ASMJIT_INLINE_NODEBUG State _grabState() noexcept {
State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment};
resetState();
return s;
}
//! \}
//! \name Sections
//! \{
ASMJIT_API virtual Error section(Section* section) = 0;
//! \}
//! \name Labels
//! \{
//! Creates a new label.
ASMJIT_API virtual Label newLabel() = 0;
//! Creates a new named label.
ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
//! Creates a new anonymous label with a name, which can only be used for debugging purposes.
ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
//! Creates a new external label.
ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
//! Returns `Label` by `name`.
//!
//! Returns an invalid Label if the name is invalid or the label was not found.
//!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You
//! must always check the validity of the `Label` returned.
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section.
//!
//! \note Attempt to bind the same label multiple times will return an error.
ASMJIT_API virtual Error bind(const Label& label) = 0;
//! Tests whether the label `id` is valid (i.e. registered).
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
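// A minimal sketch of the label workflow declared above, assuming an architecture-specific assembler `a`:
//
//   Label L_Exit = a.newLabel();     // Creates a label id; not bound yet.
//   // ... jumps to L_Exit can be emitted before it's bound ...
//   a.bind(L_Exit);                  // Binds it to the current position.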
//! \}
//! \name Emit
//! \{
// NOTE: These `emit()` helpers are designed to address code-bloat generated by C++ compilers when calling a
// function having many arguments. Each parameter passed to `_emit()` requires some code, which means that if we
// defaulted to 5 operand arguments in `_emit()` in addition to `instId`, the C++ compiler would have to generate
// a virtual function call having 5 parameters plus the additional `this` argument, which is quite a lot. Since
// by default most instructions have 2 to 3 operands, it's better to introduce helpers that pass from 0 to 6
// operands and help to reduce the size of each emit(...) call.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(InstId instId);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
template<typename... Args>
ASMJIT_INLINE_NODEBUG Error emit(InstId instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
}
ASMJIT_INLINE_NODEBUG Error emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
}
ASMJIT_FORCE_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
}
//! \cond INTERNAL
//! Emits an instruction - all 6 operands must be defined.
ASMJIT_API virtual Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(InstId instId, const Operand_* operands, size_t opCount);
//! \endcond
//! \}
//! \name Emit Utilities
//! \{
ASMJIT_API Error emitProlog(const FuncFrame& frame);
ASMJIT_API Error emitEpilog(const FuncFrame& frame);
ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
//! \}
//! \name Align
//! \{
//! Aligns the current CodeBuffer position to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location and the current location depends on the
//! align `mode`, see \ref AlignMode. The `alignment` argument specifies alignment in bytes, so for example when
//! it's `32` it means that the code buffer will be aligned to `32` bytes.
ASMJIT_API virtual Error align(AlignMode alignMode, uint32_t alignment) = 0;
//! \}
//! \name Embed
//! \{
//! Embeds raw data into the \ref CodeBuffer.
ASMJIT_API virtual Error embed(const void* data, size_t dataSize) = 0;
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data as it allows you to:
//!
//! - Assign a `typeId` to the data, so the emitter knows the type of items stored in `data`. Binary data should
//! use \ref TypeId::kUInt8.
//!
//! - Repeat the given data `repeatCount` times, so the data can be used as a fill pattern for example, or as a
//! pattern used by SIMD instructions.
ASMJIT_API virtual Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
//! Embeds int8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<float>::kTypeId), &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<double>::kTypeId), &value, 1, repeatCount); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns by using AlignMode::kData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the size of the address data. If it's zero
//! (default) the address size is deduced from the target architecture (either 4 or 8 bytes).
ASMJIT_API virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
//! Embeds a delta (distance) between the `label` and `base` calculating it as `label - base`. This function was
//! designed to make it easier to embed lookup tables where each index is a relative distance of two labels.
ASMJIT_API virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
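// A minimal sketch of embedding data and label-relative deltas, e.g. for a lookup table; `L_Table`,
// `L_Case0` and `L_Case1` are placeholder labels:
//
//   a.embedUInt8(0xCC, 16);                  // 16 filler bytes.
//   a.bind(L_Table);
//   a.embedLabelDelta(L_Case0, L_Table, 4);  // int32_t(L_Case0 - L_Table)
//   a.embedLabelDelta(L_Case1, L_Table, 4);  // int32_t(L_Case1 - L_Table)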
//! \}
//! \name Comment
//! \{
//! Emits a comment stored in `data` with an optional `size` parameter.
ASMJIT_API virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
//! Emits a formatted comment specified by `fmt` and variable number of arguments.
ASMJIT_API Error commentf(const char* fmt, ...);
//! Emits a formatted comment specified by `fmt` and `ap`.
ASMJIT_API Error commentv(const char* fmt, va_list ap);
//! \}
//! \name Events
//! \{
//! Called after the emitter was attached to `CodeHolder`.
ASMJIT_API virtual Error onAttach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;
//! Called after the emitter was detached from `CodeHolder`.
ASMJIT_API virtual Error onDetach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;
//! Called when \ref CodeHolder has updated an important setting, which involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been called).
//!
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler() has been called).
//!
//! This function ensures that the settings are properly propagated from \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden, however, if you do so, always call \ref
//! BaseEmitter::onSettingsUpdated() within your own implementation to ensure that the emitter is
//! in a consistent state.
ASMJIT_API virtual void onSettingsUpdated() noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTER_H_INCLUDED

View File

@@ -1,127 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter_p.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept {
ASMJIT_ASSERT(binSize >= offsetSize);
const size_t kNoBinSize = SIZE_MAX;
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
char sep = ';';
size_t padding = Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
ASMJIT_PROPAGATE(sb.padEnd(padding));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
ASMJIT_PROPAGATE(sb.append(' '));
}
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - offsetSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', offsetSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
}
sep = '|';
padding += Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kMachineCode);
}
}
return sb.append('\n');
}
void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatFlags::kMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append(':');
finishFormattedLine(sb, logger->options(), nullptr, binSize, 0, 0, self->_inlineComment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
BaseAssembler* self,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
FormatFlags formatFlags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (Support::test(formatFlags, FormatFlags::kMachineCode))
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
logger->log(sb);
}
Error logInstructionFailed(
BaseEmitter* self,
Error err,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
self->_funcs.formatInstruction(sb, FormatFlags::kRegType, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (self->inlineComment()) {
sb.append(" ; ");
sb.append(self->inlineComment());
}
self->resetState();
return self->reportError(err, sb.data());
}
#endif
} // {EmitterUtils}
ASMJIT_END_NAMESPACE

View File

@@ -1,89 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
class BaseAssembler;
class FormatOptions;
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
//! Utilities used by various emitters, mostly Assembler implementations.
namespace EmitterUtils {
//! Default paddings used by Emitter utils and Formatter.
static constexpr Operand noExt[3] = { {}, {}, {} };
enum kOpIndex : uint32_t {
kOp3 = 0,
kOp4 = 1,
kOp5 = 2
};
static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
if (!o0.isNone()) opCount = 1;
if (!o1.isNone()) opCount = 2;
if (!o2.isNone()) opCount = 3;
}
else {
opCount = 4;
if (!opExt[kOp4].isNone()) {
opCount = 5 + uint32_t(!opExt[kOp5].isNone());
}
}
return opCount;
}
static ASMJIT_FORCE_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
dst[3].copyFrom(opExt[kOp3]);
dst[4].copyFrom(opExt[kOp4]);
dst[5].copyFrom(opExt[kOp5]);
}
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
BaseAssembler* self,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
Error logInstructionFailed(
BaseEmitter* self,
Error err,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
#endif
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED

View File

@@ -1,46 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/environment.h"
ASMJIT_BEGIN_NAMESPACE
// X86 Target
// ----------
//
// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
// stack alignment. Other operating systems are assumed to have
// 4-byte alignment by default for safety reasons.
// - 64-bit - stack must be aligned to 16 bytes.
//
// ARM Target
// ----------
//
// - 32-bit - Stack must be aligned to 8 bytes.
// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
uint32_t Environment::stackAlignment() const noexcept {
if (is64Bit()) {
// Assume 16-byte alignment on any 64-bit target.
return 16;
}
else {
// The following platforms use 16-byte alignment in 32-bit mode.
if (isPlatformLinux() ||
isPlatformBSD() ||
isPlatformApple() ||
isPlatformHaiku()) {
return 16u;
}
if (isFamilyARM())
return 8;
// Bail to 4-byte alignment if we don't know.
return 4;
}
}
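// A minimal usage sketch; for a JIT use-case the host environment is the common input:
//
//   uint32_t alignment = Environment::host().stackAlignment();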
ASMJIT_END_NAMESPACE

View File

@@ -1,508 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#include "../core/archtraits.h"
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided for future use, if required.
enum class Vendor : uint8_t {
//! Unknown or uninitialized platform vendor.
kUnknown = 0,
//! Maximum value of `Vendor`.
kMaxValue = kUnknown,
//! Platform vendor detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Platform - runtime environment or operating system.
enum class Platform : uint8_t {
//! Unknown or uninitialized platform.
kUnknown = 0,
//! Windows OS.
kWindows,
//! Other platform that is not Windows, most likely POSIX based.
kOther,
//! Linux OS.
kLinux,
//! GNU/Hurd OS.
kHurd,
//! FreeBSD OS.
kFreeBSD,
//! OpenBSD OS.
kOpenBSD,
//! NetBSD OS.
kNetBSD,
//! DragonFly BSD OS.
kDragonFlyBSD,
//! Haiku OS.
kHaiku,
//! Apple OSX.
kOSX,
//! Apple iOS.
kIOS,
//! Apple TVOS.
kTVOS,
//! Apple WatchOS.
kWatchOS,
//! Emscripten platform.
kEmscripten,
//! Maximum value of `Platform`.
kMaxValue = kEmscripten,
//! Platform detected at compile-time (platform of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(__EMSCRIPTEN__)
kEmscripten
#elif defined(_WIN32)
kWindows
#elif defined(__linux__)
kLinux
#elif defined(__gnu_hurd__)
kHurd
#elif defined(__FreeBSD__)
kFreeBSD
#elif defined(__OpenBSD__)
kOpenBSD
#elif defined(__NetBSD__)
kNetBSD
#elif defined(__DragonFly__)
kDragonFlyBSD
#elif defined(__HAIKU__)
kHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kIOS
#else
kOther
#endif
};
//! Platform ABI (application binary interface).
enum class PlatformABI : uint8_t {
//! Unknown or uninitialized environment.
kUnknown = 0,
//! Microsoft ABI.
kMSVC,
//! GNU ABI.
kGNU,
//! Android Environment / ABI.
kAndroid,
//! Cygwin ABI.
kCygwin,
//! Maximum value of `PlatformABI`.
kMaxValue,
//! Host ABI detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(_MSC_VER)
kMSVC
#elif defined(__CYGWIN__)
kCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kGNU
#elif defined(__ANDROID__)
kAndroid
#else
kUnknown
#endif
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref ObjectFormat::kUnknown and \ref ObjectFormat::kJIT at
//! the moment. Object file formats are provided for future extensibility and a possibility to generate object
//! files at some point.
enum class ObjectFormat : uint8_t {
//! Unknown or uninitialized object format.
kUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kJIT,
//! Executable and linkable format (ELF).
kELF,
//! Common object file format.
kCOFF,
//! Extended COFF object format.
kXCOFF,
//! Mach object file format.
kMachO,
//! Maximum value of `ObjectFormat`.
kMaxValue
};
//! Represents an environment, which is usually related to a \ref Target.
//!
//! An environment usually has an 'arch-subarch-vendor-os-abi' format, which is sometimes called a "Triple"
//! (historically it used to have only 3 parts) or a "Tuple", which is a convention used by Debian Linux.
//!
//! AsmJit doesn't support all possible combinations or architectures and ABIs, however, it models the environment
//! similarly to other compilers for future extensibility.
class Environment {
public:
//! \name Members
//! \{
//! Architecture.
Arch _arch;
//! Sub-architecture type.
SubArch _subArch;
//! Vendor type.
Vendor _vendor;
//! Platform.
Platform _platform;
//! Platform ABI.
PlatformABI _platformABI;
//! Object format.
ObjectFormat _objectFormat;
//! Reserved for future use, must be zero.
uint8_t _reserved[2];
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG Environment() noexcept :
_arch(Arch::kUnknown),
_subArch(SubArch::kUnknown),
_vendor(Vendor::kUnknown),
_platform(Platform::kUnknown),
_platformABI(PlatformABI::kUnknown),
_objectFormat(ObjectFormat::kUnknown),
_reserved { 0, 0 } {}
ASMJIT_INLINE_NODEBUG explicit Environment(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI abi = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown) noexcept {
init(arch, subArch, vendor, platform, abi, objectFormat);
}
ASMJIT_INLINE_NODEBUG Environment(const Environment& other) noexcept = default;
//! Returns the host environment constructed from preprocessor macros defined by the compiler.
//!
//! The returned environment should precisely match the target host architecture, sub-architecture, platform,
//! and ABI.
static ASMJIT_INLINE_NODEBUG Environment host() noexcept {
return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown);
}
//! \}
//! \name Overloaded Operators
//! \{
ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default;
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); }
ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the environment is not set up.
//!
//! Returns true if all members are zero, and thus unknown.
ASMJIT_INLINE_NODEBUG bool empty() const noexcept {
// Unfortunately compilers won't optimize this well if the fields are checked one by one...
return _packed() == 0;
}
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept {
return _arch != Arch::kUnknown;
}
ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept {
uint64_t x;
memcpy(&x, this, 8);
return x;
}
//! Resets all members of the environment to zero / unknown.
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_arch = Arch::kUnknown;
_subArch = SubArch::kUnknown;
_vendor = Vendor::kUnknown;
_platform = Platform::kUnknown;
_platformABI = PlatformABI::kUnknown;
_objectFormat = ObjectFormat::kUnknown;
_reserved[0] = 0;
_reserved[1] = 0;
}
ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept {
return _packed() == other._packed();
}
//! Returns the architecture.
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the sub-architecture.
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns vendor.
ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system.
ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; }
//! Returns target's ABI.
ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; }
//! Returns target's object format.
ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; }
inline void init(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown) noexcept {
_arch = arch;
_subArch = subArch;
_vendor = vendor;
_platform = platform;
_platformABI = platformABI;
_objectFormat = objectFormat;
_reserved[0] = 0;
_reserved[1] = 0;
}
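// A minimal sketch of constructing an environment for cross-compilation using the init() overload above
// (fields that are not set stay kUnknown):
//
//   Environment env(Arch::kX64);
//   env.setPlatform(Platform::kLinux);
//   env.setPlatformABI(PlatformABI::kGNU);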
ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; }
ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; }
ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); }
ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); }
ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit.
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit.
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian.
ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian.
ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family.
ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is ARM, THUMB, or AArch64.
ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is AArch32 (ARM or THUMB).
ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
//! Tests whether this architecture family is AArch64.
ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
//! Tests whether this architecture family is MIPS32 or MIPS64.
ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether the environment platform is Windows.
ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux.
ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd.
ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku.
ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD.
ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept {
return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD ||
_platform == Platform::kNetBSD ||
_platform == Platform::kDragonFlyBSD;
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept {
return _platform == Platform::kOSX ||
_platform == Platform::kIOS ||
_platform == Platform::kTVOS ||
_platform == Platform::kWatchOS;
}
//! Tests whether the ABI is MSVC.
ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU.
ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
//! Returns a calculated stack alignment for this environment.
ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture.
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; }
//! Sets the sub-architecture to `subArch`.
ASMJIT_INLINE_NODEBUG void setSubArch(SubArch subArch) noexcept { _subArch = subArch; }
//! Sets the vendor to `vendor`.
ASMJIT_INLINE_NODEBUG void setVendor(Vendor vendor) noexcept { _vendor = vendor; }
//! Sets the platform to `platform`.
ASMJIT_INLINE_NODEBUG void setPlatform(Platform platform) noexcept { _platform = platform; }
//! Sets the ABI to `platformABI`.
ASMJIT_INLINE_NODEBUG void setPlatformABI(PlatformABI platformABI) noexcept { _platformABI = platformABI; }
//! Sets the object format to `objectFormat`.
ASMJIT_INLINE_NODEBUG void setObjectFormat(ObjectFormat objectFormat) noexcept { _objectFormat = objectFormat; }
//! \}
//! \name Static Utilities
//! \{
//! Tests whether the given architecture `arch` is within the range of defined `Arch` values (kUnknown included).
static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is valid (defined and not kUnknown).
static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is 32-bit.
static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
}
//! Tests whether the given architecture `arch` is 64-bit.
static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture `arch` is big endian.
static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture is Thumb or Thumb_BE.
static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
}
//! Tests whether the given architecture is ARM or ARM_BE.
static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE;
}
//! Tests whether the given architecture is AArch64 or AArch64_BE.
static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
}
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
}
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
}
//! Tests whether the given architecture family is X86 or X64.
static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64;
}
//! Tests whether the given architecture family is ARM, THUMB, or AArch64.
static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept {
return isArchARM(arch) || isArchAArch64(arch) || isArchThumb(arch);
}
//! Tests whether the given architecture family is AArch32 (ARM or THUMB).
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept {
return isArchARM(arch) || isArchThumb(arch);
}
//! Tests whether the given architecture family is AArch64.
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept {
return isArchAArch64(arch);
}
//! Tests whether the given architecture family is MIPS32 or MIPS64.
static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch);
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
}
//! Returns a native general purpose register size from the given architecture.
static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}
//! \}
};
static_assert(sizeof(Environment) == 8,
"Environment must occupy exactly 8 bytes.");
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
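
For reference, a minimal usage sketch of the `Environment` API above; it is not part of the original sources, and the `Arch::kX64` / `Platform::kLinux` values are purely illustrative:

```
#include <asmjit/core.h>
#include <cstdio>

using namespace asmjit;

int main() {
  Environment env;
  env.init(Arch::kX64, SubArch::kUnknown, Vendor::kUnknown, Platform::kLinux);

  std::printf("initialized: %d\n", env.isInitialized()); // 1 - architecture is known.
  std::printf("64-bit:      %d\n", env.is64Bit());       // 1 on X64.
  std::printf("reg size:    %u\n", env.registerSize());  // 8 on any 64-bit architecture.

  env.reset();
  std::printf("empty:       %d\n", env.empty());         // 1 - all members are zero again.
  return 0;
}
```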

View File

@@ -1,18 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
ASMJIT_BEGIN_NAMESPACE
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
void ErrorHandler::handleError(Error err, const char* message, BaseEmitter* origin) {
DebugUtils::unused(err, message, origin);
}
ASMJIT_END_NAMESPACE

View File

@@ -1,228 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_error_handling
//! \{
class BaseEmitter;
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override \ref ErrorHandler::handleError() to implement
//! your own error handler.
//!
//! The following use-cases are supported:
//!
//! - Record the error and continue code generation. This is the simplest approach that can be used to at least log
//! possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal
//! to throw an exception from the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler, Builder and Compiler into
//! a consistent state before calling \ref handleError(), so `longjmp()` can be used without issues to cancel the
//! code generation if an error occurred. This method can be used if exception handling in your project is turned
//! off but you still want a structured way to abort code generation. In most cases it is safe, as AsmJit uses \ref Zone memory and the
//! ownership of memory it allocates always ends with the instance that allocated it. If using this approach please
//! never jump outside the life-time of \ref CodeHolder and \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter; a handler attached to the emitter takes
//! priority over one attached to the code holder. The example below uses an error handler that just prints the
//! error, but lets AsmJit continue:
//!
//! ```
//! // Error Handling #1 - Logging and returning Error.
//! #include <asmjit/x86.h>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that just prints the error and lets AsmJit ignore it.
//! class SimpleErrorHandler : public ErrorHandler {
//! public:
//! Error err;
//!
//! inline SimpleErrorHandler() : err(kErrorOk) {}
//!
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! this->err = err;
//! fprintf(stderr, "ERROR: %s\n", message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! SimpleErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! // Try to emit instruction that doesn't exist.
//! x86::Assembler a(&code);
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//!
//! if (eh.err) {
//! // Assembler failed!
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If an error happens during instruction emitting / encoding, the assembler behaves transactionally - the output buffer
//! won't advance if encoding failed, thus either a fully encoded instruction or nothing is emitted. The error handling
//! shown above is useful, but it's still not the best way of dealing with errors in AsmJit. The following example
//! shows how to use exception handling to handle errors in a more C++ way:
//!
//! ```
//! // Error Handling #2 - Throwing an exception.
//! #include <asmjit/x86.h>
//! #include <exception>
//! #include <string>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that throws a user-defined `AsmJitException`.
//! class AsmJitException : public std::exception {
//! public:
//! Error err;
//! std::string message;
//!
//! AsmJitException(Error err, const char* message) noexcept
//! : err(err),
//! message(message) {}
//!
//! const char* what() const noexcept override { return message.c_str(); }
//! };
//!
//! class ThrowableErrorHandler : public ErrorHandler {
//! public:
//!   // Throwing is possible because functions that use ErrorHandler are never 'noexcept'.
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! throw AsmJitException(err, message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! ThrowableErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! // Try to emit instruction that doesn't exist.
//! try {
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! catch (const AsmJitException& ex) {
//! printf("EXCEPTION THROWN: %s\n", ex.what());
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If C++ exceptions are not what you like, or your project disables them completely, there is still a way of reducing
//! the error handling to a minimum by using a standard setjmp/longjmp approach. AsmJit is exception-safe and cleans
//! up everything before calling the ErrorHandler, so any approach is safe. You can simply jump from the error handler
//! without causing any side-effects or memory leaks. The following example demonstrates how it could be done:
//!
//! ```
//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
//! #include <asmjit/x86.h>
//! #include <setjmp.h>
//! #include <stdio.h>
//!
//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
//! public:
//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
//!
//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! this->err = err;
//! longjmp(state, 1);
//! }
//!
//! jmp_buf state;
//! asmjit::Error err;
//! };
//!
//! int main(int argc, char* argv[]) {
//! using namespace asmjit;
//!
//! JitRuntime rt;
//! LongJmpErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! if (!setjmp(eh.state)) {
//! // Try to emit instruction that doesn't exist.
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! else {
//! Error err = eh.err;
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
//! }
//!
//! return 0;
//! }
//! ```
class ASMJIT_VIRTAPI ErrorHandler {
public:
ASMJIT_BASE_CLASS(ErrorHandler)
//! \name Construction & Destruction
//! \{
//! Creates a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroys the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
//! \}
//! \name Interface
//! \{
//! Error handler (must be reimplemented).
//!
//! The error handler is called after an error happened and before it's propagated to the caller. There are multiple
//! ways the error handler can be used:
//!
//! 1. User-based error handling without throwing an exception or using C's `longjmp()`. This is for users who don't
//! use exceptions and want customized error handling.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely exception-safe, but you can throw
//!    an exception from your error handler if that is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts `BaseEmitter` into a consistent state before
//! calling `handleError()` so `longjmp()` can be used without any issues to cancel the code generation if an
//! error occurred. There is no difference between exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you would leak memory.
ASMJIT_API virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
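
Complementing the three examples above, a short hedged sketch of the attachment priority: a handler set on the emitter is consulted before the one set on `CodeHolder` (`PrintingErrorHandler` is an illustrative name, not part of the library):

```
#include <asmjit/x86.h>
#include <stdio.h>

using namespace asmjit;

// Illustrative handler - logs the error and lets AsmJit continue.
class PrintingErrorHandler : public ErrorHandler {
public:
  void handleError(Error err, const char* message, BaseEmitter* origin) override {
    DebugUtils::unused(err, origin);
    fprintf(stderr, "ERROR: %s\n", message);
  }
};

int main() {
  JitRuntime rt;
  PrintingErrorHandler global, local;

  CodeHolder code;
  code.init(rt.environment(), rt.cpuFeatures());
  code.setErrorHandler(&global);  // Fallback for every emitter attached to `code`.

  x86::Assembler a(&code);
  a.setErrorHandler(&local);      // Overrides `global` for this emitter only.

  a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1); // Invalid - reported to `local`.
  return 0;
}
```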

View File

@@ -1,584 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/archtraits.h"
#include "../core/builder.h"
#include "../core/codeholder.h"
#include "../core/compiler.h"
#include "../core/emitter.h"
#include "../core/formatter_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86formatter_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64formatter_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_NO_COMPILER)
class VirtReg;
#endif
namespace Formatter {
static const char wordNameTable[][8] = {
"db",
"dw",
"dd",
"dq",
"byte",
"half",
"word",
"hword",
"dword",
"qword",
"xword",
"short",
"long",
"quad"
};
Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid)
return sb.append("void");
if (!TypeUtils::isValid(typeId))
return sb.append("unknown");
const char* typeName = "unknown";
uint32_t typeSize = TypeUtils::sizeOf(typeId);
TypeId scalarType = TypeUtils::scalarOf(typeId);
switch (scalarType) {
case TypeId::kIntPtr : typeName = "intptr" ; break;
case TypeId::kUIntPtr: typeName = "uintptr"; break;
case TypeId::kInt8 : typeName = "int8" ; break;
case TypeId::kUInt8 : typeName = "uint8" ; break;
case TypeId::kInt16 : typeName = "int16" ; break;
case TypeId::kUInt16 : typeName = "uint16" ; break;
case TypeId::kInt32 : typeName = "int32" ; break;
case TypeId::kUInt32 : typeName = "uint32" ; break;
case TypeId::kInt64 : typeName = "int64" ; break;
case TypeId::kUInt64 : typeName = "uint64" ; break;
case TypeId::kFloat32: typeName = "float32"; break;
case TypeId::kFloat64: typeName = "float64"; break;
case TypeId::kFloat80: typeName = "float80"; break;
case TypeId::kMask8 : typeName = "mask8" ; break;
case TypeId::kMask16 : typeName = "mask16" ; break;
case TypeId::kMask32 : typeName = "mask32" ; break;
case TypeId::kMask64 : typeName = "mask64" ; break;
case TypeId::kMmx32 : typeName = "mmx32" ; break;
case TypeId::kMmx64 : typeName = "mmx64" ; break;
default:
typeName = "unknown";
break;
}
uint32_t baseSize = TypeUtils::sizeOf(scalarType);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
}
else {
return sb.append(typeName);
}
}
Error formatFeature(
String& sb,
Arch arch,
uint32_t featureId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatFeature(sb, featureId);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatFeature(sb, featureId);
#endif
return kErrorInvalidArch;
}
Error formatLabel(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
DebugUtils::unused(formatFlags);
if (emitter && emitter->code()) {
const LabelEntry* le = emitter->code()->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
return sb.appendFormat("<InvalidLabel:%u>", labelId);
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->parentId();
const LabelEntry* pe = emitter->code()->labelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe))
ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
else
ASMJIT_PROPAGATE(sb.append(pe->name()));
ASMJIT_PROPAGATE(sb.append('.'));
}
if (le->type() == LabelType::kAnonymous)
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
return sb.append(le->name());
}
}
return sb.appendFormat("L%u", labelId);
}
Error formatRegister(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
return kErrorInvalidArch;
}
Error formatOperand(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
return kErrorInvalidArch;
}
ASMJIT_API Error formatDataType(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue)))
return DebugUtils::errored(kErrorInvalidArch);
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8)
return DebugUtils::errored(kErrorInvalidState);
uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
}
static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSize, const uint8_t* data, size_t itemCount) noexcept {
sb.append('.');
sb.append(typeName);
sb.append(' ');
for (size_t i = 0; i < itemCount; i++) {
uint64_t v = 0;
if (i != 0)
ASMJIT_PROPAGATE(sb.append(", ", 2));
switch (typeSize) {
case 1: v = data[0]; break;
case 2: v = Support::readU16u(data); break;
case 4: v = Support::readU32u(data); break;
case 8: v = Support::readU64u(data); break;
}
ASMJIT_PROPAGATE(sb.appendUInt(v, 16, typeSize * 2, StringFormatFlags::kAlternate));
data += typeSize;
}
return kErrorOk;
}
Error formatData(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) noexcept
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch)))
return DebugUtils::errored(kErrorInvalidArch);
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0)
return DebugUtils::errored(kErrorInvalidState);
if (!Support::isPowerOf2(typeSize)) {
itemCount *= typeSize;
typeSize = 1;
}
while (typeSize > 8u) {
typeSize >>= 1;
itemCount <<= 1;
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
if (repeatCount > 1)
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount);
}
Error formatInstruction(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch))
return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
return kErrorInvalidArch;
}
#ifndef ASMJIT_NO_BUILDER
#ifndef ASMJIT_NO_COMPILER
static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
TypeId typeId = value.typeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append('['));
// NOTE: It should be either reg or stack, but never both. We
// use two IFs on purpose so that if the FuncValue is both it
// shows up in the logs.
if (value.isReg()) {
ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, emitter->arch(), value.regType(), value.regId()));
}
if (value.isStack()) {
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
}
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk;
}
static Error formatFuncValuePack(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncValuePack& pack,
const RegOnly* vRegs) noexcept {
size_t count = pack.count();
if (!count)
return sb.append("void");
if (count > 1)
sb.append('[');
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex];
if (!value)
break;
if (valueIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value));
if (vRegs) {
const VirtReg* virtReg = nullptr;
static const char nullReg[] = "<none>";
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id()))
virtReg = cc->virtRegById(vRegs[valueIndex].id());
ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? virtReg->name() : nullReg));
}
}
if (count > 1)
sb.append(']');
return kErrorOk;
}
static Error formatFuncRets(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd) noexcept {
return formatFuncValuePack(sb, formatFlags, cc, fd.retPack(), nullptr);
}
static Error formatFuncArgs(
String& sb,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd,
const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount();
if (!argCount)
return sb.append("void");
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data));
}
return kErrorOk;
}
#endif
Error formatNode(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions))
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
size_t startLineIndex = sb.size();
switch (node->type()) {
case NodeType::kInst:
case NodeType::kJump: {
const InstNode* instNode = node->as<InstNode>();
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
break;
}
case NodeType::kSection: {
const SectionNode* sectionNode = node->as<SectionNode>();
if (builder->_code->isSectionValid(sectionNode->id())) {
const Section* section = builder->_code->sectionById(sectionNode->id());
ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
}
break;
}
case NodeType::kLabel: {
const LabelNode* labelNode = node->as<LabelNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, labelNode->labelId()));
ASMJIT_PROPAGATE(sb.append(":"));
break;
}
case NodeType::kAlign: {
const AlignNode* alignNode = node->as<AlignNode>();
ASMJIT_PROPAGATE(sb.appendFormat(".align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == AlignMode::kCode ? "code" : "data"));
break;
}
case NodeType::kEmbedData: {
const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatDataType(sb, formatOptions.flags(), builder->arch(), embedNode->typeId()));
ASMJIT_PROPAGATE(sb.appendFormat(" {Count=%zu Repeat=%zu TotalSize=%zu}", embedNode->itemCount(), embedNode->repeatCount(), embedNode->dataSize()));
break;
}
case NodeType::kEmbedLabel: {
const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
ASMJIT_PROPAGATE(sb.append(".label "));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
break;
}
case NodeType::kEmbedLabelDelta: {
const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
ASMJIT_PROPAGATE(sb.append(".label ("));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
ASMJIT_PROPAGATE(sb.append(" - "));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case NodeType::kConstPool: {
const ConstPoolNode* constPoolNode = node->as<ConstPoolNode>();
ASMJIT_PROPAGATE(sb.appendFormat("[ConstPool Size=%zu Alignment=%zu]", constPoolNode->size(), constPoolNode->alignment()));
break;
}
case NodeType::kComment: {
const CommentNode* commentNode = node->as<CommentNode>();
return sb.appendFormat("; %s", commentNode->inlineComment());
}
case NodeType::kSentinel: {
const SentinelNode* sentinelNode = node->as<SentinelNode>();
const char* sentinelName = nullptr;
switch (sentinelNode->sentinelType()) {
case SentinelType::kFuncEnd:
sentinelName = "[FuncEnd]";
break;
default:
sentinelName = "[Sentinel]";
break;
}
ASMJIT_PROPAGATE(sb.append(sentinelName));
break;
}
#ifndef ASMJIT_NO_COMPILER
case NodeType::kFunc: {
const FuncNode* funcNode = node->as<FuncNode>();
if (builder->isCompiler()) {
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
}
break;
}
case NodeType::kFuncRet: {
const FuncRetNode* retNode = node->as<FuncRetNode>();
ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
for (uint32_t i = 0; i < 2; i++) {
const Operand_& op = retNode->_opArray[i];
if (!op.isNone()) {
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatOptions.flags(), builder, builder->arch(), op));
}
}
break;
}
case NodeType::kInvoke: {
const InvokeNode* invokeNode = node->as<InvokeNode>();
ASMJIT_PROPAGATE(builder->_funcs.formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
break;
}
#endif
default: {
ASMJIT_PROPAGATE(sb.appendFormat("[UserNode:%u]", node->type()));
break;
}
}
if (node->hasInlineComment()) {
size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t currentPadding = sb.size() - startLineIndex;
if (currentPadding < requiredPadding)
ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding));
ASMJIT_PROPAGATE(sb.append("; "));
ASMJIT_PROPAGATE(sb.append(node->inlineComment()));
}
return kErrorOk;
}
Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept {
return formatNodeList(sb, formatOptions, builder, builder->firstNode(), nullptr);
}
Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept {
const BaseNode* node = begin;
while (node != end) {
ASMJIT_PROPAGATE(formatNode(sb, formatOptions, builder, node));
ASMJIT_PROPAGATE(sb.append('\n'));
node = node->next();
}
return kErrorOk;
}
#endif
} // {Formatter}
ASMJIT_END_NAMESPACE
#endif
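
As a quick illustration of the formatter entry points above, a hedged sketch that feeds `Formatter::formatData()` directly; the expected output string is inferred from the word-name table and hex formatting above, not verified:

```
#include <asmjit/core.h>
#include <stdio.h>

using namespace asmjit;

int main() {
  String sb;
  const uint32_t data[] = { 1, 2, 3 };

  // Formats three dwords for an X64 target into `sb`.
  Formatter::formatData(sb, FormatFlags::kNone, Arch::kX64,
                        TypeId::kUInt32, data, 3);

  printf("%s\n", sb.data()); // Expected: ".dd 0x00000001, 0x00000002, 0x00000003"
  return 0;
}
```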

View File

@@ -1,249 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_H_INCLUDED
#include "../core/globals.h"
#include "../core/inst.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
class BaseBuilder;
class BaseEmitter;
class BaseNode;
struct Operand_;
//! Format flags used by \ref Logger and \ref FormatOptions.
enum class FormatFlags : uint32_t {
//! No formatting flags.
kNone = 0u,
//! Show also binary form of each logged instruction (Assembler).
kMachineCode = 0x00000001u,
//! Show a text explanation of some immediate values.
kExplainImms = 0x00000002u,
//! Use hexadecimal notation of immediate values.
kHexImms = 0x00000004u,
//! Use hexadecimal notation of addresses and offsets in addresses.
kHexOffsets = 0x00000008u,
//! Show casts between virtual register types (Compiler output).
kRegCasts = 0x00000010u,
//! Show positions associated with nodes (Compiler output).
kPositions = 0x00000020u,
//! Always format a register type (Compiler output).
kRegType = 0x00000040u
};
ASMJIT_DEFINE_ENUM_FLAGS(FormatFlags)
//! Format indentation group, used by \ref FormatOptions.
enum class FormatIndentationGroup : uint32_t {
//! Indentation used for instructions and directives.
kCode = 0u,
//! Indentation used for labels and function nodes.
kLabel = 1u,
//! Indentation used for comments (not inline comments).
kComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kReserved = 3u,
//! \endcond
//! Maximum value of `FormatIndentationGroup`.
kMaxValue = kReserved
};
//! Format padding group, used by \ref FormatOptions.
enum class FormatPaddingGroup : uint32_t {
//! Describes padding of a regular line, which can represent instruction, data, or assembler directives.
kRegularLine = 0,
//! Describes padding of machine code dump that is visible next to the instruction, if enabled.
kMachineCode = 1,
//! Maximum value of `FormatPaddingGroup`.
kMaxValue = kMachineCode
};
//! Formatting options used by \ref Logger and \ref Formatter.
class FormatOptions {
public:
//! \name Members
//! \{
//! Format flags.
FormatFlags _flags = FormatFlags::kNone;
//! Indentations for each indentation group.
Support::Array<uint8_t, uint32_t(FormatIndentationGroup::kMaxValue) + 1> _indentation {};
//! Paddings for each padding group.
Support::Array<uint16_t, uint32_t(FormatPaddingGroup::kMaxValue) + 1> _padding {};
//! \}
//! \name Reset
//! \{
//! Resets FormatOptions to its default initialized state.
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_flags = FormatFlags::kNone;
_indentation.fill(uint8_t(0));
_padding.fill(uint16_t(0));
}
//! \}
//! \name Accessors
//! \{
//! Returns format flags.
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Resets all format flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given indentation `group`.
ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
//! Sets indentation for the given indentation `group`.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
//! Resets indentation for the given indentation `group` to zero.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }
//! Returns padding for the given padding `group`.
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
//! Sets padding for the given padding `group`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
//! Resets padding for the given padding `group` to zero, which means that a default padding will be used
//! based on the target architecture properties.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }
//! \}
};
//! Provides formatting functionality to format operands, instructions, and nodes.
namespace Formatter {
#ifndef ASMJIT_NO_LOGGING
//! Appends a formatted `typeId` to the output string `sb`.
ASMJIT_API Error formatTypeId(
String& sb,
TypeId typeId) noexcept;
//! Appends a formatted `featureId` to the output string `sb`.
//!
//! See \ref CpuFeatures.
ASMJIT_API Error formatFeature(
String& sb,
Arch arch,
uint32_t featureId) noexcept;
//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers, which won't be formatted properly
//! if the `emitter` is not provided.
ASMJIT_API Error formatRegister(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId) noexcept;
//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels properly; otherwise the label is
//! formatted as if it were an anonymous label.
ASMJIT_API Error formatLabel(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept;
//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatOperand(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept;
//! Appends a formatted data-type to the output string `sb`.
ASMJIT_API Error formatDataType(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept;
//! Appends a formatted data to the output string `sb`.
ASMJIT_API Error formatData(
String& sb,
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) noexcept;
//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatInstruction(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
#ifndef ASMJIT_NO_BUILDER
//! Appends a formatted node to the output string `sb`.
//!
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error formatNode(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref formatNode(), but appends more nodes to the output string,
//! separating each node with a newline '\n' character.
ASMJIT_API Error formatNodeList(
String& sb,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept;
#endif
#endif
} // {Formatter}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED
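
A minimal sketch of how these flags are typically consumed, assuming the standard `FileLogger` from AsmJit's logging layer (not shown in this diff):

```
#include <asmjit/x86.h>
#include <stdio.h>

using namespace asmjit;

int main() {
  FileLogger logger(stdout);
  // Show encoded bytes next to each instruction and use hex immediates.
  logger.addFlags(FormatFlags::kMachineCode | FormatFlags::kHexImms);

  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment(), rt.cpuFeatures());
  code.setLogger(&logger);

  x86::Assembler a(&code);
  a.mov(x86::eax, 42); // Logged roughly as "mov eax, 0x2A" plus machine code.
  a.ret();
  return 0;
}
```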

View File

@@ -1,34 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#include "../core/formatter.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_logging
//! \{
namespace Formatter {
static ASMJIT_FORCE_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");
size_t padding = formatOptions.padding(group);
return padding ? padding : size_t(_defaultPaddingTable[uint32_t(group)]);
}
} // {Formatter}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FORMATTER_P_H_INCLUDED

View File

@@ -1,286 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/funcargscontext_p.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86func_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64func_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// CallConv - Init & Reset
// =======================
ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& environment) noexcept {
reset();
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86())
return x86::FuncInternal::initCallConv(*this, ccId, environment);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64())
return a64::FuncInternal::initCallConv(*this, ccId, environment);
#endif
return DebugUtils::errored(kErrorInvalidArgument);
}
// FuncDetail - Init / Reset
// =========================
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
CallConvId ccId = signature.callConvId();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
return DebugUtils::errored(kErrorInvalidArgument);
CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment));
uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize);
const TypeId* signatureArgs = signature.args();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
FuncValuePack& argPack = _args[argIndex];
argPack[0].initTypeId(TypeUtils::deabstract(signatureArgs[argIndex], deabstractDelta));
}
_argCount = uint8_t(argCount);
_vaIndex = uint8_t(signature.vaIndex());
TypeId ret = signature.ret();
if (ret != TypeId::kVoid)
_rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta));
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86())
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64())
return a64::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
// We should never get here - if `cc.init()` succeeded then there has to be an implementation for the current
// architecture. However, stay safe.
return DebugUtils::errored(kErrorInvalidArgument);
}
// FuncFrame - Init
// ================
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
Arch arch = func.callConv().arch();
if (!Environment::isValidArch(arch))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Initializing FuncFrame means making a copy of some properties of `func`. Properties like `_localStackSize` will
// be set by the user before the frame is finalized.
reset();
_arch = arch;
_spRegId = uint8_t(archTraits.spRegId());
_saRegId = uint8_t(BaseReg::kIdBad);
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment)
minDynamicAlignment <<= 1;
_naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment);
_redZoneSize = uint8_t(func.redZoneSize());
_spillZoneSize = uint8_t(func.spillZoneSize());
_finalStackAlignment = uint8_t(_naturalStackAlignment);
if (func.hasFlag(CallConvFlags::kCalleePopsStack)) {
_calleeStackCleanup = uint16_t(func.argStackSize());
}
// Initial masks of dirty and preserved registers.
for (RegGroup group : RegGroupVirtValues{}) {
_dirtyRegs[group] = func.usedRegs(group);
_preservedRegs[group] = func.preservedRegs(group);
}
// Exclude stack pointer - this register is never included in saved GP regs.
_preservedRegs[RegGroup::kGp] &= ~Support::bitMask(archTraits.spRegId());
// The size and alignment of save/restore area of registers for each virtual register group
_saveRestoreRegSize = func.callConv()._saveRestoreRegSize;
_saveRestoreAlignment = func.callConv()._saveRestoreAlignment;
return kErrorOk;
}
// FuncFrame - Finalize
// ====================
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch()))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch());
uint32_t registerSize = _saveRestoreRegSize[RegGroup::kGp];
uint32_t vectorSize = _saveRestoreRegSize[RegGroup::kVec];
uint32_t returnAddressSize = archTraits.hasLinkReg() ? 0u : registerSize;
// The final stack alignment must be updated according to the call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment,
_callStackAlignment,
_localStackAlignment));
bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment();
uint32_t kSp = archTraits.spRegId();
uint32_t kFp = archTraits.fpRegId();
uint32_t kLr = archTraits.linkRegId();
// Make frame pointer dirty if the function uses it.
if (hasFP) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kFp);
// Currently required by ARM; if this ever works differently across architectures we would most likely have to
// generalize it in CallConv.
if (kLr != BaseReg::kIdBad)
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr);
}
// These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId;
if (saRegId == BaseReg::kIdBad)
saRegId = kSp;
// Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
// dynamic stack alignment.
if (hasDA && saRegId == kSp)
saRegId = kFp;
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp)
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId);
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (RegGroup group : RegGroupVirtValues{})
saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
uint32_t v = 0; // The beginning of the stack frame relative to SP after prolog.
v += callStackSize(); // Count 'callStackSize' <- This is used to call functions.
v = Support::alignUp(v, stackAlignment); // Align to function's stack alignment.
_localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
v += localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
// If the function's stack must be aligned, calculate the alignment necessary to store vector registers, and set
// `FuncAttributes::kAlignedVecSR` to inform PEI that it can use instructions that perform aligned stores/loads.
if (stackAlignment >= vectorSize && _extraRegSaveSize) {
addAttributes(FuncAttributes::kAlignedVecSR);
v = Support::alignUp(v, vectorSize); // Align 'extraRegSaveOffset'.
}
_extraRegSaveOffset = v; // Store 'extraRegSaveOffset' <- Non-GP save/restore starts here.
v += _extraRegSaveSize; // Count 'extraRegSaveSize' <- Non-GP save/restore ends here.
// Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
if (hasDA && !hasFP) {
_daOffset = v; // Store 'daOffset' <- DA pointer would be stored here.
v += registerSize; // Count 'daOffset'.
}
else {
_daOffset = FuncFrame::kTagInvalidOffset;
}
// Link Register
// -------------
//
// The stack is aligned after the function call as the return address is stored in a link register. Some
// architectures may require to always have aligned stack after PUSH/POP operation, which is represented
// by ArchTraits::stackAlignmentConstraint().
//
// No Link Register (X86/X64)
// --------------------------
//
// The return address should be stored after GP save/restore regs. It has the same size as `registerSize`
// (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
// that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
// this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
// it pushes the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes (depending on the
// architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize)
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg())
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA)
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
// Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
// Calculate where the function arguments start relative to FP or user-provided register.
_saOffsetFromSA = hasFP ? returnAddressSize + registerSize // Return address + frame pointer.
: returnAddressSize + _pushPopSaveSize; // Return address + all push/pop regs.
return kErrorOk;
}
// FuncArgsAssignment - UpdateFuncFrame
// ====================================
ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
Arch arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func)
return DebugUtils::errored(kErrorInvalidState);
RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch));
FuncArgsContext ctx;
ASMJIT_PROPAGATE(ctx.initWorkData(frame, *this, &constraints));
ASMJIT_PROPAGATE(ctx.markDstRegsDirty(frame));
ASMJIT_PROPAGATE(ctx.markScratchRegs(frame));
ASMJIT_PROPAGATE(ctx.markStackArgsReg(frame));
return kErrorOk;
}
ASMJIT_END_NAMESPACE
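
A hedged sketch of the `init()` -> `finalize()` order the code above expects; `FuncSignature::build<>()` is assumed from recent AsmJit (older copies spell it `FuncSignatureT<>`), and error checking mirrors the library's `ASMJIT_PROPAGATE` style:

```
#include <asmjit/core.h>

using namespace asmjit;

// Builds a finalized frame for `int fn(int, int)` in the given environment.
Error buildFrame(const Environment& env, FuncFrame& frameOut) {
  FuncSignature sig = FuncSignature::build<int, int, int>();

  FuncDetail func;
  ASMJIT_PROPAGATE(func.init(sig, env));  // Resolves the calling convention.

  ASMJIT_PROPAGATE(frameOut.init(func));  // Copies CC properties into the frame.
  frameOut.setLocalStackSize(64);         // User-set before finalization.
  return frameOut.finalize();             // Computes offsets and final stack size.
}
```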

File diff suppressed because it is too large

View File

@@ -1,293 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/funcargscontext_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
for (RegGroup group : RegGroupVirtValues{})
_workData[size_t(group)].reset();
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
Arch arch = frame.arch();
const FuncDetail& func = *args.funcDetail();
_archTraits = &ArchTraits::byArch(arch);
_constraints = constraints;
_arch = arch;
// Initialize `_archRegs`.
for (RegGroup group : RegGroupVirtValues{})
_workData[group]._archRegs = _constraints->availableRegs(group);
if (frame.hasPreservedFP())
_workData[size_t(RegGroup::kGp)]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
for (uint32_t argIndex = 0; argIndex < Globals::kMaxFuncArgs; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& dst_ = args.arg(argIndex, valueIndex);
if (!dst_.isAssigned())
continue;
const FuncValue& src_ = func.arg(argIndex, valueIndex);
if (ASMJIT_UNLIKELY(!src_.isAssigned()))
return DebugUtils::errored(kErrorInvalidState);
Var& var = _vars[varId];
var.init(src_, dst_);
FuncValue& src = var.cur;
FuncValue& dst = var.out;
RegGroup dstGroup = RegGroup::kMaxValue;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;
// Not supported.
if (src.isIndirect())
return DebugUtils::errored(kErrorInvalidAssignment);
if (dst.isReg()) {
RegType dstType = dst.regType();
if (ASMJIT_UNLIKELY(!archTraits().hasRegType(dstType)))
return DebugUtils::errored(kErrorInvalidRegType);
// Copy TypeId from source if the destination doesn't have it. The RA used by BaseCompiler would never
// leave TypeId undefined, but users of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(archTraits().regTypeToTypeId(dst.regType()));
dstGroup = archTraits().regTypeToGroup(dstType);
if (ASMJIT_UNLIKELY(dstGroup > RegGroup::kMaxVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);
dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
return DebugUtils::errored(kErrorInvalidPhysId);
if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
return DebugUtils::errored(kErrorOverlappedRegs);
dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());
OperandSignature signature = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid()))
return DebugUtils::errored(kErrorInvalidState);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(signature.regGroup()));
}
if (src.isReg()) {
uint32_t srcId = src.regId();
RegGroup srcGroup = archTraits().regTypeToGroup(src.regType());
if (dstGroup == srcGroup) {
ASMJIT_ASSERT(dstWd != nullptr);
dstWd->assign(varId, srcId);
// Best case - the register is already allocated where it is expected to be.
if (dstId == srcId)
var.markDone();
}
else {
if (ASMJIT_UNLIKELY(srcGroup > RegGroup::kMaxVirt))
return DebugUtils::errored(kErrorInvalidState);
WorkData& srcData = _workData[size_t(srcGroup)];
srcData.assign(varId, srcId);
}
}
else {
if (dstWd)
dstWd->_numStackArgs++;
_hasStackSrc = true;
}
varId++;
}
}
// Initialize WorkData::workRegs.
for (RegGroup group : RegGroupVirtValues{}) {
_workData[group]._workRegs =
(_workData[group].archRegs() & (frame.dirtyRegs(group) | ~frame.preservedRegs(group))) | _workData[group].dstRegs() | _workData[group].assignedRegs();
}
// Create a variable that represents `SARegId` if necessary.
bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
WorkData& gpRegs = _workData[RegGroup::kGp];
uint32_t saCurRegId = frame.saRegId();
uint32_t saOutRegId = args.saRegId();
if (saCurRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with input registers.
if (ASMJIT_UNLIKELY(gpRegs.isAssigned(saCurRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
}
if (saOutRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with argument assignments.
if (ASMJIT_UNLIKELY(Support::bitTest(gpRegs.dstRegs(), saOutRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
saRegRequired = true;
}
if (saRegRequired) {
TypeId ptrTypeId = Environment::is32Bit(arch) ? TypeId::kUInt32 : TypeId::kUInt64;
RegType ptrRegType = Environment::is32Bit(arch) ? RegType::kGp32 : RegType::kGp64;
_saVarId = uint8_t(varId);
_hasPreservedFP = frame.hasPreservedFP();
Var& var = _vars[varId];
var.reset();
if (saCurRegId == BaseReg::kIdBad) {
if (saOutRegId != BaseReg::kIdBad && !gpRegs.isAssigned(saOutRegId)) {
saCurRegId = saOutRegId;
}
else {
RegMask availableRegs = gpRegs.availableRegs();
if (!availableRegs)
availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorNoMorePhysRegs);
saCurRegId = Support::ctz(availableRegs);
}
}
var.cur.initReg(ptrRegType, saCurRegId, ptrTypeId);
gpRegs.assign(varId, saCurRegId);
gpRegs._workRegs |= Support::bitMask(saCurRegId);
if (saOutRegId != BaseReg::kIdBad) {
var.out.initReg(ptrRegType, saOutRegId, ptrTypeId);
gpRegs._dstRegs |= Support::bitMask(saOutRegId);
gpRegs._workRegs |= Support::bitMask(saOutRegId);
}
else {
var.markDone();
}
varId++;
}
_varCount = varId;
// Detect register swaps.
for (varId = 0; varId < _varCount; varId++) {
Var& var = _vars[varId];
if (var.cur.isReg() && var.out.isReg()) {
uint32_t srcId = var.cur.regId();
uint32_t dstId = var.out.regId();
RegGroup group = archTraits().regTypeToGroup(var.cur.regType());
if (group != archTraits().regTypeToGroup(var.out.regType()))
continue;
WorkData& wd = _workData[group];
if (wd.isAssigned(dstId)) {
Var& other = _vars[wd._physToVarId[dstId]];
if (archTraits().regTypeToGroup(other.out.regType()) == group && other.out.regId() == srcId) {
wd._numSwaps++;
_regSwapsMask = uint8_t(_regSwapsMask | Support::bitMask(group));
}
}
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
for (RegGroup group : RegGroupVirtValues{}) {
WorkData& wd = _workData[group];
uint32_t regs = wd.usedRegs() | wd._dstShuf;
wd._workRegs |= regs;
frame.addDirtyRegs(group, regs);
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexcept {
uint32_t groupMask = 0;
// Handle stack to stack moves.
groupMask |= _stackDstMask;
// Handle register swaps.
groupMask |= _regSwapsMask & ~Support::bitMask(RegGroup::kGp);
if (!groupMask)
return kErrorOk;
// Selects one dirty register per affected group that can be used as a scratch register.
for (RegGroup group : RegGroupVirtValues{}) {
if (Support::bitTest(groupMask, group)) {
WorkData& wd = _workData[group];
// Initially, pick some clobbered or dirty register.
RegMask workRegs = wd.workRegs();
RegMask regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
// If that didn't work out, pick a register that is not in 'used'.
if (!regs)
regs = workRegs & ~wd.usedRegs();
// If that didn't work out, pick any other register that is allocable.
// This last resort will, however, result in marking one more register
// dirty.
if (!regs)
regs = wd.archRegs() & ~workRegs;
// If that didn't work out, we will have to use XORs instead of MOVs.
if (!regs)
continue;
RegMask regMask = Support::blsi(regs);
wd._workRegs |= regMask;
frame.addDirtyRegs(group, regMask);
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markStackArgsReg(FuncFrame& frame) noexcept {
if (_saVarId != kVarIdNone) {
const Var& var = _vars[_saVarId];
frame.setSARegId(var.cur.regId());
}
else if (frame.hasPreservedFP()) {
frame.setSARegId(archTraits().fpRegId());
}
return kErrorOk;
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE

View File

@@ -1,199 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#define ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/environment.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/radefs_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId dstTypeId, TypeId srcTypeId) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
uint32_t maxSize = Support::max<uint32_t>(dstSize, srcSize);
uint32_t regSize = Environment::registerSizeFromArch(arch);
OperandSignature signature{0};
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId)))
signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32)
: archTraits.regTypeToSignature(RegType::kGp64);
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64))
signature = archTraits.regTypeToSignature(RegType::kVec64);
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128))
signature = archTraits.regTypeToSignature(RegType::kVec128);
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256))
signature = archTraits.regTypeToSignature(RegType::kVec256);
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512))
signature = archTraits.regTypeToSignature(RegType::kVec512);
return signature;
}
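// A note on getSuitableRegForMemToMemMove() behavior (an illustrative sketch, not exhaustive): moving a 4-byte
// integer to a 4-byte integer selects a 32-bit GP register, while a 16-byte non-integer value selects a 128-bit
// vector register when the target architecture provides one; if no register type fits, the returned signature
// stays zero and the caller must handle such a move differently.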
class FuncArgsContext {
public:
enum VarId : uint32_t {
kVarIdNone = 0xFF
};
//! Contains information about a single argument or SA register that may need shuffling.
struct Var {
FuncValue cur;
FuncValue out;
inline void init(const FuncValue& cur_, const FuncValue& out_) noexcept {
cur = cur_;
out = out_;
}
//! Resets the value to its unassigned state.
inline void reset() noexcept {
cur.reset();
out.reset();
}
inline bool isDone() const noexcept { return cur.isDone(); }
inline void markDone() noexcept { cur.addFlags(FuncValue::kFlagIsDone); }
};
struct WorkData {
//! All allocable registers provided by the architecture.
RegMask _archRegs;
//! All registers that can be used by the shuffler.
RegMask _workRegs;
//! Registers used by the shuffler (all).
RegMask _usedRegs;
//! Assigned registers.
RegMask _assignedRegs;
//! Destination registers assigned to arguments or SA.
RegMask _dstRegs;
//! Destination registers that require shuffling.
RegMask _dstShuf;
//! Number of register swaps.
uint8_t _numSwaps;
//! Number of stack loads.
uint8_t _numStackArgs;
//! Reserved (only used as padding).
uint8_t _reserved[6];
//! Physical ID to variable ID mapping.
uint8_t _physToVarId[32];
inline void reset() noexcept {
_archRegs = 0;
_workRegs = 0;
_usedRegs = 0;
_assignedRegs = 0;
_dstRegs = 0;
_dstShuf = 0;
_numSwaps = 0;
_numStackArgs = 0;
memset(_reserved, 0, sizeof(_reserved));
memset(_physToVarId, kVarIdNone, 32);
}
inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId);
}
inline void assign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(!isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == kVarIdNone);
_physToVarId[regId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(regId);
}
inline void reassign(uint32_t varId, uint32_t newId, uint32_t oldId) noexcept {
ASMJIT_ASSERT( isAssigned(oldId));
ASMJIT_ASSERT(!isAssigned(newId));
ASMJIT_ASSERT(_physToVarId[oldId] == varId);
ASMJIT_ASSERT(_physToVarId[newId] == kVarIdNone);
_physToVarId[oldId] = uint8_t(kVarIdNone);
_physToVarId[newId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(newId) ^ Support::bitMask(oldId);
}
inline void swap(uint32_t aVarId, uint32_t aRegId, uint32_t bVarId, uint32_t bRegId) noexcept {
ASMJIT_ASSERT(isAssigned(aRegId));
ASMJIT_ASSERT(isAssigned(bRegId));
ASMJIT_ASSERT(_physToVarId[aRegId] == aVarId);
ASMJIT_ASSERT(_physToVarId[bRegId] == bVarId);
_physToVarId[aRegId] = uint8_t(bVarId);
_physToVarId[bRegId] = uint8_t(aVarId);
}
inline void unassign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == varId);
DebugUtils::unused(varId);
_physToVarId[regId] = uint8_t(kVarIdNone);
_assignedRegs ^= Support::bitMask(regId);
}
inline RegMask archRegs() const noexcept { return _archRegs; }
inline RegMask workRegs() const noexcept { return _workRegs; }
inline RegMask usedRegs() const noexcept { return _usedRegs; }
inline RegMask assignedRegs() const noexcept { return _assignedRegs; }
inline RegMask dstRegs() const noexcept { return _dstRegs; }
inline RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
};
//! Architecture traits.
const ArchTraits* _archTraits = nullptr;
//! Architecture constraints.
const RAConstraints* _constraints = nullptr;
//! Target architecture.
Arch _arch = Arch::kUnknown;
//! Has arguments passed via stack (SRC).
bool _hasStackSrc = false;
//! Has preserved frame-pointer (FP).
bool _hasPreservedFP = false;
//! Has arguments assigned to stack (DST).
uint8_t _stackDstMask = 0;
//! Register swap groups (bit-mask).
uint8_t _regSwapsMask = 0;
uint8_t _saVarId = kVarIdNone;
uint32_t _varCount = 0;
Support::Array<WorkData, Globals::kNumVirtGroups> _workData;
Var _vars[Globals::kMaxFuncArgs * Globals::kMaxValuePack + 1];
FuncArgsContext() noexcept;
ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; }
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; }
ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; }
ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;
Error markScratchRegs(FuncFrame& frame) noexcept;
Error markDstRegsDirty(FuncFrame& frame) noexcept;
Error markStackArgsReg(FuncFrame& frame) noexcept;
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED

View File

@@ -1,134 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// DebugUtils - Error As String
// ============================
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#ifndef ASMJIT_NO_TEXT
// @EnumStringBegin{"enum": "ErrorCode", "output": "sError", "strip": "kError"}@
static const char sErrorString[] =
"Ok\0"
"OutOfMemory\0"
"InvalidArgument\0"
"InvalidState\0"
"InvalidArch\0"
"NotInitialized\0"
"AlreadyInitialized\0"
"FeatureNotEnabled\0"
"TooManyHandles\0"
"TooLarge\0"
"NoCodeGenerated\0"
"InvalidDirective\0"
"InvalidLabel\0"
"TooManyLabels\0"
"LabelAlreadyBound\0"
"LabelAlreadyDefined\0"
"LabelNameTooLong\0"
"InvalidLabelName\0"
"InvalidParentLabel\0"
"InvalidSection\0"
"TooManySections\0"
"InvalidSectionName\0"
"TooManyRelocations\0"
"InvalidRelocEntry\0"
"RelocOffsetOutOfRange\0"
"InvalidAssignment\0"
"InvalidInstruction\0"
"InvalidRegType\0"
"InvalidRegGroup\0"
"InvalidPhysId\0"
"InvalidVirtId\0"
"InvalidElementIndex\0"
"InvalidPrefixCombination\0"
"InvalidLockPrefix\0"
"InvalidXAcquirePrefix\0"
"InvalidXReleasePrefix\0"
"InvalidRepPrefix\0"
"InvalidRexPrefix\0"
"InvalidExtraReg\0"
"InvalidKMaskUse\0"
"InvalidKZeroUse\0"
"InvalidBroadcast\0"
"InvalidEROrSAE\0"
"InvalidAddress\0"
"InvalidAddressIndex\0"
"InvalidAddressScale\0"
"InvalidAddress64Bit\0"
"InvalidAddress64BitZeroExtension\0"
"InvalidDisplacement\0"
"InvalidSegment\0"
"InvalidImmediate\0"
"InvalidOperandSize\0"
"AmbiguousOperandSize\0"
"OperandSizeMismatch\0"
"InvalidOption\0"
"OptionAlreadyDefined\0"
"InvalidTypeId\0"
"InvalidUseOfGpbHi\0"
"InvalidUseOfGpq\0"
"InvalidUseOfF80\0"
"NotConsecutiveRegs\0"
"ConsecutiveRegsAllocation\0"
"IllegalVirtReg\0"
"TooManyVirtRegs\0"
"NoMorePhysRegs\0"
"OverlappedRegs\0"
"OverlappingStackRegWithRegArg\0"
"ExpressionLabelNotBound\0"
"ExpressionOverflow\0"
"FailedToOpenAnonymousMemory\0"
"FailedToOpenFile\0"
"<Unknown>\0";
static const uint16_t sErrorIndex[] = {
0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
247, 264, 283, 298, 314, 333, 352, 370, 392, 410, 429, 444, 460, 474, 488,
508, 533, 551, 573, 595, 612, 629, 645, 661, 677, 694, 709, 724, 744, 764,
784, 817, 837, 852, 869, 888, 909, 929, 943, 964, 978, 996, 1012, 1028, 1047,
1073, 1088, 1104, 1119, 1134, 1164, 1188, 1207, 1235, 1252
};
// @EnumStringEnd@
return sErrorString + sErrorIndex[Support::min<Error>(err, kErrorCount)];
#else
DebugUtils::unused(err);
static const char noMessage[] = "";
return noMessage;
#endif
}
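// Example usage (an illustrative sketch; `doSomethingWithAsmJit()` is a hypothetical function returning Error):
//
//   Error err = doSomethingWithAsmJit();
//   if (err != kErrorOk)
//     printf("AsmJit failed: %s\n", DebugUtils::errorAsString(err));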
// DebugUtils - Debug Output
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
::fputs(str, stderr);
#endif
}
// DebugUtils - Fatal Errors
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, sizeof(str),
"[asmjit] Assertion failed at %s (line %d):\n"
"[asmjit] %s\n", file, line, msg);
debugOutput(str);
::abort();
}
ASMJIT_END_NAMESPACE

View File

@@ -1,401 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
#define ASMJIT_CORE_GLOBALS_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
namespace Support {
//! Helper used to cast between function and void* pointers.
template<typename Dst, typename Src>
static inline Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; }
} // {Support}
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_FORCE_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_FORCE_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_FORCE_INLINE void* operator new(size_t n) noexcept { \
return Support::operatorNew(n); \
} \
\
ASMJIT_FORCE_INLINE void operator delete(void* p) noexcept { \
Support::operatorDelete(p); \
} \
\
ASMJIT_FORCE_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
ASMJIT_FORCE_INLINE void operator delete(void*, void*) noexcept {}
#else
#define ASMJIT_BASE_CLASS(TYPE)
#endif
//! \}
//! \endcond
//! \addtogroup asmjit_core
//! \{
//! Byte order.
enum class ByteOrder {
//! Little endian.
kLE = 0,
//! Big endian.
kBE = 1,
//! Native byte order of the target architecture.
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
//! Swapped byte order of the target architecture.
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
//! A policy that can be used with some `reset()` member functions.
enum class ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
kSoft = 0,
//! Hard reset, releases all memory used, if any.
kHard = 1
};
//! Contains typedefs, constants, and variables used globally by AsmJit.
namespace Globals {
//! Host memory allocator overhead.
static constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
//! Host memory allocator alignment.
static constexpr uint32_t kAllocAlignment = 8;
//! Aggressive growing strategy threshold.
static constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
//! Maximum depth of RB-Tree is:
//!
//! `2 * log2(n + 1)`
//!
//! Size of RB node is at least two pointers (without data), so a theoretical architecture limit would be:
//!
//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
//!
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch. The final value was adjusted by +1 for safety reasons.
static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
//! Maximum number of operands per single instruction.
static constexpr uint32_t kMaxOpCount = 6;
//! Maximum number of function arguments supported by the Compiler / Function API.
static constexpr uint32_t kMaxFuncArgs = 16;
//! The number of values that can be assigned to a single function argument or
//! return value.
static constexpr uint32_t kMaxValuePack = 4;
//! Maximum number of physical registers AsmJit can use per register group.
static constexpr uint32_t kMaxPhysRegs = 32;
//! Maximum alignment.
static constexpr uint32_t kMaxAlignment = 64;
//! Maximum label or symbol size in bytes.
static constexpr uint32_t kMaxLabelNameSize = 2048;
//! Maximum section name size.
static constexpr uint32_t kMaxSectionNameSize = 35;
//! Maximum size of comment.
static constexpr uint32_t kMaxCommentSize = 1024;
//! Invalid identifier.
static constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
static constexpr uint32_t kNotFound = 0xFFFFFFFFu;
//! Invalid base address.
static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
//! Number of virtual register groups.
static constexpr uint32_t kNumVirtGroups = 4;
struct Init_ {};
struct NoInit_ {};
static const constexpr Init_ Init {};
static const constexpr NoInit_ NoInit {};
} // {Globals}
template<typename Func>
static ASMJIT_INLINE_NODEBUG Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
template<typename Func>
static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
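//! A minimal usage sketch (illustrative; `getExecutableCode()` is a hypothetical function returning a pointer to
//! JIT-compiled machine code):
//!
//! ```
//! typedef int (*SumFunc)(int, int);
//!
//! void* rxPtr = getExecutableCode();          // Executable pointer, e.g. from a JIT runtime or allocator.
//! SumFunc sum = ptr_as_func<SumFunc>(rxPtr);  // void* -> function pointer.
//! void* raw = func_as_ptr(sum);               // Function pointer -> void*.
//! ```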
//! \}
//! \addtogroup asmjit_error_handling
//! \{
//! AsmJit error type (uint32_t).
typedef uint32_t Error;
//! AsmJit error codes.
enum ErrorCode : uint32_t {
// @EnumValuesBegin{"enum": "ErrorCode"}@
//! No error (success).
kErrorOk = 0,
//! Out of memory.
kErrorOutOfMemory,
//! Invalid argument.
kErrorInvalidArgument,
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something wrong or AsmJit caught itself by
//! doing something wrong. This error should never be ignored.
kErrorInvalidState,
//! Invalid or incompatible architecture.
kErrorInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
//! The object is already initialized.
kErrorAlreadyInitialized,
//! Either a built-in feature was disabled at compile time and it's not available or the feature is not
//! available on the target platform.
//!
//! For example trying to allocate large pages on an unsupported platform would return this error.
kErrorFeatureNotEnabled,
//! Too many handles (Windows) or file descriptors (Unix/Posix).
kErrorTooManyHandles,
//! Code generated is larger than allowed.
kErrorTooLarge,
//! No code generated.
//!
//! Returned by runtime if the \ref CodeHolder contains no code.
kErrorNoCodeGenerated,
//! Invalid directive.
kErrorInvalidDirective,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
//! Label index overflow - a single \ref BaseAssembler instance can hold almost 2^32 (4 billion) labels. If
//! there is an attempt to create more labels, this error is returned.
kErrorTooManyLabels,
//! Label is already bound.
kErrorLabelAlreadyBound,
//! Label is already defined (named labels).
kErrorLabelAlreadyDefined,
//! Label name is too long.
kErrorLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to \ref CodeHolder::newNamedLabelEntry() was either invalid or parent is not supported
//! by the requested `LabelType`.
kErrorInvalidParentLabel,
//! Invalid section.
kErrorInvalidSection,
//! Too many sections (section index overflow).
kErrorTooManySections,
//! Invalid section name (most probably too long).
kErrorInvalidSectionName,
//! Relocation index overflow (too many relocations).
kErrorTooManyRelocations,
//! Invalid relocation entry.
kErrorInvalidRelocEntry,
//! Reloc entry contains address that is out of range (unencodable).
kErrorRelocOffsetOutOfRange,
//! Invalid assignment to a register, function argument, or function return value.
kErrorInvalidAssignment,
//! Invalid instruction.
kErrorInvalidInstruction,
//! Invalid register type.
kErrorInvalidRegType,
//! Invalid register group.
kErrorInvalidRegGroup,
//! Invalid physical register id.
kErrorInvalidPhysId,
//! Invalid virtual register id.
kErrorInvalidVirtId,
//! Invalid element index (ARM).
kErrorInvalidElementIndex,
//! Invalid prefix combination (X86|X64).
kErrorInvalidPrefixCombination,
//! Invalid LOCK prefix (X86|X64).
kErrorInvalidLockPrefix,
//! Invalid XACQUIRE prefix (X86|X64).
kErrorInvalidXAcquirePrefix,
//! Invalid XRELEASE prefix (X86|X64).
kErrorInvalidXReleasePrefix,
//! Invalid REP prefix (X86|X64).
kErrorInvalidRepPrefix,
//! Invalid REX prefix (X86|X64).
kErrorInvalidRexPrefix,
//! Invalid {...} register (X86|X64).
kErrorInvalidExtraReg,
//! Invalid {k} use (not supported by the instruction) (X86|X64).
kErrorInvalidKMaskUse,
//! Invalid {k}{z} use (not supported by the instruction) (X86|X64).
kErrorInvalidKZeroUse,
//! Invalid broadcast - currently only related to invalid use of AVX-512 {1tox} (X86|X64).
kErrorInvalidBroadcast,
//! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512) (X86|X64).
kErrorInvalidEROrSAE,
//! Invalid address used (not encodable).
kErrorInvalidAddress,
//! Invalid index register used in memory address (not encodable).
kErrorInvalidAddressIndex,
//! Invalid address scale (not encodable).
kErrorInvalidAddressScale,
//! Invalid use of 64-bit address.
kErrorInvalidAddress64Bit,
//! Invalid use of a 64-bit address that requires 32-bit zero-extension (X64).
kErrorInvalidAddress64BitZeroExtension,
//! Invalid displacement (not encodable).
kErrorInvalidDisplacement,
//! Invalid segment (X86).
kErrorInvalidSegment,
//! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
kErrorInvalidImmediate,
//! Invalid operand size.
kErrorInvalidOperandSize,
//! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
kErrorAmbiguousOperandSize,
//! Mismatching operand size (size of multiple operands doesn't match the operation size).
kErrorOperandSizeMismatch,
//! Invalid option.
kErrorInvalidOption,
//! Option already defined.
kErrorOptionAlreadyDefined,
//! Invalid TypeId.
kErrorInvalidTypeId,
//! Invalid use of an 8-bit GPB-HIGH register.
kErrorInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
//! Invalid use of an 80-bit float (\ref TypeId::kFloat80).
kErrorInvalidUseOfF80,
//! Instruction requires the use of consecutive registers, but the registers in its operands weren't consecutive (AVX512, ASIMD load/store, etc...).
kErrorNotConsecutiveRegs,
//! Failed to allocate consecutive registers - allocable registers are either too restricted or there is a bug in RW info.
kErrorConsecutiveRegsAllocation,
//! Illegal virtual register - reported by instruction validation.
kErrorIllegalVirtReg,
//! AsmJit cannot create more virtual registers.
kErrorTooManyVirtRegs,
//! AsmJit requires a physical register, but none is available.
kErrorNoMorePhysRegs,
//! A variable has been assigned more than once to a function argument (BaseCompiler).
kErrorOverlappedRegs,
//! Invalid register to hold stack arguments offset.
kErrorOverlappingStackRegWithRegArg,
//! Unbound label cannot be evaluated by expression.
kErrorExpressionLabelNotBound,
//! Arithmetic overflow during expression evaluation.
kErrorExpressionOverflow,
//! Failed to open anonymous memory handle or file descriptor.
kErrorFailedToOpenAnonymousMemory,
//! Failed to open a file.
//!
//! \note This is a generic error that is used by internal filesystem API.
kErrorFailedToOpenFile,
// @EnumValuesEnd@
//! Count of AsmJit error codes.
kErrorCount
};
//! Debugging utilities.
namespace DebugUtils {
//! \cond INTERNAL
//! Used to silence warnings about unused arguments or variables.
template<typename... Args>
static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {}
//! \endcond
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit.
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures a breakpoint can be put at \ref assertionFailed() function
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
//! \def ASMJIT_ASSERT(...)
//!
//! AsmJit's own assert macro used in AsmJit code-base.
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_ASSERT(...) \
do { \
if (ASMJIT_LIKELY(__VA_ARGS__)) \
break; \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} while (0)
#else
#define ASMJIT_ASSERT(...) ((void)0)
#endif
//! \def ASMJIT_PROPAGATE(...)
//!
//! Propagates a possible `Error` produced by `...` to the caller by returning the error immediately. Used by AsmJit
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
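//!
//! For example (a minimal sketch; `doFirst()` and `doSecond()` are hypothetical functions returning `Error`):
//!
//! ```
//! Error doBoth() noexcept {
//!   ASMJIT_PROPAGATE(doFirst());  // Returns early if doFirst() fails.
//!   ASMJIT_PROPAGATE(doSecond()); // Returns early if doSecond() fails.
//!   return kErrorOk;
//! }
//! ```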
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \
return _err; \
} while (0)
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_GLOBALS_H_INCLUDED

View File

@@ -1,113 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/inst.h"
#if !defined(ASMJIT_NO_X86)
#include "../x86/x86instapi_p.h"
#endif
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64instapi_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// InstAPI - InstId <-> String
// ===========================
#ifndef ASMJIT_NO_TEXT
Error InstAPI::instIdToString(Arch arch, InstId instId, String& output) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::instIdToString(arch, instId, output);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::InstInternal::instIdToString(arch, instId, output);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::stringToInstId(arch, s, len);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::InstInternal::stringToInstId(arch, s, len);
#endif
return 0;
}
#endif // !ASMJIT_NO_TEXT
// InstAPI - Validate
// ==================
#ifndef ASMJIT_NO_VALIDATION
Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_VALIDATION
// InstAPI - QueryRWInfo
// =====================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount))
return DebugUtils::errored(kErrorInvalidArgument);
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
// InstAPI - QueryFeatures
// =======================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch))
return a64::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
ASMJIT_END_NAMESPACE

View File

@@ -1,790 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_INST_H_INCLUDED
#define ASMJIT_CORE_INST_H_INCLUDED
#include "../core/cpuinfo.h"
#include "../core/operand.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_instruction_db
//! \{
//! Describes an instruction id and modifiers used together with the id.
//!
//! Each architecture has a set of valid instructions indexed from 0. Instruction with 0 id is, however, a special
//! instruction that describes a "no instruction" or "invalid instruction". Different architectures can assign a
//! different instruction to the same id; each architecture typically has its own instructions indexed from 1.
//!
//! Instruction identifiers listed by architecture:
//!
//! - \ref x86::Inst (X86 and X86_64)
//! - \ref a64::Inst (AArch64)
typedef uint32_t InstId;
//! Instruction id parts.
//!
//! A mask that specifies a bit-layout of \ref InstId.
enum class InstIdParts : uint32_t {
// Common Masks
// ------------
//! Real id without any modifiers (always 16 least significant bits).
kRealId = 0x0000FFFFu,
//! Instruction is abstract (or virtual, IR, etc...).
kAbstract = 0x80000000u,
// ARM Specific
// ------------
//! AArch32 first data type, used by ASIMD instructions (`inst.dt.dt2`).
kA32_DT = 0x000F0000u,
//! AArch32 second data type, used by ASIMD instructions (`inst.dt.dt2`).
kA32_DT2 = 0x00F00000u,
//! AArch32/AArch64 condition code.
kARM_Cond = 0x78000000u
};
//! Instruction options.
//!
//! Instruction options complement instruction identifier and attributes.
enum class InstOptions : uint32_t {
//! No options.
kNone = 0,
//! Used internally by emitters for handling errors and rare cases.
kReserved = 0x00000001u,
//! Prevents following a jump during compilation (Compiler).
kUnfollow = 0x00000002u,
//! Overwrite the destination operand(s) (Compiler).
//!
//! Hint that is important for register liveness analysis. It tells the compiler that the destination operand will
//! be overwritten now or by adjacent instructions. Compiler knows when a register is completely overwritten by a
//! single instruction (for example you don't have to mark "movaps" or "pxor x, x"); however, if a pair of
//! instructions is used and the first of them doesn't completely overwrite the content of the destination,
//! Compiler fails to mark that register as dead.
//!
//! X86 Specific
//! ------------
//!
//! - All instructions that always overwrite at least the size of the register the virtual-register uses, for
//! example "mov", "movq", "movaps" don't need the overwrite option to be used - conversion, shuffle, and
//! other miscellaneous instructions included.
//!
//! - All instructions that clear the destination register if all operands are the same, for example "xor x, x",
//! "pcmpeqb x x", etc...
//!
//! - Consecutive instructions that partially overwrite the variable until there is no old content require
//! `BaseCompiler::overwrite()` to be used. Some examples (not always the best use cases, though):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If the allocated virtual register is used temporarily for scalar operations. For example if you allocate a
//! full vector like `x86::Compiler::newXmm()` and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only the LO element of `x` is changed; if you don't
//! use the HI elements, use `compiler.overwrite().sqrtss(x, y)`.
kOverwrite = 0x00000004u,
//! Emit short-form of the instruction.
kShortForm = 0x00000010u,
//! Emit long-form of the instruction.
kLongForm = 0x00000020u,
//! Conditional jump is likely to be taken.
kTaken = 0x00000040u,
//! Conditional jump is unlikely to be taken.
kNotTaken = 0x00000080u,
// X86 & X64 Options
// -----------------
//! Use ModMR instead of ModRM if applicable.
kX86_ModMR = 0x00000100u,
//! Use ModRM instead of ModMR if applicable.
kX86_ModRM = 0x00000200u,
//! Use 3-byte VEX prefix if possible (AVX) (must be 0x00000400).
kX86_Vex3 = 0x00000400u,
//! Use VEX prefix when both VEX|EVEX prefixes are available (HINT: AVX_VNNI).
kX86_Vex = 0x00000800u,
//! Use 4-byte EVEX prefix if possible (AVX-512) (must be 0x00001000).
kX86_Evex = 0x00001000u,
//! LOCK prefix (lock-enabled instructions only).
kX86_Lock = 0x00002000u,
//! REP prefix (string instructions only).
kX86_Rep = 0x00004000u,
//! REPNE prefix (string instructions only).
kX86_Repne = 0x00008000u,
//! XACQUIRE prefix (only allowed instructions).
kX86_XAcquire = 0x00010000u,
//! XRELEASE prefix (only allowed instructions).
kX86_XRelease = 0x00020000u,
//! AVX-512: embedded-rounding {er} and implicit {sae}.
kX86_ER = 0x00040000u,
//! AVX-512: suppress-all-exceptions {sae}.
kX86_SAE = 0x00080000u,
//! AVX-512: round-to-nearest (even) {rn-sae} (bits 00).
kX86_RN_SAE = 0x00000000u,
//! AVX-512: round-down (toward -inf) {rd-sae} (bits 01).
kX86_RD_SAE = 0x00200000u,
//! AVX-512: round-up (toward +inf) {ru-sae} (bits 10).
kX86_RU_SAE = 0x00400000u,
//! AVX-512: round-toward-zero (truncate) {rz-sae} (bits 11).
kX86_RZ_SAE = 0x00600000u,
//! AVX-512: Use zeroing {k}{z} instead of merging {k}.
kX86_ZMask = 0x00800000u,
//! AVX-512: Mask to get embedded rounding bits (2 bits).
kX86_ERMask = kX86_RZ_SAE,
//! AVX-512: Mask of all possible AVX-512 options except EVEX prefix flag.
kX86_AVX512Mask = 0x00FC0000u,
//! Force REX.B and/or VEX.B field (X64 only).
kX86_OpCodeB = 0x01000000u,
//! Force REX.X and/or VEX.X field (X64 only).
kX86_OpCodeX = 0x02000000u,
//! Force REX.R and/or VEX.R field (X64 only).
kX86_OpCodeR = 0x04000000u,
//! Force REX.W and/or VEX.W field (X64 only).
kX86_OpCodeW = 0x08000000u,
//! Force REX prefix (X64 only).
kX86_Rex = 0x40000000u,
//! Invalid REX prefix (set by X86 or when AH|BH|CH|DH regs are used on X64).
kX86_InvalidRex = 0x80000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstOptions)
//! Instruction control flow.
enum class InstControlFlow : uint32_t {
//! Regular instruction.
kRegular = 0u,
//! Unconditional jump.
kJump = 1u,
//! Conditional jump (branch).
kBranch = 2u,
//! Function call.
kCall = 3u,
//! Function return.
kReturn = 4u,
//! Maximum value of `InstControlFlow`.
kMaxValue = kReturn
};
//! Hint that is used when both input operands to the instruction are the same.
//!
//! Provides hints to the instruction RW query regarding special cases in which two or more operands are the same
//! registers. This is required by instructions such as XOR, AND, OR, SUB, etc... These hints will influence the
//! RW operations query.
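//!
//! For example (an illustrative note): `xor eax, eax` matches \ref kWO - the content of the inputs doesn't matter
//! and the operand becomes write-only, whereas `and eax, eax` matches \ref kRO - the operation doesn't change the
//! register's content.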
enum class InstSameRegHint : uint8_t {
//! No special handling.
kNone = 0,
//! Operands become read-only, the operation doesn't change the content - `X & X` and similar.
kRO = 1,
//! Operands become write-only, the content of the input(s) don't matter - `X ^ X`, `X - X`, and similar.
kWO = 2
};
//! Instruction id, options, and extraReg in a single structure. This structure exists mainly to simplify analysis
//! and validation API that requires `BaseInst` and `Operand[]` array.
class BaseInst {
public:
//! \name Members
//! \{
//! Instruction id with modifiers.
InstId _id;
//! Instruction options.
InstOptions _options;
//! Extra register used by the instruction (either REP register or AVX-512 selector).
RegOnly _extraReg;
enum Id : uint32_t {
//! Invalid or uninitialized instruction id.
kIdNone = 0x00000000u,
//! Abstract instruction (BaseBuilder and BaseCompiler).
kIdAbstract = 0x80000000u
};
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new BaseInst instance with `id` and `options` set.
//!
//! Default values of `id` and `options` are zero, which describes a 'none' instruction. Such an instruction is
//! guaranteed to never exist for any architecture supported by AsmJit.
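//!
//! A short sketch (illustrative) of constructing an instruction for analysis or validation APIs:
//!
//! ```
//! BaseInst inst(x86::Inst::kIdAdd, InstOptions::kX86_Lock);
//! ```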
ASMJIT_INLINE_NODEBUG explicit BaseInst(InstId instId = 0, InstOptions options = InstOptions::kNone) noexcept
: _id(instId),
_options(options),
_extraReg() {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const RegOnly& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg(extraReg) {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const BaseReg& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg { extraReg.signature(), extraReg.id() } {}
//! \}
//! \name Instruction id and modifiers
//! \{
//! Returns the instruction id with modifiers.
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; }
//! Sets the instruction id and modifiers from `id`.
ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; }
//! Resets the instruction id and modifiers to zero, see \ref kIdNone.
ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; }
//! Returns a real instruction id that doesn't contain any modifiers.
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
template<InstIdParts kPart>
ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept {
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
}
template<InstIdParts kPart>
ASMJIT_INLINE_NODEBUG void setInstIdPart(uint32_t value) noexcept {
_id = (_id & ~uint32_t(kPart)) | (value << Support::ConstCTZ<uint32_t(kPart)>::value);
}
//! \}
//! \name Instruction Options
//! \{
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; }
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; }
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; }
ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; }
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; }
//! \}
//! \name Extra Register
//! \{
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! \}
//! \name ARM Specific
//! \{
ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
//! \}
//! \name Statics
//! \{
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::DataType dt, arm::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
static ASMJIT_INLINE_NODEBUG constexpr InstId extractRealId(uint32_t id) noexcept {
return id & uint32_t(InstIdParts::kRealId);
}
static ASMJIT_INLINE_NODEBUG constexpr arm::CondCode extractARMCondCode(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
}
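//!
//! For example (an illustrative sketch), a conditional AArch64 branch id can be composed and decomposed like this:
//!
//! ```
//! InstId id = BaseInst::composeARMInstId(a64::Inst::kIdB, arm::CondCode::kEQ);
//! InstId realId = BaseInst::extractRealId(id);          // == a64::Inst::kIdB
//! arm::CondCode cc = BaseInst::extractARMCondCode(id);  // == arm::CondCode::kEQ
//! ```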
//! \}
};
//! CPU read/write flags used by \ref InstRWInfo.
//!
//! These flags can be used to get a basic overview about CPU specifics flags used by instructions.
enum class CpuRWFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
// Common RW Flags (0x000000FF)
// ----------------------------
//! Carry flag.
kCF = 0x00000001u,
//! Signed overflow flag.
kOF = 0x00000002u,
//! Sign flag (negative/sign, if set).
kSF = 0x00000004u,
//! Zero and/or equality flag (1 if zero/equal).
kZF = 0x00000008u,
// X86 Specific RW Flags (0xFFFFFF00)
// ----------------------------------
//! Carry flag (X86, X86_64).
kX86_CF = kCF,
//! Overflow flag (X86, X86_64).
kX86_OF = kOF,
//! Sign flag (X86, X86_64).
kX86_SF = kSF,
//! Zero flag (X86, X86_64).
kX86_ZF = kZF,
//! Adjust flag (X86, X86_64).
kX86_AF = 0x00000100u,
//! Parity flag (X86, X86_64).
kX86_PF = 0x00000200u,
//! Direction flag (X86, X86_64).
kX86_DF = 0x00000400u,
//! Interrupt enable flag (X86, X86_64).
kX86_IF = 0x00000800u,
//! Alignment check flag (X86, X86_64).
kX86_AC = 0x00001000u,
//! FPU C0 status flag (X86, X86_64).
kX86_C0 = 0x00010000u,
//! FPU C1 status flag (X86, X86_64).
kX86_C1 = 0x00020000u,
//! FPU C2 status flag (X86, X86_64).
kX86_C2 = 0x00040000u,
//! FPU C3 status flag (X86, X86_64).
kX86_C3 = 0x00080000u
};
ASMJIT_DEFINE_ENUM_FLAGS(CpuRWFlags)
//! Operand read/write flags describe how the operand is accessed and some additional features.
enum class OpRWFlags {
//! No flags.
kNone = 0,
//! Operand is read.
kRead = 0x00000001u,
//! Operand is written.
kWrite = 0x00000002u,
//! Operand is both read and written.
kRW = 0x00000003u,
//! Register operand can be replaced by a memory operand.
kRegMem = 0x00000004u,
//! The register must be allocated to the index of the previous register + 1.
//!
//! This flag is used by all architectures to describe instructions that use consecutive registers, where only the
//! first one is encoded in the instruction, and the others are just a sequence that starts with the first one. On
//! X86/X86_64 architecture this is used by instructions such as V4FMADDPS, V4FMADDSS, V4FNMADDPS, V4FNMADDSS,
//! VP4DPWSSD, VP4DPWSSDS, VP2INTERSECTD, and VP2INTERSECTQ. On ARM/AArch64 this is used by vector load and store
//! instructions that can load or store multiple registers at once.
kConsecutive = 0x00000008u,
//! The `extendByteMask()` represents a zero extension.
kZExt = 0x00000010u,
//! The register must have assigned a unique physical ID, which cannot be assigned to any other register.
kUnique = 0x00000080u,
//! Register operand must use \ref OpRWInfo::physId().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref OpRWInfo::physId().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
//!
//! X86 Specific
//! ------------
//!
//! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
kMemFake = 0x00000400u,
//! Base register of the memory operand will be read.
kMemBaseRead = 0x00001000u,
//! Base register of the memory operand will be written.
kMemBaseWrite = 0x00002000u,
//! Base register of the memory operand will be read & written.
kMemBaseRW = 0x00003000u,
//! Index register of the memory operand will be read.
kMemIndexRead = 0x00004000u,
//! Index register of the memory operand will be written.
kMemIndexWrite = 0x00008000u,
//! Index register of the memory operand will be read & written.
kMemIndexRW = 0x0000C000u,
//! Base register of the memory operand will be modified before the operation.
kMemBasePreModify = 0x00010000u,
//! Base register of the memory operand will be modified after the operation.
kMemBasePostModify = 0x00020000u
};
ASMJIT_DEFINE_ENUM_FLAGS(OpRWFlags)
// Don't remove these asserts. Read/Write flags are used extensively
// by Compiler and they must always be compatible with constants below.
static_assert(uint32_t(OpRWFlags::kRead) == 0x1, "OpRWFlags::kRead flag must be 0x1");
static_assert(uint32_t(OpRWFlags::kWrite) == 0x2, "OpRWFlags::kWrite flag must be 0x2");
static_assert(uint32_t(OpRWFlags::kRegMem) == 0x4, "OpRWFlags::kRegMem flag must be 0x4");
//! Read/Write information related to a single operand, used by \ref InstRWInfo.
struct OpRWInfo {
//! \name Members
//! \{
//! Read/Write flags.
OpRWFlags _opFlags;
//! Physical register index, if required.
uint8_t _physId;
//! Size of a possible memory operand that can replace a register operand.
uint8_t _rmSize;
//! If non-zero, then this is a consecutive lead register, and the value describes how many registers follow.
uint8_t _consecutiveLeadCount;
//! Reserved for future use.
uint8_t _reserved[1];
//! Read bit-mask where each bit represents one byte read from Reg/Mem.
uint64_t _readByteMask;
//! Write bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _writeByteMask;
//! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _extendByteMask;
//! \}
//! \name Reset
//! \{
//! Resets this operand information to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = OpRWInfo{}; }
//! Resets this operand info (resets all members) and sets common information
//! to the given `opFlags`, `regSize`, and possibly `physId`.
inline void reset(OpRWFlags opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
_opFlags = opFlags;
_physId = uint8_t(physId);
_rmSize = Support::test(opFlags, OpRWFlags::kRegMem) ? uint8_t(regSize) : uint8_t(0);
_consecutiveLeadCount = 0;
_resetReserved();
uint64_t mask = Support::lsbMask<uint64_t>(regSize);
_readByteMask = Support::test(opFlags, OpRWFlags::kRead) ? mask : uint64_t(0);
_writeByteMask = Support::test(opFlags, OpRWFlags::kWrite) ? mask : uint64_t(0);
_extendByteMask = 0;
}
ASMJIT_INLINE_NODEBUG void _resetReserved() noexcept {
_reserved[0] = 0;
}
//! \}
//! \name Operand Flags
//! \{
//! Returns operand flags.
ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`.
ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
//! Adds the given `flags` to operand flags.
ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags.
ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from.
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
//! Tests whether this operand is written to.
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write.
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only.
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only.
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
//! Returns the number of consecutive registers that follow, if this operand is a consecutive lead register (zero otherwise).
ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
//! Tests whether this operand is Reg/Mem.
//!
//! Reg/Mem operands can use either register or memory.
ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended.
ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
//! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register
//! operands.
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); }
//! \}
//! \name Memory Flags
//! \{
//! Tests whether this is a fake memory operand, which is used only because of encoding requirements. Fake memory
//! operands do not access any memory; they are only used to encode registers.
ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers.
ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers.
ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \}
//! \name Physical Register ID
//! \{
//! Returns a physical id of the register that is fixed for this operand.
//!
//! Returns \ref BaseReg::kIdBad if any register can be used.
ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; }
//! Tests whether \ref physId() would return a valid physical register id.
ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand.
ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
//! Sets Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
//! \}
//! \name Read & Write Masks
//! \{
//! Returns read mask.
ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; }
//! Returns write mask.
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Returns extend mask.
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets read mask.
ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
//! Sets write mask.
ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
//! Sets extend mask.
ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
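//!
//! For example (an illustrative note): a plain 4-byte register write yields a write mask of
//! `Support::lsbMask<uint64_t>(4)`, which is `0x0F` - one bit per byte written - matching how \ref reset()
//! computes these masks from `regSize`.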
//! \}
};
//! Flags used by \ref InstRWInfo.
enum class InstRWFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
//! Describes a move operation.
//!
//! This flag is used by RA to eliminate moves that are guaranteed to be moves only.
kMovOp = 0x00000001u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstRWFlags)
//! Read/Write information of an instruction.
struct InstRWInfo {
//! \name Members
//! \{
//! Instruction flags, see \ref InstRWFlags.
InstRWFlags _instFlags;
//! CPU flags read.
CpuRWFlags _readFlags;
//! CPU flags written.
CpuRWFlags _writeFlags;
//! Count of operands.
uint8_t _opCount;
//! CPU feature required for replacing register operand with memory operand.
uint8_t _rmFeature;
//! Reserved for future use.
uint8_t _reserved[18];
//! Read/Write info of extra register (rep{} or kz{}).
OpRWInfo _extraReg;
//! Read/Write info of instruction operands.
OpRWInfo _operands[Globals::kMaxOpCount];
//! \}
//! \name Commons
//! \{
//! Resets this RW information to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = InstRWInfo{}; }
//! \}
//! \name Instruction Flags
//! \{
//! Returns flags associated with the instruction, see \ref InstRWFlags.
ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; }
//! Tests whether the instruction flags contain `flag`.
ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); }
//! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp.
ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); }
//! \}
//! \name CPU Flags Information
//! \{
//! Returns a mask of CPU flags read.
ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; }
//! Returns a mask of CPU flags written.
ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns the CPU feature required to replace a register operand with a memory operand. If the returned feature
//! is zero (none) then this instruction either doesn't provide a memory operand combination or no extra CPU
//! feature is required.
//!
//! X86 Specific
//! ------------
//!
//! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example
//! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for `vpslldq reg, mem, imm` combination.
ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \}
//! \name Operand Read/Write Information
//! \{
//! Returns RW information of extra register operand (extraReg).
ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; }
//! Returns RW information of all instruction's operands.
ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; }
//! Returns RW information of the operand at the given `index`.
inline const OpRWInfo& operand(size_t index) const noexcept {
ASMJIT_ASSERT(index < Globals::kMaxOpCount);
return _operands[index];
}
//! Returns the number of operands this instruction has.
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; }
//! \}
};
//! Validation flags that can be used with \ref InstAPI::validate().
enum class ValidationFlags : uint32_t {
//! No flags.
kNone = 0,
//! Allow virtual registers in the instruction.
kEnableVirtRegs = 0x01u
};
ASMJIT_DEFINE_ENUM_FLAGS(ValidationFlags)
//! Instruction API.
namespace InstAPI {
#ifndef ASMJIT_NO_TEXT
//! Appends the name of the instruction specified by `instId` and `instOptions` into the `output` string.
//!
//! \note Instruction options would only affect instruction prefix & suffix, other options would be ignored.
//! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, String& output) noexcept;
//! Parses an instruction name in the given string `s`. Length is specified by `len` argument, which can be
//! `SIZE_MAX` if `s` is known to be null terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the given `validationFlags`.
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
//! Gets Read/Write information of the given instruction.
ASMJIT_API Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
//! Gets CPU features required by the given instruction.
ASMJIT_API Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
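//! A usage sketch (illustrative; error checking omitted) that queries read/write information of a single X86
//! instruction:
//!
//! ```
//! BaseInst inst(x86::Inst::kIdAdd);
//! Operand_ operands[] = { x86::eax, x86::ebx };
//!
//! InstRWInfo rwInfo;
//! InstAPI::queryRWInfo(Arch::kX86, inst, operands, 2, &rwInfo);
//! ```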
} // {InstAPI}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_INST_H_INCLUDED

File diff suppressed because it is too large

View File

@@ -1,569 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/globals.h"
#include "../core/support.h"
#include "../core/virtmem.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
//! Options used by \ref JitAllocator.
enum class JitAllocatorOptions : uint32_t {
//! No options.
kNone = 0,
//! Enables the use of anonymous memory-mapped memory that is mapped into two buffers, each having a different pointer.
//! The first buffer has read and execute permissions and the second buffer has read+write permissions.
//!
//! See \ref VirtMem::allocDualMapping() for more details about this feature.
//!
//! \remarks Dual mapping is automatically turned on by \ref JitAllocator in case of a hardened runtime that
//! enforces a `W^X` policy, so specifying this flag essentially forces the use of dual mapped pages even when RWX
//! pages can be allocated and dual mapping is not necessary.
kUseDualMapping = 0x00000001u,
//! Enables the use of multiple pools with increasing granularity instead of a single pool. This flag would enable
//! 3 internal pools in total having 64, 128, and 256 bytes granularity.
//!
//! This feature is only recommended for users that generate a lot of code and would like to minimize the overhead
//! of `JitAllocator` itself by having blocks of different allocation granularities. Using this feature only for a
//! few allocations won't pay off, as the allocator may need to create more blocks initially before it can take
//! advantage of variable block granularity.
kUseMultiplePools = 0x00000002u,
//! Always fill reserved memory by a fill-pattern.
//!
//! Causes a new block to be cleared by the fill pattern and freshly released memory to be cleared before making
//! it ready for another use.
kFillUnusedMemory = 0x00000004u,
//! When this flag is set, the allocator immediately releases unused blocks during `release()` or `reset()`.
//! When this flag is not set, the allocator keeps one empty block in each pool to prevent excessive virtual
//! memory allocations and deallocations in border cases, which involve constantly allocating and deallocating a
//! single block caused by repetitively calling `alloc()` and `release()` when the allocator either has no blocks
//! or has all blocks fully occupied.
kImmediateRelease = 0x00000008u,
//! This flag enables placing functions (or allocating memory) at the very beginning of each memory mapped region.
//!
//! Initially, this was the default behavior. However, LLVM developers working on undefined behavior sanitizer
//! (UBSAN) decided that they want to store metadata before each function and to access such metadata before an
//! indirect function call. This means that the instrumented code always reads from `[fnPtr - 8]` to decode whether
//! the function has its metadata present. However, reading 8 bytes below a function means that if a function is
//! placed at the very beginning of a memory mapped region, it could try to read bytes that are inaccessible. And
//! since AsmJit can be compiled as a shared library and used by applications instrumented by UBSAN, it's not
//! possible to conditionally compile the support only when necessary.
//!
//! \important This flag controls a workaround to make it possible to use LLVM UBSAN with AsmJit's \ref JitAllocator.
//! There is no undefined behavior even when `kDisableInitialPadding` is used, however, that doesn't really matter
//! as LLVM's UBSAN introduces one, and according to LLVM developers it's a "trade-off". This flag is safe to use
//! when the code is not instrumented with LLVM's UBSAN.
kDisableInitialPadding = 0x00000010u,
//! Enables the use of large pages, if they are supported and the process can actually allocate them.
//!
//! \important This flag is a hint - if large pages can be allocated, JitAllocator would try to allocate them.
//! However, if the allocation fails, it will still try to fallback to use regular pages as \ref JitAllocator
//! is designed to minimize allocation failures, so a regular page is better than no page at all. Also, if a
//! block \ref JitAllocator wants to allocate is too small to consume a whole large page, regular page(s) will
//! be allocated as well.
kUseLargePages = 0x00000020u,
//! Forces \ref JitAllocator to always align block size to be at least as big as a large page, if large pages are
//! enabled. This option does nothing if large pages are disabled.
//!
//! \remarks If \ref kUseLargePages option is used, the allocator would prefer large pages only when allocating a
//! block that has a sufficient size. Usually the allocator first allocates a smaller block, and when more requests
//! come it will start increasing the block size of subsequent allocations. This option makes sure that even the
//! first allocation is at least as large as a minimum large page when large pages are enabled and can be allocated.
kAlignBlockSizeToLargePage = 0x00000040u,
//! Use a custom fill pattern; must be combined with \ref kFillUnusedMemory.
kCustomFillPattern = 0x10000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(JitAllocatorOptions)
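//! A sketch of enabling a custom fill pattern via \ref JitAllocator::CreateParams (illustrative only;
//! `0xCCCCCCCCu` encodes X86 `int3` bytes and is just one possible choice of pattern):
//!
//! ```
//! JitAllocator::CreateParams params {};
//! params.options = JitAllocatorOptions::kFillUnusedMemory |
//!                  JitAllocatorOptions::kCustomFillPattern;
//! params.fillPattern = 0xCCCCCCCCu;
//! JitAllocator allocator(&params);
//! ```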
//! A simple implementation of memory manager that uses `asmjit::VirtMem`
//! functions to manage virtual memory for JIT compiled code.
//!
//! Implementation notes:
//!
//! - Granularity of allocated blocks is different from the granularity of a typical C malloc. In addition, the
//! allocator can use several memory pools having a different granularity to minimize the maintenance overhead.
//! The multiple pools feature requires the \ref JitAllocatorOptions::kUseMultiplePools flag to be set.
//!
//! - The allocator doesn't store any information in executable memory, instead, the implementation uses two
//! bit-vectors to manage allocated memory of each allocator-block. The first bit-vector called 'used' is used to
//! track used memory (where each bit represents memory size defined by granularity) and the second bit vector called
//! 'stop' is used as a sentinel to mark where the allocated area ends.
//!
//! - Internally, the allocator also uses an RB tree to keep track of all blocks across all pools. Each inserted
//! block is added to the tree so it can be matched quickly during `release()` and `shrink()`.
class JitAllocator {
public:
ASMJIT_NONCOPYABLE(JitAllocator)
//! Visible \ref JitAllocator implementation data.
struct Impl {
//! Allocator options.
JitAllocatorOptions options;
//! Base block size (0 if the allocator is not initialized).
uint32_t blockSize;
//! Base granularity (0 if the allocator is not initialized).
uint32_t granularity;
//! A pattern that is used to fill unused memory if secure mode is enabled.
uint32_t fillPattern;
};
//! \name Members
//! \{
//! Allocator implementation (private).
Impl* _impl;
//! \}
//! \name Construction & Destruction
//! \{
//! Parameters that can be passed to `JitAllocator` constructor.
//!
//! Use it like this:
//!
//! ```
//! // Zero initialize (zero means the default value) and change what you need.
//! JitAllocator::CreateParams params {};
//! params.blockSize = 1024 * 1024;
//!
//! // Create the allocator.
//! JitAllocator allocator(&params);
//! ```
struct CreateParams {
//! Allocator options.
//!
//! No options are used by default.
JitAllocatorOptions options = JitAllocatorOptions::kNone;
//! Base size of a single block in bytes (default 64kB).
//!
//! \remarks Block size must be equal to or greater than the page size and must be a power of 2. If the input is
//! not valid then the default block size will be used instead.
uint32_t blockSize = 0;
//! Base granularity (and also natural alignment) of allocations in bytes (default 64).
//!
//! Since the `JitAllocator` uses bit-arrays to mark used memory, the granularity also specifies how many bytes
//! correspond to a single bit in such a bit-array. Higher granularity means more waste of virtual memory (as it
//! increases the natural alignment), but smaller bit-arrays, as fewer bits would be required per single block.
uint32_t granularity = 0;
//! Pattern to use to fill unused memory.
//!
//! Only used if \ref JitAllocatorOptions::kCustomFillPattern is set.
uint32_t fillPattern = 0;
//! Resets the content of `CreateParams`.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
};
//! Creates a `JitAllocator` instance.
ASMJIT_API explicit JitAllocator(const CreateParams* params = nullptr) noexcept;
//! Destroys the `JitAllocator` instance and release all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
//! Tests whether the allocator is initialized (the base block size is non-zero only after successful initialization).
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize != 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
//!
//! \remarks This function is not thread-safe as it's designed to be used when nobody else is using allocator.
//! The reason is that there is no point of calling `reset()` when the allocator is still in use.
ASMJIT_API void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns allocator options, see \ref JitAllocatorOptions.
ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate).
ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator.
ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns the pattern used to fill unused memory when \ref JitAllocatorOptions::kFillUnusedMemory is set.
ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \}
//! \name Alloc & Release
//! \{
//! A memory reference returned by \ref JitAllocator::alloc().
//!
//! Span contains everything needed to actually write new code to the memory chunk it references.
class Span {
public:
//! \name Constants
//! \{
//! Span flags
enum class Flags : uint32_t {
//! No flags.
kNone = 0u,
//! The process has never executed the region of the span.
//!
//! If this flag is set on a \ref Span, it means that the allocator can avoid flushing the instruction cache
//! after code has been written to it.
kInstructionCacheClean = 0x00000001u
};
//! \}
//! \name Members
//! \{
//! Address of memory that has Read and Execute permissions.
void* _rx = nullptr;
//! Address of memory that has Read and Write permissions.
void* _rw = nullptr;
//! Size of the span in bytes (rounded up to the allocation granularity).
size_t _size = 0;
//! Pointer that references a memory block maintained by \ref JitAllocator.
//!
//! This pointer is considered private and should never be used nor inspected outside of AsmJit.
void* _block = nullptr;
//! Span flags.
Flags _flags = Flags::kNone;
//! Reserved for future use.
uint32_t _reserved = 0;
//! \}
//! \name Accessors
//! \{
//! Returns a pointer having Read & Execute permissions (references executable memory).
//!
//! This pointer is never NULL if the allocation succeeded, it points to an executable memory.
ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; }
//! Returns a pointer having Read & Write permissions (references writable memory).
//!
//! Depending on the type of the allocation strategy this could either be:
//!
//! - the same address as returned by `rx()` if the allocator uses RWX mapping (pages have all of Read, Write,
//! and Execute permissions) or MAP_JIT, which requires either \ref ProtectJitReadWriteScope or to call
//! VirtMem::protectJitMemory() manually.
//! - a valid pointer, but not the same as `rx` - this would be valid if dual mapping is used.
//! - NULL pointer, in case the allocation strategy doesn't use RWX, MAP_JIT, or dual mapping. In this
//! case only \ref JitAllocator can copy new code into the executable memory referenced by \ref rx().
//!
//! \note If `rw()` returns a non-null pointer, it's important to use either VirtMem::protectJitMemory() or
//! \ref ProtectJitReadWriteScope to guard the write, because in case of `MAP_JIT` it would temporarily switch
//! the permissions of the pointer to RW (that's a per-thread permission). If \ref ProtectJitReadWriteScope is
//! not used, it's important to flush the instruction cache via \ref VirtMem::flushInstructionCache() after the
//! write is done.
ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; }
//! Returns size of this span, aligned to the allocator granularity.
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns span flags.
ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; }
//! Shrinks this span to `newSize`.
//!
//! \note This is the only function that is able to change the size of a span, and its only use case is to
//! shrink the span size during \ref JitAllocator::write(). When the writer detects that the span size has
//! shrunk, it will automatically shrink the memory used by the span and propagate the new aligned size to the caller.
ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); }
//! Returns whether \ref rw() returns a non-null pointer.
ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; }
//! \}
};
//! Allocates a new memory span of the requested `size`.
ASMJIT_API Error alloc(Span& out, size_t size) noexcept;
//! Releases a memory block returned by `alloc()`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error release(void* rx) noexcept;
//! Frees extra memory allocated with `rx` by shrinking it to the given `newSize`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error shrink(Span& span, size_t newSize) noexcept;
//! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`.
//!
//! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span.
ASMJIT_API Error query(Span& out, void* rx) const noexcept;
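//! A minimal allocation sketch (illustrative only; error handling is simplified):
//!
//! ```
//! JitAllocator allocator;
//!
//! JitAllocator::Span span;
//! if (allocator.alloc(span, 64) == kErrorOk) {
//!   // Write code via write() or the span's rw() pointer, then execute via rx().
//!   allocator.release(span.rx());
//! }
//! ```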
#if !defined(ASMJIT_NO_DEPRECATED)
//! Allocates a new memory block of the requested `size`.
ASMJIT_DEPRECATED("Use alloc(Span& out, size_t size) instead")
ASMJIT_FORCE_INLINE Error alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcept {
Span span;
Error err = alloc(span, size);
*rwPtrOut = span.rw();
*rxPtrOut = span.rx();
return err;
}
ASMJIT_DEPRECATED("Use shrink(Span& span, size_t newSize) instead")
ASMJIT_FORCE_INLINE Error shrink(void* rxPtr, size_t newSize) noexcept {
Span span;
ASMJIT_PROPAGATE(query(span, rxPtr));
return (span.size() > newSize) ? shrink(span, newSize) : Error(kErrorOk);
}
ASMJIT_DEPRECATED("Use query(Span& out, void* rx) instead")
ASMJIT_FORCE_INLINE Error query(void* rxPtr, void** rxPtrOut, void** rwPtrOut, size_t* sizeOut) const noexcept {
Span span;
Error err = query(span, rxPtr);
*rxPtrOut = span.rx();
*rwPtrOut = span.rw();
*sizeOut = span.size();
return err;
}
#endif
//! \}
//! \name Write Operations
//! \{
//! A callback invoked by \ref JitAllocator::write() with a writable \ref Span; the callback is expected to write
//! the code into the span and return \ref kErrorOk on success.
typedef Error (ASMJIT_CDECL* WriteFunc)(Span& span, void* userData) ASMJIT_NOEXCEPT_TYPE;
//! Writes `size` bytes from `src` into `span` at the given `offset`, taking care of the memory protection and
//! instruction cache flushing the target platform requires.
ASMJIT_API Error write(
Span& span,
size_t offset,
const void* src,
size_t size,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
//! Writes into `span` by calling `writeFunc` with a writable span and the given `userData`.
ASMJIT_API Error write(
Span& span,
WriteFunc writeFunc,
void* userData,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
//! Writes into `span` by calling the given `lambdaFunc` with a writable span.
template<class Lambda>
ASMJIT_FORCE_INLINE Error write(
Span& span,
Lambda&& lambdaFunc,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
return lambdaFunc(span);
};
return write(span, wrapperFunc, (void*)(&lambdaFunc), policy);
}
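//! A sketch of copying machine code through the lambda overload (illustrative only; `machineCode` and
//! `machineCodeSize` are assumed to exist):
//!
//! ```
//! allocator.write(span, [&](JitAllocator::Span& span) noexcept -> Error {
//!   memcpy(span.rw(), machineCode, machineCodeSize);
//!   return kErrorOk;
//! });
//! ```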
//! \}
//! \name Write Operations with Scope
//! \{
//! \cond INTERNAL
//! Write scope data.
//!
//! This is mostly for internal purposes, please use \ref WriteScope instead.
struct WriteScopeData {
//! \name Members
//! \{
//! Link to the allocator.
JitAllocator* _allocator;
//! Cache policy passed to \ref JitAllocator::beginWriteScope().
VirtMem::CachePolicy _policy;
//! Internal flags used by the implementation.
uint32_t _flags;
//! Internal data used by the implementation.
size_t _data[64];
//! \}
};
//! Begins a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope constructor instead.
ASMJIT_API Error beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
//! Ends a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope destructor instead.
ASMJIT_API Error endWriteScope(WriteScopeData& scope) noexcept;
//! Flushes accumulated changes in a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope destructor or \ref WriteScope::flush() instead.
ASMJIT_API Error flushWriteScope(WriteScopeData& scope) noexcept;
//! Alternative to `JitAllocator::write(span, offset, src, size)`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept;
//! Alternative to `JitAllocator::write(span, writeFunc, userData)`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept;
//! Alternative to `JitAllocator::write(span, <lambda>)`, but under a write `scope`.
//!
//! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
template<class Lambda>
inline Error scopedWrite(WriteScopeData& scope, Span& span, Lambda&& lambdaFunc) noexcept {
WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
return lambdaFunc(span);
};
return scopedWrite(scope, span, wrapperFunc, (void*)(&lambdaFunc));
}
//! \endcond
//! Write scope can be used to create a single scope that is optimized for writing multiple spans.
class WriteScope : public WriteScopeData {
public:
ASMJIT_NONCOPYABLE(WriteScope)
//! \name Construction & Destruction
//! \{
//! Begins a write scope.
inline explicit WriteScope(JitAllocator* allocator, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
allocator->beginWriteScope(*this, policy);
}
//! Ends a write scope.
inline ~WriteScope() noexcept {
if (_allocator)
_allocator->endWriteScope(*this);
}
//! \}
//! \name Accessors
//! \{
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; }
ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; }
//! \}
//! \name Operations
//! \{
//! Similar to `JitAllocator::write(span, offset, src, size)`, but under a write scope.
ASMJIT_INLINE_NODEBUG Error write(Span& span, size_t offset, const void* src, size_t size) noexcept {
return _allocator->scopedWrite(*this, span, offset, src, size);
}
//! Similar to `JitAllocator::write(span, writeFunc, userData)`, but under a write scope.
ASMJIT_INLINE_NODEBUG Error write(Span& span, WriteFunc writeFunc, void* userData) noexcept {
return _allocator->scopedWrite(*this, span, writeFunc, userData);
}
//! Similar to `JitAllocator::write(span, <lambda>)`, but under a write scope.
template<class Lambda>
ASMJIT_INLINE_NODEBUG Error write(Span& span, Lambda&& lambdaFunc) noexcept {
return _allocator->scopedWrite(*this, span, lambdaFunc);
}
//! Flushes accumulated changes in this write scope.
ASMJIT_INLINE_NODEBUG Error flush() noexcept {
return _allocator->flushWriteScope(*this);
}
//! \}
};
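//! A sketch of batching multiple writes under a single scope (illustrative only; the spans and code buffers
//! are assumed to exist):
//!
//! ```
//! JitAllocator::WriteScope scope(&allocator);
//! scope.write(spanA, 0, codeA, codeASize);
//! scope.write(spanB, 0, codeB, codeBSize);
//! // Accumulated changes are flushed by the destructor (or call scope.flush() explicitly).
//! ```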
//! \}
//! \name Statistics
//! \{
//! Statistics about `JitAllocator`.
struct Statistics {
//! Number of blocks `JitAllocator` maintains.
size_t _blockCount;
//! Number of active allocations.
size_t _allocationCount;
//! How many bytes are currently used / allocated.
size_t _usedSize;
//! How many bytes are currently reserved by the allocator.
size_t _reservedSize;
//! Allocation overhead (in bytes) required to maintain all blocks.
size_t _overheadSize;
//! Resets the statistics to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; }
//! Returns count of blocks managed by `JitAllocator` at the moment.
ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; }
//! Returns the number of active allocations.
ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used.
ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment.
ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
//! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory.
ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; }
ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
};
//! Returns JIT allocator statistics.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Statistics statistics() const noexcept;
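//! A sketch of inspecting the allocator (illustrative only):
//!
//! ```
//! JitAllocator::Statistics stats = allocator.statistics();
//! printf("used %zu of %zu reserved bytes (%.1f%%)\n",
//!        stats.usedSize(), stats.reservedSize(), stats.usedSizeAsPercent());
//! ```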
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif

View File

@@ -1,79 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
#include "../core/cpuinfo.h"
#include "../core/jitruntime.h"
ASMJIT_BEGIN_NAMESPACE
JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
: _allocator(params) {
_environment = Environment::host();
_environment.setObjectFormat(ObjectFormat::kJIT);
_cpuFeatures = CpuInfo::host().features();
}
JitRuntime::~JitRuntime() noexcept {}
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
*dst = nullptr;
ASMJIT_PROPAGATE(code->flatten());
ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
size_t estimatedCodeSize = code->codeSize();
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
return DebugUtils::errored(kErrorNoCodeGenerated);
JitAllocator::Span span;
ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize));
// Relocate the code.
Error err = code->relocateToBase(uintptr_t(span.rx()));
if (ASMJIT_UNLIKELY(err)) {
_allocator.release(span.rx());
return err;
}
// Recalculate the final code size and shrink the memory we allocated for it
// in case some relocations didn't require records in the address table.
size_t codeSize = code->codeSize();
ASMJIT_ASSERT(codeSize <= estimatedCodeSize);
_allocator.write(span, [&](JitAllocator::Span& span) noexcept -> Error {
uint8_t* rw = static_cast<uint8_t*>(span.rw());
for (Section* section : code->_sections) {
size_t offset = size_t(section->offset());
size_t bufferSize = size_t(section->bufferSize());
size_t virtualSize = size_t(section->virtualSize());
ASMJIT_ASSERT(offset + bufferSize <= span.size());
memcpy(rw + offset, section->data(), bufferSize);
if (virtualSize > bufferSize) {
ASMJIT_ASSERT(offset + virtualSize <= span.size());
memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
}
}
span.shrink(codeSize);
return kErrorOk;
});
*dst = span.rx();
return kErrorOk;
}
Error JitRuntime::_release(void* p) noexcept {
return _allocator.release(p);
}
ASMJIT_END_NAMESPACE
#endif

View File

@@ -1,89 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/codeholder.h"
#include "../core/jitallocator.h"
#include "../core/target.h"
ASMJIT_BEGIN_NAMESPACE
class CodeHolder;
//! \addtogroup asmjit_virtual_memory
//! \{
//! A JIT execution runtime is a special `Target` designed to store and
//! execute generated code.
class ASMJIT_VIRTAPI JitRuntime : public Target {
public:
ASMJIT_NONCOPYABLE(JitRuntime)
//! Virtual memory allocator.
JitAllocator _allocator;
//! \name Construction & Destruction
//! \{
//! Creates a `JitRuntime` instance.
ASMJIT_API explicit JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
//! Destroys the `JitRuntime` instance.
ASMJIT_API virtual ~JitRuntime() noexcept;
ASMJIT_INLINE_NODEBUG void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept {
_allocator.reset(resetPolicy);
}
//! \}
//! \name Accessors
//! \{
//! Returns the associated `JitAllocator`.
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
//! \}
//! \name Utilities
//! \{
// NOTE: To allow passing function pointers to `add()` and `release()` the
// virtual methods are prefixed with `_` and called from templates instead.
//! Allocates memory needed for a code stored in the `CodeHolder` and relocates the code to the pointer allocated.
//!
//! The beginning of the memory allocated for the function is returned in `dst`. If the operation fails, an `Error`
//! code is returned and `dst` is explicitly set to `nullptr` (this means that you don't have to set it to null before calling `add()`).
template<typename Func>
ASMJIT_INLINE_NODEBUG Error add(Func* dst, CodeHolder* code) noexcept {
return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
}
//! Releases `p` which was obtained by calling `add()`.
template<typename Func>
ASMJIT_INLINE_NODEBUG Error release(Func p) noexcept {
return _release(Support::ptr_cast_impl<void*, Func>(p));
}
//! Type-unsafe version of `add()`.
ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
//! Type-unsafe version of `release()`.
ASMJIT_API virtual Error _release(void* p) noexcept;
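//! A sketch of the typical add/execute/release lifecycle (illustrative only; assumes an X86/X64 build with
//! the assembler compiled in):
//!
//! ```
//! typedef int (*Func)(void);
//!
//! JitRuntime rt;
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//!
//! x86::Assembler a(&code);
//! a.mov(x86::eax, 1);
//! a.ret();
//!
//! Func fn;
//! if (rt.add(&fn, &code) == kErrorOk) {
//!   int result = fn(); // result == 1.
//!   rt.release(fn);
//! }
//! ```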
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif

View File

@@ -1,77 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/logger.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// Logger - Implementation
// =======================
Logger::Logger() noexcept
: _options() {}
Logger::~Logger() noexcept {}
// [[pure virtual]]
Error Logger::_log(const char* data, size_t size) noexcept {
DebugUtils::unused(data, size);
// Do not error in this case - the logger would just sink to /dev/null.
return kErrorOk;
}
Error Logger::logf(const char* fmt, ...) noexcept {
Error err;
va_list ap;
va_start(ap, fmt);
err = logv(fmt, ap);
va_end(ap);
return err;
}
Error Logger::logv(const char* fmt, va_list ap) noexcept {
StringTmp<2048> sb;
ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap));
return log(sb);
}
// FileLogger - Implementation
// ===========================
FileLogger::FileLogger(FILE* file) noexcept
: _file(file) {}
FileLogger::~FileLogger() noexcept {}
Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file)
return kErrorOk;
if (size == SIZE_MAX)
size = strlen(data);
fwrite(data, 1, size, _file);
return kErrorOk;
}
// StringLogger - Implementation
// =============================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
Error StringLogger::_log(const char* data, size_t size) noexcept {
return _content.append(data, size);
}
ASMJIT_END_NAMESPACE
#endif

View File

@@ -1,198 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_LOGGER_H_INCLUDED
#define ASMJIT_CORE_LOGGER_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
#include "../core/formatter.h"
#ifndef ASMJIT_NO_LOGGING
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
//! Logging interface.
//!
//! This class can be inherited and reimplemented to fit your own logging needs. When reimplementing a logger,
//! override the \ref Logger::_log() method to customize the output.
//!
//! There are two `Logger` implementations offered by AsmJit:
//! - \ref FileLogger - logs into a `FILE*`.
//! - \ref StringLogger - concatenates all logs into a \ref String.
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_BASE_CLASS(Logger)
ASMJIT_NONCOPYABLE(Logger)
//! Format options.
FormatOptions _options;
//! \name Construction & Destruction
//! \{
//! Creates a `Logger` instance.
ASMJIT_API Logger() noexcept;
//! Destroys the `Logger` instance.
ASMJIT_API virtual ~Logger() noexcept;
//! \}
//! \name Format Options
//! \{
//! Returns \ref FormatOptions of this logger.
ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; }
//! \overload
ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; }
//! Sets formatting options of this Logger to `options`.
ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; }
//! Resets formatting options of this Logger to defaults.
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); }
//! Returns formatting flags.
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled.
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of a given indentation `group`.
ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `group` to `n` spaces.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `group` to 0 spaces.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); }
//! Returns padding of a given padding `group`.
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); }
//! Sets padding of a given padding `group` to `n`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); }
//! Resets padding of a given padding `group` to 0, which means that a default will be used.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); }
//! \}
//! \name Logging Interface
//! \{
//! Logs `data` of the given `size` - must be reimplemented.
//!
//! The function can accept either a null terminated string if `size` is `SIZE_MAX` or a non-null terminated
//! string of the given `size`. The function cannot assume that the data is null terminated and must handle
//! non-null terminated inputs.
ASMJIT_API virtual Error _log(const char* data, size_t size) noexcept = 0;
//! Logs the string `data`, which is either null terminated (when `size` is `SIZE_MAX`) or has the given `size`.
ASMJIT_INLINE_NODEBUG Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); }
//! Logs the content of a \ref String `str`.
ASMJIT_INLINE_NODEBUG Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
//! Formats the message by using `snprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Formats the message by using `vsnprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! \}
};
//! Logger that can log to a `FILE*`.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(FileLogger)
FILE* _file;
//! \name Construction & Destruction
//! \{
//! Creates a new `FileLogger` that logs to `FILE*`.
ASMJIT_API FileLogger(FILE* file = nullptr) noexcept;
//! Destroys the `FileLogger`.
ASMJIT_API virtual ~FileLogger() noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the logging output stream or null if the logger has no output stream.
ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null.
//!
//! \note If `file` is null, logging is disabled. When a logger is attached to a `CodeHolder` or any emitter,
//! the logging API is always called regardless of the output file. This means that if you really want to
//! disable logging at the emitter level you must not attach a logger to it.
ASMJIT_INLINE_NODEBUG void setFile(FILE* file) noexcept { _file = file; }
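//! A sketch of attaching a `FileLogger` to a `CodeHolder` (illustrative only):
//!
//! ```
//! FileLogger logger(stdout);
//! logger.addFlags(FormatFlags::kMachineCode);
//!
//! CodeHolder code;
//! code.init(Environment::host());
//! code.setLogger(&logger);
//! ```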
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(StringLogger)
//! Logger data as string.
String _content;
//! \name Construction & Destruction
//! \{
//! Creates a new `StringLogger`.
ASMJIT_API StringLogger() noexcept;
//! Destroys the `StringLogger`.
ASMJIT_API virtual ~StringLogger() noexcept;
//! \}
//! \name Logger Data Accessors
//! \{
//! Returns the content of the logger as \ref String.
//!
//! It can be moved, if desired.
ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; }
//! \overload
ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; }
//! Returns aggregated logger data as `char*` pointer.
//!
//! The pointer is owned by `StringLogger`; it can't be modified or freed.
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); }
//! Returns size of the data returned by `data()`.
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); }
//! \}
//! \name Logger Data Manipulation
//! \{
//! Clears the accumulated logger data.
ASMJIT_INLINE_NODEBUG void clear() noexcept { _content.clear(); }
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_LOGGER_H_INCLUDED

View File

@@ -1,33 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
#define ASMJIT_CORE_MISC_P_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
#define ASMJIT_LOOKUP_TABLE_4(T, I) T((I)), T((I+1)), T((I+2)), T((I+3))
#define ASMJIT_LOOKUP_TABLE_8(T, I) ASMJIT_LOOKUP_TABLE_4(T, I), ASMJIT_LOOKUP_TABLE_4(T, I + 4)
#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8)
#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16)
#define ASMJIT_LOOKUP_TABLE_40(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16), ASMJIT_LOOKUP_TABLE_8(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64)
#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128)
#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256)
#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512)
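//! A sketch of how these macros expand into constant tables (illustrative only; `VALUE_OF` is a
//! hypothetical generator macro invoked once per index):
//!
//! ```
//! #define VALUE_OF(I) uint8_t((I) * 2u)
//! static const uint8_t table[16] = { ASMJIT_LOOKUP_TABLE_16(VALUE_OF, 0) };
//! #undef VALUE_OF
//! ```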
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_MISC_P_H_INCLUDED

View File

@@ -1,132 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
// Operand - Tests
// ===============
#if defined(ASMJIT_TEST)
enum class StrongEnumForImmTests : uint32_t {
kValue0,
kValue0xFFFFFFFF = 0xFFFFFFFFu
};
UNIT(operand) {
INFO("Checking operand sizes");
EXPECT_EQ(sizeof(Operand), 16u);
EXPECT_EQ(sizeof(BaseReg), 16u);
EXPECT_EQ(sizeof(BaseMem), 16u);
EXPECT_EQ(sizeof(Imm), 16u);
EXPECT_EQ(sizeof(Label), 16u);
INFO("Checking basic functionality of Operand");
Operand a, b;
Operand dummy;
EXPECT_TRUE(a.isNone());
EXPECT_FALSE(a.isReg());
EXPECT_FALSE(a.isMem());
EXPECT_FALSE(a.isImm());
EXPECT_FALSE(a.isLabel());
EXPECT_EQ(a, b);
EXPECT_EQ(a._data[0], 0u);
EXPECT_EQ(a._data[1], 0u);
INFO("Checking basic functionality of Label");
Label label;
EXPECT_FALSE(label.isValid());
EXPECT_EQ(label.id(), Globals::kInvalidId);
INFO("Checking basic functionality of BaseReg");
EXPECT_TRUE(BaseReg().isReg());
EXPECT_FALSE(BaseReg().isValid());
EXPECT_EQ(BaseReg()._data[0], 0u);
EXPECT_EQ(BaseReg()._data[1], 0u);
EXPECT_FALSE(dummy.as<BaseReg>().isValid());
// Create some register (not specific to any architecture).
OperandSignature rSig = OperandSignature::fromOpType(OperandType::kReg) |
OperandSignature::fromRegType(RegType::kVec128) |
OperandSignature::fromRegGroup(RegGroup::kVec) |
OperandSignature::fromSize(8);
BaseReg r1(rSig, 5);
EXPECT_TRUE(r1.isValid());
EXPECT_TRUE(r1.isReg());
EXPECT_TRUE(r1.isReg(RegType::kVec128));
EXPECT_TRUE(r1.isPhysReg());
EXPECT_FALSE(r1.isVirtReg());
EXPECT_EQ(r1.signature(), rSig);
EXPECT_EQ(r1.type(), RegType::kVec128);
EXPECT_EQ(r1.group(), RegGroup::kVec);
EXPECT_EQ(r1.size(), 8u);
EXPECT_EQ(r1.id(), 5u);
EXPECT_TRUE(r1.isReg(RegType::kVec128, 5)); // RegType and Id.
EXPECT_EQ(r1._data[0], 0u);
EXPECT_EQ(r1._data[1], 0u);
// The same type of register having different id.
BaseReg r2(r1, 6);
EXPECT_TRUE(r2.isValid());
EXPECT_TRUE(r2.isReg());
EXPECT_TRUE(r2.isReg(RegType::kVec128));
EXPECT_TRUE(r2.isPhysReg());
EXPECT_FALSE(r2.isVirtReg());
EXPECT_EQ(r2.signature(), rSig);
EXPECT_EQ(r2.type(), r1.type());
EXPECT_EQ(r2.group(), r1.group());
EXPECT_EQ(r2.size(), r1.size());
EXPECT_EQ(r2.id(), 6u);
EXPECT_TRUE(r2.isReg(RegType::kVec128, 6));
r1.reset();
EXPECT_FALSE(r1.isReg());
EXPECT_FALSE(r1.isValid());
INFO("Checking basic functionality of BaseMem");
BaseMem m;
EXPECT_TRUE(m.isMem());
EXPECT_EQ(m, BaseMem());
EXPECT_FALSE(m.hasBase());
EXPECT_FALSE(m.hasIndex());
EXPECT_FALSE(m.hasOffset());
EXPECT_TRUE(m.isOffset64Bit());
EXPECT_EQ(m.offset(), 0);
m.setOffset(-1);
EXPECT_EQ(m.offsetLo32(), -1);
EXPECT_EQ(m.offset(), -1);
int64_t x = int64_t(0xFF00FF0000000001u);
int32_t xHi = int32_t(0xFF00FF00u);
m.setOffset(x);
EXPECT_EQ(m.offset(), x);
EXPECT_EQ(m.offsetLo32(), 1);
EXPECT_EQ(m.offsetHi32(), xHi);
INFO("Checking basic functionality of Imm");
Imm immValue(-42);
EXPECT_EQ(immValue.type(), ImmType::kInt);
EXPECT_EQ(Imm(-1).value(), -1);
EXPECT_EQ(imm(-1).value(), -1);
EXPECT_EQ(immValue.value(), -42);
EXPECT_EQ(imm(0xFFFFFFFF).value(), int64_t(0xFFFFFFFF));
Imm immDouble(0.4);
EXPECT_EQ(immDouble.type(), ImmType::kDouble);
EXPECT_EQ(immDouble.valueAs<double>(), 0.4);
EXPECT_EQ(immDouble, imm(0.4));
EXPECT_EQ(Imm(StrongEnumForImmTests::kValue0).value(), 0);
EXPECT_EQ(Imm(StrongEnumForImmTests::kValue0xFFFFFFFF).value(), 0xFFFFFFFFu);
}
#endif
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large

View File

@@ -1,110 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/osutils_p.h"
#include "../core/support.h"
#if defined(_WIN32)
#include <atomic>
#else
#include <fcntl.h>
#include <unistd.h>
#if defined(__APPLE__)
#include <mach/mach_time.h>
#else
#include <time.h>
#endif
#endif
ASMJIT_BEGIN_NAMESPACE
uint32_t OSUtils::getTickCount() noexcept {
#if defined(_WIN32)
enum HiResStatus : uint32_t {
kHiResUnknown = 0,
kHiResAvailable = 1,
kHiResNotAvailable = 2
};
static std::atomic<uint32_t> _hiResStatus(kHiResUnknown);
static volatile double _hiResFreq(0);
uint32_t status = _hiResStatus.load();
LARGE_INTEGER now, qpf;
if (status != kHiResNotAvailable && ::QueryPerformanceCounter(&now)) {
double freq = _hiResFreq;
if (status == kHiResUnknown) {
// Detects the availability of high resolution counter.
if (::QueryPerformanceFrequency(&qpf)) {
freq = double(qpf.QuadPart) / 1000.0;
_hiResFreq = freq;
_hiResStatus.compare_exchange_strong(status, kHiResAvailable);
status = kHiResAvailable;
}
else {
// High resolution not available.
_hiResStatus.compare_exchange_strong(status, kHiResNotAvailable);
}
}
if (status == kHiResAvailable)
return uint32_t(uint64_t(int64_t(double(now.QuadPart) / freq)) & 0xFFFFFFFFu);
}
// Bail to `GetTickCount()` if we cannot use high resolution.
return ::GetTickCount();
#elif defined(__APPLE__)
// See Apple's QA1398.
static mach_timebase_info_data_t _machTime;
uint32_t denom = _machTime.denom;
if (ASMJIT_UNLIKELY(!denom)) {
if (mach_timebase_info(&_machTime) != KERN_SUCCESS || !(denom = _machTime.denom))
return 0;
}
// `mach_absolute_time()` returns ticks that the timebase converts to nanoseconds; we want milliseconds.
uint64_t t = mach_absolute_time() / 1000000u;
t = (t * _machTime.numer) / denom;
return uint32_t(t & 0xFFFFFFFFu);
#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
struct timespec ts;
if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0))
return 0;
uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
return uint32_t(t & 0xFFFFFFFFu);
#else
#pragma message("[asmjit] OSUtils::getTickCount() doesn't have implementation for the target OS.")
return 0;
#endif
}
#if !defined(_WIN32)
Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept {
char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize);
if (ASMJIT_UNLIKELY(!buffer))
return DebugUtils::errored(kErrorOutOfMemory);
int fd = ::open(name, O_RDONLY);
if (fd < 0) {
dst.clear();
return DebugUtils::errored(kErrorFailedToOpenFile);
}
intptr_t len = ::read(fd, buffer, maxSize);
if (len >= 0) {
buffer[len] = '\0';
dst._setSize(size_t(len));
}
::close(fd);
return kErrorOk;
}
#endif
ASMJIT_END_NAMESPACE

View File

@@ -1,62 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
//! Operating system utilities.
namespace OSUtils {
//! Gets the current CPU tick count, used for benchmarking (1ms resolution).
ASMJIT_API uint32_t getTickCount() noexcept;
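//! A sketch of measuring elapsed time (illustrative only):
//!
//! ```
//! uint32_t start = OSUtils::getTickCount();
//! // ... the measured work ...
//! uint32_t elapsedMs = OSUtils::getTickCount() - start; // Unsigned wrap-around keeps this correct.
//! ```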
} // {OSUtils}
//! \cond INTERNAL
//! Lock.
//!
//! Lock is internal and cannot be used outside of AsmJit; however, its internal
//! layout is exposed as it's used by some other classes, which are public.
class Lock {
public:
ASMJIT_NONCOPYABLE(Lock)
#if defined(_WIN32)
#pragma pack(push, 8)
struct ASMJIT_MAY_ALIAS Handle {
void* DebugInfo;
long LockCount;
long RecursionCount;
void* OwningThread;
void* LockSemaphore;
unsigned long* SpinCount;
};
Handle _handle;
#pragma pack(pop)
#elif !defined(__EMSCRIPTEN__)
typedef pthread_mutex_t Handle;
Handle _handle;
#endif
ASMJIT_INLINE_NODEBUG Lock() noexcept;
ASMJIT_INLINE_NODEBUG ~Lock() noexcept;
ASMJIT_INLINE_NODEBUG void lock() noexcept;
ASMJIT_INLINE_NODEBUG void unlock() noexcept;
};
//! \endcond
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_H_INCLUDED

View File

@@ -1,78 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#include "../core/osutils.h"
#include "../core/string.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
#if defined(_WIN32)
// Windows implementation.
static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
#elif !defined(__EMSCRIPTEN__)
// PThread implementation.
#ifdef PTHREAD_MUTEX_INITIALIZER
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
#else
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
#endif
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
#else
// Dummy implementation - Emscripten or other unsupported platform.
ASMJIT_INLINE_NODEBUG Lock::Lock() noexcept {}
ASMJIT_INLINE_NODEBUG Lock::~Lock() noexcept {}
ASMJIT_INLINE_NODEBUG void Lock::lock() noexcept {}
ASMJIT_INLINE_NODEBUG void Lock::unlock() noexcept {}
#endif
//! Scoped lock.
class LockGuard {
public:
ASMJIT_NONCOPYABLE(LockGuard)
Lock& _target;
ASMJIT_INLINE_NODEBUG LockGuard(Lock& target) noexcept
: _target(target) { _target.lock(); }
ASMJIT_INLINE_NODEBUG ~LockGuard() noexcept { _target.unlock(); }
};
#if !defined(_WIN32)
namespace OSUtils {
//! Reads a file, only used on non-Windows platforms to access /sys or other files when necessary.
Error readFile(const char* name, String& dst, size_t maxSize) noexcept;
} // {OSUtils}
#endif
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_P_H_INCLUDED

View File

@@ -1,418 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/radefs_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
//! Holds the current register assignment.
//!
//! Has two purposes:
//!
//! 1. Holds register assignment of a local register allocator (see \ref RALocalAllocator).
//! 2. Holds register assignment of the entry of basic blocks (see \ref RABlock).
class RAAssignment {
public:
ASMJIT_NONCOPYABLE(RAAssignment)
enum Ids : uint32_t {
kPhysNone = 0xFF,
kWorkNone = RAWorkReg::kIdNone
};
enum DirtyBit : uint32_t {
kClean = 0,
kDirty = 1
};
struct Layout {
//! Index of architecture registers per group.
RARegIndex physIndex;
//! Count of architecture registers per group.
RARegCount physCount;
//! Count of physical registers of all groups.
uint32_t physTotal;
//! Count of work registers.
uint32_t workCount;
//! WorkRegs data (vector).
const RAWorkRegs* workRegs;
inline void reset() noexcept {
physIndex.reset();
physCount.reset();
physTotal = 0;
workCount = 0;
workRegs = nullptr;
}
};
struct PhysToWorkMap {
//! Assigned registers (each bit represents one physical reg).
RARegMask assigned;
//! Dirty registers (spill slot out of sync or no spill slot).
RARegMask dirty;
//! PhysReg to WorkReg mapping.
uint32_t workIds[1 /* ... */];
static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept {
return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t);
}
inline void reset(size_t count) noexcept {
assigned.reset();
dirty.reset();
for (size_t i = 0; i < count; i++)
workIds[i] = kWorkNone;
}
inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
memcpy(this, other, size);
}
inline void unassign(RegGroup group, uint32_t physId, uint32_t indexInWorkIds) noexcept {
assigned.clear(group, Support::bitMask(physId));
dirty.clear(group, Support::bitMask(physId));
workIds[indexInWorkIds] = kWorkNone;
}
};
struct WorkToPhysMap {
//! WorkReg to PhysReg mapping.
uint8_t physIds[1 /* ... */];
static inline size_t sizeOf(size_t count) noexcept {
return size_t(count) * sizeof(uint8_t);
}
inline void reset(size_t count) noexcept {
for (size_t i = 0; i < count; i++)
physIds[i] = kPhysNone;
}
inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
if (ASMJIT_LIKELY(size))
memcpy(this, other, size);
}
};
//! \name Members
//! \{
//! Physical registers layout.
Layout _layout;
//! WorkReg to PhysReg mapping.
WorkToPhysMap* _workToPhysMap;
//! PhysReg to WorkReg mapping and assigned/dirty bits.
PhysToWorkMap* _physToWorkMap;
//! Optimization to translate PhysRegs to WorkRegs faster.
Support::Array<uint32_t*, Globals::kNumVirtGroups> _physToWorkIds;
//! \}
//! \name Construction & Destruction
//! \{
inline RAAssignment() noexcept {
_layout.reset();
resetMaps();
}
ASMJIT_FORCE_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr);
_layout.physIndex.buildIndexes(physCount);
_layout.physCount = physCount;
_layout.physTotal = uint32_t(_layout.physIndex[RegGroup::kMaxVirt]) +
uint32_t(_layout.physCount[RegGroup::kMaxVirt]);
_layout.workCount = workRegs.size();
_layout.workRegs = &workRegs;
}
ASMJIT_FORCE_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
for (RegGroup group : RegGroupVirtValues{})
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
ASMJIT_FORCE_INLINE void resetMaps() noexcept {
_physToWorkMap = nullptr;
_workToPhysMap = nullptr;
_physToWorkIds.fill(nullptr);
}
//! \}
//! \name Accessors
//! \{
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; }
ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; }
inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept {
DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone);
ASMJIT_ASSERT(workId < _layout.workCount);
return _workToPhysMap->physIds[workId];
}
inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId];
}
inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId);
}
inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId);
}
//! \}
//! \name Assignment
//!
//! These are low-level allocation helpers that are used to update the current mappings between physical and
//! virt/work registers and also to update masks that represent allocated and dirty registers. These functions
//! don't emit any code; they are only used to update and keep all mappings in sync.
//!
//! \{
//! Assign [VirtReg/WorkReg] to a physical register.
inline void assign(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
ASMJIT_ASSERT(!isPhysAssigned(group, physId));
ASMJIT_ASSERT(!isPhysDirty(group, physId));
_workToPhysMap->physIds[workId] = uint8_t(physId);
_physToWorkIds[group][physId] = workId;
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] |= regMask;
_physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
//! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
inline void reassign(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
ASMJIT_ASSERT(dstPhysId != srcPhysId);
ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true);
ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false);
_workToPhysMap->physIds[workId] = uint8_t(dstPhysId);
_physToWorkIds[group][srcPhysId] = kWorkNone;
_physToWorkIds[group][dstPhysId] = workId;
RegMask srcMask = Support::bitMask(srcPhysId);
RegMask dstMask = Support::bitMask(dstPhysId);
bool dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
RegMask regMask = dstMask | srcMask;
_physToWorkMap->assigned[group] ^= regMask;
_physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
inline void swap(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
ASMJIT_ASSERT(aPhysId != bPhysId);
ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId);
ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId);
ASMJIT_ASSERT(isPhysAssigned(group, aPhysId));
ASMJIT_ASSERT(isPhysAssigned(group, bPhysId));
_workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId);
_workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId);
_physToWorkIds[group][aPhysId] = bWorkId;
_physToWorkIds[group][bPhysId] = aWorkId;
RegMask aMask = Support::bitMask(aPhysId);
RegMask bMask = Support::bitMask(bPhysId);
RegMask flipMask = Support::bitMaskFromBool<RegMask>(((_physToWorkMap->dirty[group] & aMask) != 0) ^ ((_physToWorkMap->dirty[group] & bMask) != 0));
RegMask regMask = aMask | bMask;
_physToWorkMap->dirty[group] ^= regMask & flipMask;
verify();
}
//! Unassign [VirtReg/WorkReg] from a physical register.
inline void unassign(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, physId));
_workToPhysMap->physIds[workId] = kPhysNone;
_physToWorkIds[group][physId] = kWorkNone;
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] &= ~regMask;
_physToWorkMap->dirty[group] &= ~regMask;
verify();
}
inline void makeClean(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] &= ~regMask;
}
inline void makeDirty(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] |= regMask;
}
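//! A sketch of how these helpers keep both maps in sync (illustrative only; `ra` is an initialized
//! RAAssignment and `group`, `workId`, and the physical ids are assumed to be valid):
//!
//! ```
//! ra.assign(group, workId, 3, true); // workId now lives in physId 3 and is dirty.
//! ra.reassign(group, workId, 5, 3);  // Moved from physId 3 to physId 5 (dirty bit follows).
//! ra.makeClean(group, workId, 5);    // Spill slot is now in sync with the register.
//! ra.unassign(group, workId, 5);     // workId no longer has a physical register.
//! ```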
//! \}
//! \name Utilities
//! \{
ASMJIT_FORCE_INLINE void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap);
_physToWorkIds.swap(other._physToWorkIds);
}
inline void assignWorkIdsFromPhysIds() noexcept {
memset(_workToPhysMap, uint8_t(BaseReg::kIdBad), WorkToPhysMap::sizeOf(_layout.workCount));
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t physBaseIndex = _layout.physIndex[group];
Support::BitWordIterator<RegMask> it(_physToWorkMap->assigned[group]);
while (it.hasNext()) {
uint32_t physId = it.next();
uint32_t workId = _physToWorkMap->workIds[physBaseIndex + physId];
ASMJIT_ASSERT(workId != kWorkNone);
_workToPhysMap->physIds[workId] = uint8_t(physId);
}
}
}
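// Illustrative walk-through (assumed state): if `assigned[RegGroup::kGp]` is 0b0101, the
// BitWordIterator above yields physIds 0 and 2; for each, the workId stored at
// `workIds[physBaseIndex + physId]` is written back into `_workToPhysMap`, so both maps describe
// the same assignment again.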
inline void copyFrom(const PhysToWorkMap* physToWorkMap) noexcept {
memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
assignWorkIdsFromPhysIds();
}
inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount));
}
inline void copyFrom(const RAAssignment& other) noexcept {
copyFrom(other.physToWorkMap(), other.workToPhysMap());
}
// Not really useful outside of debugging.
bool equals(const RAAssignment& other) const noexcept {
// Layout should always match.
if (_layout.physIndex != other._layout.physIndex ||
_layout.physCount != other._layout.physCount ||
_layout.physTotal != other._layout.physTotal ||
_layout.workCount != other._layout.workCount ||
_layout.workRegs != other._layout.workRegs)
return false;
uint32_t physTotal = _layout.physTotal;
uint32_t workCount = _layout.workCount;
for (uint32_t physId = 0; physId < physTotal; physId++) {
uint32_t thisWorkId = _physToWorkMap->workIds[physId];
uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
if (thisWorkId != otherWorkId)
return false;
}
for (uint32_t workId = 0; workId < workCount; workId++) {
uint32_t thisPhysId = _workToPhysMap->physIds[workId];
uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
if (thisPhysId != otherPhysId)
return false;
}
if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
_physToWorkMap->dirty != other._physToWorkMap->dirty )
return false;
return true;
}
#if defined(ASMJIT_BUILD_DEBUG)
ASMJIT_NOINLINE void verify() noexcept {
// Verify WorkToPhysMap.
{
for (uint32_t workId = 0; workId < _layout.workCount; workId++) {
uint32_t physId = _workToPhysMap->physIds[workId];
if (physId != kPhysNone) {
const RAWorkReg* workReg = _layout.workRegs->at(workId);
RegGroup group = workReg->group();
ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
}
}
}
// Verify PhysToWorkMap.
{
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t physCount = _layout.physCount[group];
for (uint32_t physId = 0; physId < physCount; physId++) {
uint32_t workId = _physToWorkIds[group][physId];
if (workId != kWorkNone) {
ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId);
}
}
}
}
}
#else
inline void verify() noexcept {}
#endif
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED

View File

@@ -1,612 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/formatter.h"
#include "../core/rapass_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
template<typename This>
class RACFGBuilderT {
public:
enum : uint32_t {
kRootIndentation = 2,
kCodeIndentation = 4,
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark such
// nodes, we just use their position, which is unassigned at that time.
kNodePositionDidOnBefore = 0xFFFFFFFFu
};
//! \name Members
//! \{
BaseRAPass* _pass = nullptr;
BaseCompiler* _cc = nullptr;
RABlock* _curBlock = nullptr;
RABlock* _retBlock = nullptr;
FuncNode* _funcNode = nullptr;
RARegsStats _blockRegStats {};
uint32_t _exitLabelId = Globals::kInvalidId;
ZoneVector<uint32_t> _sharedAssignmentsMap {};
// Only used by logging; it's fine to keep it here to avoid more #ifdefs...
bool _hasCode = false;
RABlock* _lastLoggedBlock = nullptr;
#ifndef ASMJIT_NO_LOGGING
Logger* _logger = nullptr;
FormatOptions _formatOptions {};
StringTmp<512> _sb;
#endif
//! \}
inline RACFGBuilderT(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING
_logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr;
if (_logger)
_formatOptions = _logger->options();
#endif
}
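// How this template is meant to be used (a minimal CRTP sketch; the class name below is
// illustrative, not part of this header - real backends provide their own builder):
//
//   class MyCFGBuilder : public RACFGBuilderT<MyCFGBuilder> {
//   public:
//     Error onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& ib) noexcept;
//     Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
//     Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
//     Error onBeforeRet(FuncRetNode* funcRet) noexcept;
//     Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
//   };
//
// `run()` below dispatches to these handlers via `static_cast<This*>(this)`, so the dispatch is
// resolved at compile time and no virtual calls are involved.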
inline BaseCompiler* cc() const noexcept { return _cc; }
//! \name Run
//! \{
//! Called per function by an architecture-specific CFG builder.
Error run() noexcept {
log("[BuildCFG]\n");
ASMJIT_PROPAGATE(prepare());
logNode(_funcNode, kRootIndentation);
logBlock(_curBlock, kRootIndentation);
RABlock* entryBlock = _curBlock;
BaseNode* node = _funcNode->next();
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
_curBlock->setFirst(_funcNode);
_curBlock->setLast(_funcNode);
RAInstBuilder ib;
ZoneVector<RABlock*> blocksWithUnknownJumps;
for (;;) {
BaseNode* next = node->next();
ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore);
if (node->isInst()) {
// Instruction | Jump | Invoke | Return
// ------------------------------------
// Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them share the same interface that provides
// operands that have read/write semantics.
if (ASMJIT_UNLIKELY(!_curBlock)) {
// Unreachable code has to be removed, because we cannot allocate registers for it; proper
// liveness analysis is impossible for code that is never reached.
removeNode(node);
node = next;
continue;
}
_hasCode = true;
if (node->isInvoke() || node->isFuncRet()) {
if (node->position() != kNodePositionDidOnBefore) {
// Invoke and Ret are complicated as they may insert some surrounding code around them. The simplest
// approach is to get the previous node, call the `onBefore()` handlers, and then check whether
// anything changed; restart if so. By restart we mean that the current `node` would go back to
// the first node inserted by `onBeforeInvoke()` or `onBeforeRet()`.
BaseNode* prev = node->prev();
if (node->type() == NodeType::kInvoke)
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeInvoke(node->as<InvokeNode>()));
else
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
if (prev != node->prev()) {
// If this was the first node in the block and something was
// inserted before it then we have to update the block's first node.
if (_curBlock->first() == node)
_curBlock->setFirst(prev->next());
node->setPosition(kNodePositionDidOnBefore);
node = prev->next();
// `onBeforeInvoke()` and `onBeforeRet()` can only insert instructions.
ASMJIT_ASSERT(node->isInst());
}
// Necessary if something was inserted after `node`, but nothing before.
next = node->next();
}
else {
// Change the position back to its original value.
node->setPosition(0);
}
}
InstNode* inst = node->as<InstNode>();
logNode(inst, kCodeIndentation);
InstControlFlow cf = InstControlFlow::kRegular;
ib.reset();
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, cf, ib));
if (node->isInvoke()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInvoke(inst->as<InvokeNode>(), ib));
}
if (node->isFuncRet()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
cf = InstControlFlow::kReturn;
}
if (cf == InstControlFlow::kJump) {
uint32_t fixedRegCount = 0;
for (RATiedReg& tiedReg : ib) {
RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
if (workReg->group() == RegGroup::kGp) {
uint32_t useId = tiedReg.useId();
if (useId == BaseReg::kIdBad) {
useId = _pass->_scratchRegIndexes[fixedRegCount++];
tiedReg.setUseId(useId);
}
_curBlock->addExitScratchGpRegs(Support::bitMask(useId));
}
}
}
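// Example of the scratch assignment above (assumed state): a jump through a register with no fixed
// use-id is given one of the pass's scratch GPs (e.g. `_scratchRegIndexes[0]`), and its mask is
// recorded via `addExitScratchGpRegs()` so the allocator knows these GPs are clobbered at the
// block's exit.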
ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
_blockRegStats.combineWith(ib._stats);
if (cf != InstControlFlow::kRegular) {
// Support for conditional and unconditional jumps.
if (cf == InstControlFlow::kJump || cf == InstControlFlow::kBranch) {
_curBlock->setLast(node);
_curBlock->addFlags(RABlockFlags::kHasTerminator);
_curBlock->makeConstructed(_blockRegStats);
if (!inst->hasOption(InstOptions::kUnfollow)) {
// Jmp/Jcc/Call/Loop/etc...
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
// Cannot jump anywhere without operands.
if (ASMJIT_UNLIKELY(!opCount))
return DebugUtils::errored(kErrorInvalidState);
if (opArray[opCount - 1].isLabel()) {
// Labels are easy for constructing the control flow.
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, opArray[opCount - 1].as<Label>()));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock))
return DebugUtils::errored(kErrorOutOfMemory);
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
else {
// Not a label - could be a jump with a reg/mem operand, which means that it can go anywhere. Such
// jumps must be annotated so the CFG can be properly constructed; otherwise we assume the worst
// case - they can jump to any basic block.
JumpAnnotation* jumpAnnotation = nullptr;
_curBlock->addFlags(RABlockFlags::kHasJumpTable);
if (inst->type() == NodeType::kJump)
jumpAnnotation = inst->as<JumpNode>()->annotation();
if (jumpAnnotation) {
uint64_t timestamp = _pass->nextTimestamp();
for (uint32_t id : jumpAnnotation->labelIds()) {
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, id));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock))
return DebugUtils::errored(kErrorOutOfMemory);
// Prevents adding basic-block successors multiple times.
if (!targetBlock->hasTimestamp(timestamp)) {
targetBlock->setTimestamp(timestamp);
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
}
ASMJIT_PROPAGATE(shareAssignmentAcrossSuccessors(_curBlock));
}
else {
ASMJIT_PROPAGATE(blocksWithUnknownJumps.append(_pass->allocator(), _curBlock));
}
}
}
if (cf == InstControlFlow::kJump) {
// Unconditional jump makes the code after the jump unreachable, and such code will be removed
// during CFG construction, as we cannot allocate registers for instructions that are not part of
// any block. Of course we could leave these instructions as they are; however, that would only
// postpone the problem, as assemblers can't encode instructions that use virtual registers.
_curBlock = nullptr;
}
else {
node = next;
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
RABlock* consecutiveBlock;
if (node->type() == NodeType::kLabel) {
if (node->hasPassData()) {
consecutiveBlock = node->passData<RABlock>();
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock))
return DebugUtils::errored(kErrorOutOfMemory);
node->setPassData<RABlock>(consecutiveBlock);
}
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock))
return DebugUtils::errored(kErrorOutOfMemory);
}
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
_curBlock = consecutiveBlock;
_hasCode = false;
_blockRegStats.reset();
if (_curBlock->isConstructed())
break;
ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
logBlock(_curBlock, kRootIndentation);
continue;
}
}
if (cf == InstControlFlow::kReturn) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
_curBlock = nullptr;
}
}
}
else if (node->type() == NodeType::kLabel) {
// Label - Basic-Block Management
// ------------------------------
if (!_curBlock) {
// If the current code is unreachable, the label makes it reachable again. We may still remove
// the whole block later if it's never referenced.
_curBlock = node->passData<RABlock>();
if (_curBlock) {
// If the label has a block assigned we can either continue with it or skip it if the block has been
// constructed already.
if (_curBlock->isConstructed())
break;
}
else {
// No block assigned - create a new one and assign it.
_curBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!_curBlock))
return DebugUtils::errored(kErrorOutOfMemory);
node->setPassData<RABlock>(_curBlock);
}
_curBlock->makeTargetable();
_hasCode = false;
_blockRegStats.reset();
ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
}
else {
if (node->hasPassData()) {
RABlock* consecutive = node->passData<RABlock>();
consecutive->makeTargetable();
if (_curBlock == consecutive) {
// The label currently processed is part of the current block. This is only possible for multiple labels
// that are right next to each other or labels that are separated by non-code nodes like directives and
// comments.
if (ASMJIT_UNLIKELY(_hasCode))
return DebugUtils::errored(kErrorInvalidState);
}
else {
// Label makes the current block constructed. There is a chance that the Label is not used, but we don't
// know that at this point. In the worst case there would be two blocks next to each other, which is fine.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
}
else {
// First time we see this label.
if (_hasCode || _curBlock == entryBlock) {
// Cannot continue the current block if it already contains some code or it's a block entry. We need to
// create a new block and make it a successor.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
RABlock* consecutive = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutive))
return DebugUtils::errored(kErrorOutOfMemory);
consecutive->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
node->setPassData<RABlock>(_curBlock);
}
}
if (_curBlock && _curBlock != _lastLoggedBlock)
logBlock(_curBlock, kRootIndentation);
logNode(node, kRootIndentation);
// Unlikely: Assume that the exit label is reached only once per function.
if (ASMJIT_UNLIKELY(node->as<LabelNode>()->labelId() == _exitLabelId)) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_pass->addExitBlock(_curBlock));
_curBlock = nullptr;
}
}
else {
// Other Nodes | Function Exit
// ---------------------------
logNode(node, kCodeIndentation);
if (node->type() == NodeType::kSentinel) {
if (node == _funcNode->endNode()) {
// This is the end-of-function sentinel; make sure control didn't simply fall through to it.
if (ASMJIT_UNLIKELY(_curBlock && _hasCode))
return DebugUtils::errored(kErrorInvalidState);
break;
}
}
else if (node->type() == NodeType::kFunc) {
// RAPass can only compile a single function at a time. If we
// encounter a function node it must be the current one; bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode))
return DebugUtils::errored(kErrorInvalidState);
// PASS if this is the first node.
}
else {
// PASS if this is a non-interesting or unknown node.
}
}
// Advance to the next node.
node = next;
// NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`)
// node. If we encountered a NULL node it means that something went wrong and this node list is corrupted;
// bail in such case.
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
}
if (_pass->hasDanglingBlocks())
return DebugUtils::errored(kErrorInvalidState);
for (RABlock* block : blocksWithUnknownJumps)
ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block));
return _pass->initSharedAssignments(_sharedAssignmentsMap);
}
//! \}
//! \name Prepare
//! \{
//! Prepares the CFG builder of the current function.
Error prepare() noexcept {
FuncNode* func = _pass->func();
BaseNode* node = nullptr;
// Create entry and exit blocks.
_funcNode = func;
_retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node);
if (ASMJIT_UNLIKELY(!_retBlock))
return DebugUtils::errored(kErrorOutOfMemory);
_retBlock->makeTargetable();
ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
if (node != func) {
_curBlock = _pass->newBlock();
if (ASMJIT_UNLIKELY(!_curBlock))
return DebugUtils::errored(kErrorOutOfMemory);
}
else {
// Function that has no code at all.
_curBlock = _retBlock;
}
// Reset everything we may need.
_blockRegStats.reset();
_exitLabelId = func->exitNode()->labelId();
// Initially we assume there is no code in the function body.
_hasCode = false;
return _pass->addBlock(_curBlock);
}
//! \}
//! \name Utilities
//! \{
//! Called when a `node` is removed, e.g. because of a dead code elimination.
void removeNode(BaseNode* node) noexcept {
logNode(node, kRootIndentation, "<Removed>");
cc()->removeNode(node);
}
//! Handles a block with an unknown jump, which could be a jump to a jump table.
//!
//! If we encounter such a block we insert all existing blocks as successors, except the function entry
//! block and the natural (consecutive) successor, if it exists.
Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size();
// NOTE: Iterate from `1` as the first block is the entry block; we don't
// allow the entry to be a successor of any block.
RABlock* consecutive = block->consecutive();
for (size_t i = 1; i < blockCount; i++) {
RABlock* candidate = blocks[i];
if (candidate == consecutive || !candidate->isTargetable())
continue;
ASMJIT_PROPAGATE(block->appendSuccessor(candidate));
}
return shareAssignmentAcrossSuccessors(block);
}
Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
if (block->successors().size() <= 1)
return kErrorOk;
RABlock* consecutive = block->consecutive();
uint32_t sharedAssignmentId = Globals::kInvalidId;
for (RABlock* successor : block->successors()) {
if (successor == consecutive)
continue;
if (successor->hasSharedAssignmentId()) {
if (sharedAssignmentId == Globals::kInvalidId)
sharedAssignmentId = successor->sharedAssignmentId();
else
_sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
}
else {
if (sharedAssignmentId == Globals::kInvalidId)
ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
successor->setSharedAssignmentId(sharedAssignmentId);
}
}
return kErrorOk;
}
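// Illustrative example of the merging above (assumed ids): if a block jumps to successors S1
// (sharedAssignmentId == 0) and S2 (sharedAssignmentId == 2), the map entry for id 2 is redirected
// to 0, so both groups of successors end up sharing a single register assignment once
// `initSharedAssignments()` flattens `_sharedAssignmentsMap`.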
Error newSharedAssignmentId(uint32_t* out) noexcept {
uint32_t id = _sharedAssignmentsMap.size();
ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
*out = id;
return kErrorOk;
}
//! \}
//! \name Logging
//! \{
#ifndef ASMJIT_NO_LOGGING
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
if (_logger)
_logger->logf(fmt, std::forward<Args>(args)...);
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
if (_logger)
_logBlock(block, indentation);
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
if (_logger)
_logNode(node, indentation, action);
}
void _logBlock(RABlock* block, uint32_t indentation) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
_sb.appendFormat("{#%u}\n", block->blockId());
_logger->log(_sb);
_lastLoggedBlock = block;
}
void _logNode(BaseNode* node, uint32_t indentation, const char* action) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
if (action) {
_sb.append(action);
_sb.append(' ');
}
Formatter::formatNode(_sb, _formatOptions, cc(), node);
_sb.append('\n');
_logger->log(_sb);
}
#else
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
DebugUtils::unused(fmt);
DebugUtils::unused(std::forward<Args>(args)...);
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
DebugUtils::unused(block, indentation);
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
DebugUtils::unused(node, indentation, action);
}
#endif
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RABUILDERS_P_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,254 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/raassignment_p.h"
#include "../core/radefs_p.h"
#include "../core/rapass_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
//! Local register allocator.
class RALocalAllocator {
public:
ASMJIT_NONCOPYABLE(RALocalAllocator)
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! Link to `BaseRAPass`.
BaseRAPass* _pass;
//! Link to `BaseCompiler`.
BaseCompiler* _cc;
//! Architecture traits.
const ArchTraits* _archTraits;
//! Registers available to the allocator.
RARegMask _availableRegs;
//! Registers clobbered by the allocator.
RARegMask _clobberedRegs;
//! Register assignment (current).
RAAssignment _curAssignment;
//! Register assignment used temporarily during assignment switches.
RAAssignment _tmpAssignment;
//! Link to the current `RABlock`.
RABlock* _block;
//! Currently processed `InstNode`.
InstNode* _node;
//! `RAInst` data of the currently processed node.
RAInst* _raInst;
//! Count of all TiedReg's.
uint32_t _tiedTotal;
//! Count of TiedRegs per register group.
RARegCount _tiedCount;
//! Temporary workToPhysMap that can be used freely by the allocator.
WorkToPhysMap* _tmpWorkToPhysMap;
//! \name Construction & Destruction
//! \{
inline RALocalAllocator(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()),
_archTraits(pass->_archTraits),
_availableRegs(pass->_availableRegs),
_clobberedRegs(),
_curAssignment(),
_block(nullptr),
_node(nullptr),
_raInst(nullptr),
_tiedTotal(),
_tiedCount() {}
Error init() noexcept;
//! \}
//! \name Accessors
//! \{
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
//! Returns the currently processed block.
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Sets the currently processed block.
ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; }
//! Returns the currently processed `InstNode`.
ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; }
//! Returns the currently processed `RAInst`.
ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; }
//! Returns all tied regs as `RATiedReg` array.
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers grouped by the given `group`.
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction.
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`.
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); }
ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; }
//! \}
//! \name Assignment
//! \{
Error makeInitialAssignment() noexcept;
Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept;
//! Switches to the given assignment by reassigning registers and emitting code that performs the
//! reassignment. This is always used to switch to a previously stored assignment.
//!
//! If `tryMode` is true then the final assignment doesn't have to be exactly the same as specified by
//! `dstPhysToWorkMap` and `dstWorkToPhysMap`. This mode is only used before conditional jumps that
//! already have an assignment, to generate a code sequence that is always executed regardless of the flow.
Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept;
ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept {
return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
}
Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
//! \}
//! \name Allocation
//! \{
Error allocInst(InstNode* node) noexcept;
Error spillAfterAllocation(InstNode* node) noexcept;
Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
//! \}
//! \name Decision Making
//! \{
enum CostModel : uint32_t {
kCostOfFrequency = 1048576,
kCostOfDirtyFlag = kCostOfFrequency / 4
};
ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept {
return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}
inline uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq());
if (_curAssignment.isPhysDirty(group, assignedId))
cost += kCostOfDirtyFlag;
return cost;
}
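// A worked example of the cost model (numbers assumed): a work register with a liveness frequency
// of 0.25 costs 0.25 * 1048576 == 262144; if its assigned physical register is dirty, the spill also
// implies a store, so kCostOfDirtyFlag (262144) is added, doubling the total cost to 524288.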
//! Decides on register assignment.
uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register
//! that has to be used by another WorkReg.
//!
//! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be
//! spilled, or a valid physical register ID, which means that the register should be moved to that physical register
//! instead.
uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides on the best spill given a register mask `spillableRegs`.
uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \}
//! \name Emit
//! \{
//! Emits a move between a destination and source register, and fixes the
//! register assignment.
inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) return kErrorOk;
_curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
return _pass->emitMove(workId, dstPhysId, srcPhysId);
}
//! Emits a swap between two physical registers and fixes their assignment.
//!
//! \note The target must support this operation, otherwise this would assert.
inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
}
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean.
inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId);
}
//! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean.
inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
_curAssignment.makeClean(group, workId, physId);
return _pass->emitSave(workId, physId);
}
//! Assigns a register; its content is undefined at this point.
inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty);
return kErrorOk;
}
//! Spills a variable/register; saves its content to the memory home if modified.
inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
return onKillReg(group, workId, physId);
}
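// Spill flow of the handlers above: a dirty register is first stored to its spill slot
// (`onSaveReg()` marks it clean) and then unassigned (`onKillReg()`); a clean register can be
// killed directly because its memory home is already up to date.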
inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId);
return kErrorOk;
}
inline Error onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId);
return kErrorOk;
}
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RALOCAL_P_H_INCLUDED

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,184 +0,0 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/rastack_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// RAStackAllocator - Slots
// ========================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
return nullptr;
RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
if (ASMJIT_UNLIKELY(!slot))
return nullptr;
slot->_baseRegId = uint8_t(baseRegId);
slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
slot->_flags = uint16_t(flags);
slot->_useCount = 0;
slot->_size = size;
slot->_weight = 0;
slot->_offset = 0;
_alignment = Support::max<uint32_t>(_alignment, alignment);
_slots.appendUnsafe(slot);
return slot;
}
// RAStackAllocator - Utilities
// ============================
struct RAStackGap {
inline RAStackGap() noexcept
: offset(0),
size(0) {}
inline RAStackGap(uint32_t offset, uint32_t size) noexcept
: offset(offset),
size(size) {}
inline RAStackGap(const RAStackGap& other) noexcept
: offset(other.offset),
size(other.size) {}
uint32_t offset;
uint32_t size;
};
Error RAStackAllocator::calculateStackFrame() noexcept {
// Base weight added to all registers regardless of their size and alignment.
uint32_t kBaseRegWeight = 16;
// STEP 1:
//
// Update usage based on the size of the slot. We boost smaller slots so that a 32-bit register has a
// higher priority than a 128-bit register; however, if one 128-bit register is used four times more
// often than some 32-bit register, it will outweigh it.
for (RAStackSlot* slot : _slots) {
uint32_t alignment = slot->alignment();
ASMJIT_ASSERT(alignment > 0);
uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
uint64_t weight;
if (slot->isRegHome())
weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
else
weight = power;
// If the weight overflows (which is less likely than winning a lottery), just use the maximum
// possible weight. In that case it probably doesn't matter at all.
if (weight > 0xFFFFFFFFu)
weight = 0xFFFFFFFFu;
slot->setWeight(uint32_t(weight));
}
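// A worked example of the weighting (values assumed): a 4-byte reg-home slot (alignment 4, so
// power == 2) used 10 times gets weight 16 + 10 * (7 - 2) == 66, while a 16-byte non-reg-home slot
// (alignment 16, so power == 4) gets weight 4, so frequently used register homes are placed first,
// at the lowest offsets.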
// STEP 2:
//
// Sort stack slots based on their newly calculated weight (in descending order).
_slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
return a->weight() > b->weight() ? 1 :
a->weight() == b->weight() ? 0 : -1;
});
// STEP 3:
//
// Calculate the offset of each slot. We start from the slot that has the highest weight and advance to
// slots with lower weight. It may look like offsets simply start at the first slot in our list and then
// increase, but that's not always the case, as we also try to fill the gaps introduced by the fact that
// slots are sorted by weight rather than by size & alignment; when we need to align a slot, we
// distribute the gap caused by the alignment into `gaps`.
uint32_t offset = 0;
ZoneVector<RAStackGap> gaps[kSizeCount - 1];
for (RAStackSlot* slot : _slots) {
if (slot->isStackArg())
continue;
uint32_t slotAlignment = slot->alignment();
uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
// Try to find a suitable gap first, before advancing the `offset`.
bool foundGap = false;
uint32_t gapSize = 0;
uint32_t gapOffset = 0;
{
uint32_t slotSize = slot->size();
if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
// Iterate from the lowest to the highest possible.
uint32_t index = Support::ctz(slotSize);
do {
if (!gaps[index].empty()) {
RAStackGap gap = gaps[index].pop();
ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
slot->setOffset(int32_t(gap.offset));
gapSize = gap.size - slotSize;
gapOffset = gap.offset + slotSize;
foundGap = true;
break;
}
} while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
}
}
// No gap found; we may create new one(s) if the current offset is not aligned.
if (!foundGap && offset != alignedOffset) {
gapSize = alignedOffset - offset;
gapOffset = offset;
offset = alignedOffset;
}
// Non-zero if we found a gap and didn't fill all of it, or if we had to align the current offset.
if (gapSize) {
uint32_t gapEnd = gapSize + gapOffset;
while (gapOffset < gapEnd) {
uint32_t index = Support::ctz(gapOffset);
uint32_t slotSize = 1u << index;
// Weird case, better to bail...
if (gapEnd - gapOffset < slotSize)
break;
ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
gapOffset += slotSize;
}
}
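// Example of the gap splitting above (offsets assumed): aligning from offset 4 to 16 leaves a
// 12-byte hole; the loop carves it into power-of-two chunks addressed by the lowest set bit of
// their offset: a 4-byte gap at offset 4 and an 8-byte gap at offset 8, which later slots of a
// matching size can reuse.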
if (!foundGap) {
ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
slot->setOffset(int32_t(offset));
offset += slot->size();
}
}
_stackSize = Support::alignUp(offset, _alignment);
return kErrorOk;
}
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
for (RAStackSlot* slot : _slots)
if (!slot->isStackArg())
slot->_offset += offset;
return kErrorOk;
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER

Some files were not shown because too many files have changed in this diff