Commit 9195a63d authored by swift_gan

add vixl lib

parent 9ab57b60
@@ -34,6 +34,13 @@ add_library( # Sets the name of the library.
src/main/cpp/inst/insts_arm64.cpp
)
include_directories(src/main/cpp/asm/vixl)
add_subdirectory(src/main/cpp/asm/vixl)
target_link_libraries(sandhook vixl)
# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
......
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -fdiagnostics-show-option -Wextra -Wredundant-decls -pedantic -Wwrite-strings -Wunused")
add_definitions(-DVIXL_CODE_BUFFER_MALLOC)
set(VIXL_SOURCES
code-buffer-vixl.cc
compiler-intrinsics-vixl.cc
cpu-features.cc
utils-vixl.cc)
set(VIXL_AARCH32
aarch32/assembler-aarch32.cc
aarch32/constants-aarch32.cc
aarch32/instructions-aarch32.cc
aarch32/location-aarch32.cc
aarch32/macro-assembler-aarch32.cc
aarch32/operands-aarch32.cc
)
set(VIXL_AARCH64
aarch64/assembler-aarch64.cc
aarch64/cpu-aarch64.cc
aarch64/cpu-features-auditor-aarch64.cc
aarch64/decoder-aarch64.cc
aarch64/instructions-aarch64.cc
aarch64/instrument-aarch64.cc
aarch64/logic-aarch64.cc
aarch64/macro-assembler-aarch64.cc
aarch64/operands-aarch64.cc
aarch64/pointer-auth-aarch64.cc
aarch64/simulator-aarch64.cc
)
if (ENABLE_SIMULATOR)
add_definitions(-DVIXL_INCLUDE_SIMULATOR_AARCH64)
set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH32} ${VIXL_AARCH64})
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
add_definitions("-DVIXL_INCLUDE_TARGET_A32")
add_definitions("-DVIXL_INCLUDE_TARGET_T32")
set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH32})
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64")
set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH64})
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|aarch64)")
add_library(vixl ${VIXL_SOURCES})
endif ()
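With the static vixl target above linked into sandhook (the target_link_libraries line in the diff), the native code can drive VIXL's AArch64 macro-assembler to generate code at runtime. A minimal sketch of that usage on an arm64 build, assuming the default non-simulator configuration; the function below is illustrative only and is not SandHook's actual trampoline code:

#include <cstdint>

#include "aarch64/macro-assembler-aarch64.h"

// Illustrative sketch: assemble a tiny stub that loads a constant and
// returns. With -DVIXL_CODE_BUFFER_MALLOC (set above) the backing CodeBuffer
// is allocated with malloc rather than mmap, so the caller is responsible for
// copying the bytes into executable memory and flushing the instruction cache
// before running them.
void EmitExampleStub() {
  vixl::aarch64::MacroAssembler masm;
  masm.Mov(vixl::aarch64::x0, 0x1234);
  masm.Ret();
  masm.FinalizeCode();
  // Generated bytes start at masm.GetBuffer()->GetStartAddress<uint8_t*>()
  // and span masm.GetSizeOfCodeGenerated() bytes.
}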
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "aarch32/constants-aarch32.h"
#include "utils-vixl.h"
namespace vixl {
namespace aarch32 {
// Start of generated code.
const char* ToCString(InstructionType type) {
switch (type) {
case kAdc:
return "adc";
case kAdcs:
return "adcs";
case kAdd:
return "add";
case kAdds:
return "adds";
case kAddw:
return "addw";
case kAdr:
return "adr";
case kAnd:
return "and";
case kAnds:
return "ands";
case kAsr:
return "asr";
case kAsrs:
return "asrs";
case kB:
return "b";
case kBfc:
return "bfc";
case kBfi:
return "bfi";
case kBic:
return "bic";
case kBics:
return "bics";
case kBkpt:
return "bkpt";
case kBl:
return "bl";
case kBlx:
return "blx";
case kBx:
return "bx";
case kBxj:
return "bxj";
case kCbnz:
return "cbnz";
case kCbz:
return "cbz";
case kClrex:
return "clrex";
case kClz:
return "clz";
case kCmn:
return "cmn";
case kCmp:
return "cmp";
case kCrc32b:
return "crc32b";
case kCrc32cb:
return "crc32cb";
case kCrc32ch:
return "crc32ch";
case kCrc32cw:
return "crc32cw";
case kCrc32h:
return "crc32h";
case kCrc32w:
return "crc32w";
case kDmb:
return "dmb";
case kDsb:
return "dsb";
case kEor:
return "eor";
case kEors:
return "eors";
case kFldmdbx:
return "fldmdbx";
case kFldmiax:
return "fldmiax";
case kFstmdbx:
return "fstmdbx";
case kFstmiax:
return "fstmiax";
case kHlt:
return "hlt";
case kHvc:
return "hvc";
case kIsb:
return "isb";
case kIt:
return "it";
case kLda:
return "lda";
case kLdab:
return "ldab";
case kLdaex:
return "ldaex";
case kLdaexb:
return "ldaexb";
case kLdaexd:
return "ldaexd";
case kLdaexh:
return "ldaexh";
case kLdah:
return "ldah";
case kLdm:
return "ldm";
case kLdmda:
return "ldmda";
case kLdmdb:
return "ldmdb";
case kLdmea:
return "ldmea";
case kLdmed:
return "ldmed";
case kLdmfa:
return "ldmfa";
case kLdmfd:
return "ldmfd";
case kLdmib:
return "ldmib";
case kLdr:
return "ldr";
case kLdrb:
return "ldrb";
case kLdrd:
return "ldrd";
case kLdrex:
return "ldrex";
case kLdrexb:
return "ldrexb";
case kLdrexd:
return "ldrexd";
case kLdrexh:
return "ldrexh";
case kLdrh:
return "ldrh";
case kLdrsb:
return "ldrsb";
case kLdrsh:
return "ldrsh";
case kLsl:
return "lsl";
case kLsls:
return "lsls";
case kLsr:
return "lsr";
case kLsrs:
return "lsrs";
case kMla:
return "mla";
case kMlas:
return "mlas";
case kMls:
return "mls";
case kMov:
return "mov";
case kMovs:
return "movs";
case kMovt:
return "movt";
case kMovw:
return "movw";
case kMrs:
return "mrs";
case kMsr:
return "msr";
case kMul:
return "mul";
case kMuls:
return "muls";
case kMvn:
return "mvn";
case kMvns:
return "mvns";
case kNop:
return "nop";
case kOrn:
return "orn";
case kOrns:
return "orns";
case kOrr:
return "orr";
case kOrrs:
return "orrs";
case kPkhbt:
return "pkhbt";
case kPkhtb:
return "pkhtb";
case kPld:
return "pld";
case kPldw:
return "pldw";
case kPli:
return "pli";
case kPop:
return "pop";
case kPush:
return "push";
case kQadd:
return "qadd";
case kQadd16:
return "qadd16";
case kQadd8:
return "qadd8";
case kQasx:
return "qasx";
case kQdadd:
return "qdadd";
case kQdsub:
return "qdsub";
case kQsax:
return "qsax";
case kQsub:
return "qsub";
case kQsub16:
return "qsub16";
case kQsub8:
return "qsub8";
case kRbit:
return "rbit";
case kRev:
return "rev";
case kRev16:
return "rev16";
case kRevsh:
return "revsh";
case kRor:
return "ror";
case kRors:
return "rors";
case kRrx:
return "rrx";
case kRrxs:
return "rrxs";
case kRsb:
return "rsb";
case kRsbs:
return "rsbs";
case kRsc:
return "rsc";
case kRscs:
return "rscs";
case kSadd16:
return "sadd16";
case kSadd8:
return "sadd8";
case kSasx:
return "sasx";
case kSbc:
return "sbc";
case kSbcs:
return "sbcs";
case kSbfx:
return "sbfx";
case kSdiv:
return "sdiv";
case kSel:
return "sel";
case kShadd16:
return "shadd16";
case kShadd8:
return "shadd8";
case kShasx:
return "shasx";
case kShsax:
return "shsax";
case kShsub16:
return "shsub16";
case kShsub8:
return "shsub8";
case kSmlabb:
return "smlabb";
case kSmlabt:
return "smlabt";
case kSmlad:
return "smlad";
case kSmladx:
return "smladx";
case kSmlal:
return "smlal";
case kSmlalbb:
return "smlalbb";
case kSmlalbt:
return "smlalbt";
case kSmlald:
return "smlald";
case kSmlaldx:
return "smlaldx";
case kSmlals:
return "smlals";
case kSmlaltb:
return "smlaltb";
case kSmlaltt:
return "smlaltt";
case kSmlatb:
return "smlatb";
case kSmlatt:
return "smlatt";
case kSmlawb:
return "smlawb";
case kSmlawt:
return "smlawt";
case kSmlsd:
return "smlsd";
case kSmlsdx:
return "smlsdx";
case kSmlsld:
return "smlsld";
case kSmlsldx:
return "smlsldx";
case kSmmla:
return "smmla";
case kSmmlar:
return "smmlar";
case kSmmls:
return "smmls";
case kSmmlsr:
return "smmlsr";
case kSmmul:
return "smmul";
case kSmmulr:
return "smmulr";
case kSmuad:
return "smuad";
case kSmuadx:
return "smuadx";
case kSmulbb:
return "smulbb";
case kSmulbt:
return "smulbt";
case kSmull:
return "smull";
case kSmulls:
return "smulls";
case kSmultb:
return "smultb";
case kSmultt:
return "smultt";
case kSmulwb:
return "smulwb";
case kSmulwt:
return "smulwt";
case kSmusd:
return "smusd";
case kSmusdx:
return "smusdx";
case kSsat:
return "ssat";
case kSsat16:
return "ssat16";
case kSsax:
return "ssax";
case kSsub16:
return "ssub16";
case kSsub8:
return "ssub8";
case kStl:
return "stl";
case kStlb:
return "stlb";
case kStlex:
return "stlex";
case kStlexb:
return "stlexb";
case kStlexd:
return "stlexd";
case kStlexh:
return "stlexh";
case kStlh:
return "stlh";
case kStm:
return "stm";
case kStmda:
return "stmda";
case kStmdb:
return "stmdb";
case kStmea:
return "stmea";
case kStmed:
return "stmed";
case kStmfa:
return "stmfa";
case kStmfd:
return "stmfd";
case kStmib:
return "stmib";
case kStr:
return "str";
case kStrb:
return "strb";
case kStrd:
return "strd";
case kStrex:
return "strex";
case kStrexb:
return "strexb";
case kStrexd:
return "strexd";
case kStrexh:
return "strexh";
case kStrh:
return "strh";
case kSub:
return "sub";
case kSubs:
return "subs";
case kSubw:
return "subw";
case kSvc:
return "svc";
case kSxtab:
return "sxtab";
case kSxtab16:
return "sxtab16";
case kSxtah:
return "sxtah";
case kSxtb:
return "sxtb";
case kSxtb16:
return "sxtb16";
case kSxth:
return "sxth";
case kTbb:
return "tbb";
case kTbh:
return "tbh";
case kTeq:
return "teq";
case kTst:
return "tst";
case kUadd16:
return "uadd16";
case kUadd8:
return "uadd8";
case kUasx:
return "uasx";
case kUbfx:
return "ubfx";
case kUdf:
return "udf";
case kUdiv:
return "udiv";
case kUhadd16:
return "uhadd16";
case kUhadd8:
return "uhadd8";
case kUhasx:
return "uhasx";
case kUhsax:
return "uhsax";
case kUhsub16:
return "uhsub16";
case kUhsub8:
return "uhsub8";
case kUmaal:
return "umaal";
case kUmlal:
return "umlal";
case kUmlals:
return "umlals";
case kUmull:
return "umull";
case kUmulls:
return "umulls";
case kUqadd16:
return "uqadd16";
case kUqadd8:
return "uqadd8";
case kUqasx:
return "uqasx";
case kUqsax:
return "uqsax";
case kUqsub16:
return "uqsub16";
case kUqsub8:
return "uqsub8";
case kUsad8:
return "usad8";
case kUsada8:
return "usada8";
case kUsat:
return "usat";
case kUsat16:
return "usat16";
case kUsax:
return "usax";
case kUsub16:
return "usub16";
case kUsub8:
return "usub8";
case kUxtab:
return "uxtab";
case kUxtab16:
return "uxtab16";
case kUxtah:
return "uxtah";
case kUxtb:
return "uxtb";
case kUxtb16:
return "uxtb16";
case kUxth:
return "uxth";
case kVaba:
return "vaba";
case kVabal:
return "vabal";
case kVabd:
return "vabd";
case kVabdl:
return "vabdl";
case kVabs:
return "vabs";
case kVacge:
return "vacge";
case kVacgt:
return "vacgt";
case kVacle:
return "vacle";
case kVaclt:
return "vaclt";
case kVadd:
return "vadd";
case kVaddhn:
return "vaddhn";
case kVaddl:
return "vaddl";
case kVaddw:
return "vaddw";
case kVand:
return "vand";
case kVbic:
return "vbic";
case kVbif:
return "vbif";
case kVbit:
return "vbit";
case kVbsl:
return "vbsl";
case kVceq:
return "vceq";
case kVcge:
return "vcge";
case kVcgt:
return "vcgt";
case kVcle:
return "vcle";
case kVcls:
return "vcls";
case kVclt:
return "vclt";
case kVclz:
return "vclz";
case kVcmp:
return "vcmp";
case kVcmpe:
return "vcmpe";
case kVcnt:
return "vcnt";
case kVcvt:
return "vcvt";
case kVcvta:
return "vcvta";
case kVcvtb:
return "vcvtb";
case kVcvtm:
return "vcvtm";
case kVcvtn:
return "vcvtn";
case kVcvtp:
return "vcvtp";
case kVcvtr:
return "vcvtr";
case kVcvtt:
return "vcvtt";
case kVdiv:
return "vdiv";
case kVdup:
return "vdup";
case kVeor:
return "veor";
case kVext:
return "vext";
case kVfma:
return "vfma";
case kVfms:
return "vfms";
case kVfnma:
return "vfnma";
case kVfnms:
return "vfnms";
case kVhadd:
return "vhadd";
case kVhsub:
return "vhsub";
case kVld1:
return "vld1";
case kVld2:
return "vld2";
case kVld3:
return "vld3";
case kVld4:
return "vld4";
case kVldm:
return "vldm";
case kVldmdb:
return "vldmdb";
case kVldmia:
return "vldmia";
case kVldr:
return "vldr";
case kVmax:
return "vmax";
case kVmaxnm:
return "vmaxnm";
case kVmin:
return "vmin";
case kVminnm:
return "vminnm";
case kVmla:
return "vmla";
case kVmlal:
return "vmlal";
case kVmls:
return "vmls";
case kVmlsl:
return "vmlsl";
case kVmov:
return "vmov";
case kVmovl:
return "vmovl";
case kVmovn:
return "vmovn";
case kVmrs:
return "vmrs";
case kVmsr:
return "vmsr";
case kVmul:
return "vmul";
case kVmull:
return "vmull";
case kVmvn:
return "vmvn";
case kVneg:
return "vneg";
case kVnmla:
return "vnmla";
case kVnmls:
return "vnmls";
case kVnmul:
return "vnmul";
case kVorn:
return "vorn";
case kVorr:
return "vorr";
case kVpadal:
return "vpadal";
case kVpadd:
return "vpadd";
case kVpaddl:
return "vpaddl";
case kVpmax:
return "vpmax";
case kVpmin:
return "vpmin";
case kVpop:
return "vpop";
case kVpush:
return "vpush";
case kVqabs:
return "vqabs";
case kVqadd:
return "vqadd";
case kVqdmlal:
return "vqdmlal";
case kVqdmlsl:
return "vqdmlsl";
case kVqdmulh:
return "vqdmulh";
case kVqdmull:
return "vqdmull";
case kVqmovn:
return "vqmovn";
case kVqmovun:
return "vqmovun";
case kVqneg:
return "vqneg";
case kVqrdmulh:
return "vqrdmulh";
case kVqrshl:
return "vqrshl";
case kVqrshrn:
return "vqrshrn";
case kVqrshrun:
return "vqrshrun";
case kVqshl:
return "vqshl";
case kVqshlu:
return "vqshlu";
case kVqshrn:
return "vqshrn";
case kVqshrun:
return "vqshrun";
case kVqsub:
return "vqsub";
case kVraddhn:
return "vraddhn";
case kVrecpe:
return "vrecpe";
case kVrecps:
return "vrecps";
case kVrev16:
return "vrev16";
case kVrev32:
return "vrev32";
case kVrev64:
return "vrev64";
case kVrhadd:
return "vrhadd";
case kVrinta:
return "vrinta";
case kVrintm:
return "vrintm";
case kVrintn:
return "vrintn";
case kVrintp:
return "vrintp";
case kVrintr:
return "vrintr";
case kVrintx:
return "vrintx";
case kVrintz:
return "vrintz";
case kVrshl:
return "vrshl";
case kVrshr:
return "vrshr";
case kVrshrn:
return "vrshrn";
case kVrsqrte:
return "vrsqrte";
case kVrsqrts:
return "vrsqrts";
case kVrsra:
return "vrsra";
case kVrsubhn:
return "vrsubhn";
case kVseleq:
return "vseleq";
case kVselge:
return "vselge";
case kVselgt:
return "vselgt";
case kVselvs:
return "vselvs";
case kVshl:
return "vshl";
case kVshll:
return "vshll";
case kVshr:
return "vshr";
case kVshrn:
return "vshrn";
case kVsli:
return "vsli";
case kVsqrt:
return "vsqrt";
case kVsra:
return "vsra";
case kVsri:
return "vsri";
case kVst1:
return "vst1";
case kVst2:
return "vst2";
case kVst3:
return "vst3";
case kVst4:
return "vst4";
case kVstm:
return "vstm";
case kVstmdb:
return "vstmdb";
case kVstmia:
return "vstmia";
case kVstr:
return "vstr";
case kVsub:
return "vsub";
case kVsubhn:
return "vsubhn";
case kVsubl:
return "vsubl";
case kVsubw:
return "vsubw";
case kVswp:
return "vswp";
case kVtbl:
return "vtbl";
case kVtbx:
return "vtbx";
case kVtrn:
return "vtrn";
case kVtst:
return "vtst";
case kVuzp:
return "vuzp";
case kVzip:
return "vzip";
case kYield:
return "yield";
case kUndefInstructionType:
VIXL_UNREACHABLE();
return "";
}
VIXL_UNREACHABLE();
return "";
} // NOLINT(readability/fn_size)
// End of generated code.
} // namespace aarch32
} // namespace vixl
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CONSTANTS_AARCH32_H_
#define VIXL_CONSTANTS_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include "globals-vixl.h"
namespace vixl {
namespace aarch32 {
enum InstructionSet { A32, T32 };
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
const InstructionSet kDefaultISA = T32;
#else
const InstructionSet kDefaultISA = A32;
#endif
const unsigned kRegSizeInBits = 32;
const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
const unsigned kSRegSizeInBits = 32;
const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
const unsigned kDRegSizeInBits = 64;
const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
const unsigned kQRegSizeInBits = 128;
const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;
const unsigned kNumberOfRegisters = 16;
const unsigned kNumberOfSRegisters = 32;
const unsigned kMaxNumberOfDRegisters = 32;
const unsigned kNumberOfQRegisters = 16;
const unsigned kNumberOfT32LowRegisters = 8;
const unsigned kIpCode = 12;
const unsigned kSpCode = 13;
const unsigned kLrCode = 14;
const unsigned kPcCode = 15;
const unsigned kT32PcDelta = 4;
const unsigned kA32PcDelta = 8;
const unsigned kRRXEncodedValue = 3;
const unsigned kCoprocMask = 0xe;
const unsigned kInvalidCoprocMask = 0xa;
const unsigned kLowestT32_32Opcode = 0xe8000000;
const uint32_t kUnknownValue = 0xdeadbeef;
const uint32_t kMaxInstructionSizeInBytes = 4;
const uint32_t kA32InstructionSizeInBytes = 4;
const uint32_t k32BitT32InstructionSizeInBytes = 4;
const uint32_t k16BitT32InstructionSizeInBytes = 2;
// Maximum size emitted by a single T32 unconditional macro-instruction.
const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;
const uint32_t kCallerSavedRegistersMask = 0x500f;
const uint16_t k16BitT32NopOpcode = 0xbf00;
const uint16_t kCbzCbnzMask = 0xf500;
const uint16_t kCbzCbnzValue = 0xb100;
const int32_t kCbzCbnzRange = 126;
const int32_t kBConditionalNarrowRange = 254;
const int32_t kBNarrowRange = 2046;
const int32_t kNearLabelRange = kBNarrowRange;
enum SystemFunctionsOpcodes { kPrintfCode };
enum BranchHint { kNear, kFar, kBranchWithoutHint };
// Start of generated code.
// AArch32 version implemented by the library (v8.0).
// The encoding for vX.Y is: (X << 8) | Y.
#define AARCH32_VERSION 0x0800
enum InstructionAttribute {
kNoAttribute = 0,
kArithmetic = 0x1,
kBitwise = 0x2,
kShift = 0x4,
kAddress = 0x8,
kBranch = 0x10,
kSystem = 0x20,
kFpNeon = 0x40,
kLoadStore = 0x80,
kLoadStoreMultiple = 0x100
};
enum InstructionType {
kUndefInstructionType,
kAdc,
kAdcs,
kAdd,
kAdds,
kAddw,
kAdr,
kAnd,
kAnds,
kAsr,
kAsrs,
kB,
kBfc,
kBfi,
kBic,
kBics,
kBkpt,
kBl,
kBlx,
kBx,
kBxj,
kCbnz,
kCbz,
kClrex,
kClz,
kCmn,
kCmp,
kCrc32b,
kCrc32cb,
kCrc32ch,
kCrc32cw,
kCrc32h,
kCrc32w,
kDmb,
kDsb,
kEor,
kEors,
kFldmdbx,
kFldmiax,
kFstmdbx,
kFstmiax,
kHlt,
kHvc,
kIsb,
kIt,
kLda,
kLdab,
kLdaex,
kLdaexb,
kLdaexd,
kLdaexh,
kLdah,
kLdm,
kLdmda,
kLdmdb,
kLdmea,
kLdmed,
kLdmfa,
kLdmfd,
kLdmib,
kLdr,
kLdrb,
kLdrd,
kLdrex,
kLdrexb,
kLdrexd,
kLdrexh,
kLdrh,
kLdrsb,
kLdrsh,
kLsl,
kLsls,
kLsr,
kLsrs,
kMla,
kMlas,
kMls,
kMov,
kMovs,
kMovt,
kMovw,
kMrs,
kMsr,
kMul,
kMuls,
kMvn,
kMvns,
kNop,
kOrn,
kOrns,
kOrr,
kOrrs,
kPkhbt,
kPkhtb,
kPld,
kPldw,
kPli,
kPop,
kPush,
kQadd,
kQadd16,
kQadd8,
kQasx,
kQdadd,
kQdsub,
kQsax,
kQsub,
kQsub16,
kQsub8,
kRbit,
kRev,
kRev16,
kRevsh,
kRor,
kRors,
kRrx,
kRrxs,
kRsb,
kRsbs,
kRsc,
kRscs,
kSadd16,
kSadd8,
kSasx,
kSbc,
kSbcs,
kSbfx,
kSdiv,
kSel,
kShadd16,
kShadd8,
kShasx,
kShsax,
kShsub16,
kShsub8,
kSmlabb,
kSmlabt,
kSmlad,
kSmladx,
kSmlal,
kSmlalbb,
kSmlalbt,
kSmlald,
kSmlaldx,
kSmlals,
kSmlaltb,
kSmlaltt,
kSmlatb,
kSmlatt,
kSmlawb,
kSmlawt,
kSmlsd,
kSmlsdx,
kSmlsld,
kSmlsldx,
kSmmla,
kSmmlar,
kSmmls,
kSmmlsr,
kSmmul,
kSmmulr,
kSmuad,
kSmuadx,
kSmulbb,
kSmulbt,
kSmull,
kSmulls,
kSmultb,
kSmultt,
kSmulwb,
kSmulwt,
kSmusd,
kSmusdx,
kSsat,
kSsat16,
kSsax,
kSsub16,
kSsub8,
kStl,
kStlb,
kStlex,
kStlexb,
kStlexd,
kStlexh,
kStlh,
kStm,
kStmda,
kStmdb,
kStmea,
kStmed,
kStmfa,
kStmfd,
kStmib,
kStr,
kStrb,
kStrd,
kStrex,
kStrexb,
kStrexd,
kStrexh,
kStrh,
kSub,
kSubs,
kSubw,
kSvc,
kSxtab,
kSxtab16,
kSxtah,
kSxtb,
kSxtb16,
kSxth,
kTbb,
kTbh,
kTeq,
kTst,
kUadd16,
kUadd8,
kUasx,
kUbfx,
kUdf,
kUdiv,
kUhadd16,
kUhadd8,
kUhasx,
kUhsax,
kUhsub16,
kUhsub8,
kUmaal,
kUmlal,
kUmlals,
kUmull,
kUmulls,
kUqadd16,
kUqadd8,
kUqasx,
kUqsax,
kUqsub16,
kUqsub8,
kUsad8,
kUsada8,
kUsat,
kUsat16,
kUsax,
kUsub16,
kUsub8,
kUxtab,
kUxtab16,
kUxtah,
kUxtb,
kUxtb16,
kUxth,
kVaba,
kVabal,
kVabd,
kVabdl,
kVabs,
kVacge,
kVacgt,
kVacle,
kVaclt,
kVadd,
kVaddhn,
kVaddl,
kVaddw,
kVand,
kVbic,
kVbif,
kVbit,
kVbsl,
kVceq,
kVcge,
kVcgt,
kVcle,
kVcls,
kVclt,
kVclz,
kVcmp,
kVcmpe,
kVcnt,
kVcvt,
kVcvta,
kVcvtb,
kVcvtm,
kVcvtn,
kVcvtp,
kVcvtr,
kVcvtt,
kVdiv,
kVdup,
kVeor,
kVext,
kVfma,
kVfms,
kVfnma,
kVfnms,
kVhadd,
kVhsub,
kVld1,
kVld2,
kVld3,
kVld4,
kVldm,
kVldmdb,
kVldmia,
kVldr,
kVmax,
kVmaxnm,
kVmin,
kVminnm,
kVmla,
kVmlal,
kVmls,
kVmlsl,
kVmov,
kVmovl,
kVmovn,
kVmrs,
kVmsr,
kVmul,
kVmull,
kVmvn,
kVneg,
kVnmla,
kVnmls,
kVnmul,
kVorn,
kVorr,
kVpadal,
kVpadd,
kVpaddl,
kVpmax,
kVpmin,
kVpop,
kVpush,
kVqabs,
kVqadd,
kVqdmlal,
kVqdmlsl,
kVqdmulh,
kVqdmull,
kVqmovn,
kVqmovun,
kVqneg,
kVqrdmulh,
kVqrshl,
kVqrshrn,
kVqrshrun,
kVqshl,
kVqshlu,
kVqshrn,
kVqshrun,
kVqsub,
kVraddhn,
kVrecpe,
kVrecps,
kVrev16,
kVrev32,
kVrev64,
kVrhadd,
kVrinta,
kVrintm,
kVrintn,
kVrintp,
kVrintr,
kVrintx,
kVrintz,
kVrshl,
kVrshr,
kVrshrn,
kVrsqrte,
kVrsqrts,
kVrsra,
kVrsubhn,
kVseleq,
kVselge,
kVselgt,
kVselvs,
kVshl,
kVshll,
kVshr,
kVshrn,
kVsli,
kVsqrt,
kVsra,
kVsri,
kVst1,
kVst2,
kVst3,
kVst4,
kVstm,
kVstmdb,
kVstmia,
kVstr,
kVsub,
kVsubhn,
kVsubl,
kVsubw,
kVswp,
kVtbl,
kVtbx,
kVtrn,
kVtst,
kVuzp,
kVzip,
kYield
};
const char* ToCString(InstructionType type);
// End of generated code.
inline InstructionAttribute operator|(InstructionAttribute left,
InstructionAttribute right) {
return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
static_cast<uint32_t>(right));
}
} // namespace aarch32
} // namespace vixl
#endif // VIXL_CONSTANTS_AARCH32_H_
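For orientation, a minimal usage sketch (not part of this commit) of the two helpers declared above: ToCString() maps an InstructionType back to its mnemonic through the generated switch in constants-aarch32.cc, and the inline operator| combines InstructionAttribute flags.

#include <cstdio>
#include "aarch32/constants-aarch32.h"

int main() {
  using namespace vixl::aarch32;
  // Mnemonic lookup through the generated switch in constants-aarch32.cc.
  std::printf("%s\n", ToCString(kLdrex));               // prints "ldrex"
  // Attribute flags combine via the inline operator| defined in the header.
  InstructionAttribute attrs = kLoadStore | kAddress;   // 0x80 | 0x8
  std::printf("0x%x\n", static_cast<unsigned>(attrs));  // prints "0x88"
  return 0;
}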
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_DISASM_AARCH32_H_
#define VIXL_DISASM_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include <iomanip>
#include "aarch32/constants-aarch32.h"
#include "aarch32/operands-aarch32.h"
namespace vixl {
namespace aarch32 {
class ITBlock {
Condition first_condition_;
Condition condition_;
uint16_t it_mask_;
public:
ITBlock() : first_condition_(al), condition_(al), it_mask_(0) {}
void Advance() {
condition_ = Condition((condition_.GetCondition() & 0xe) | (it_mask_ >> 3));
it_mask_ = (it_mask_ << 1) & 0xf;
}
bool InITBlock() const { return it_mask_ != 0; }
bool OutsideITBlock() const { return !InITBlock(); }
bool LastInITBlock() const { return it_mask_ == 0x8; }
bool OutsideITBlockOrLast() const {
return OutsideITBlock() || LastInITBlock();
}
void Set(Condition first_condition, uint16_t mask) {
condition_ = first_condition_ = first_condition;
it_mask_ = mask;
}
Condition GetFirstCondition() const { return first_condition_; }
Condition GetCurrentCondition() const { return condition_; }
};
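// Illustrative trace (added for clarity, not from the VIXL sources): for a
// Thumb "ITTE EQ" sequence the decoder hands ITBlock::Set() the first
// condition eq (0b0000) and the raw 4-bit IT mask 0b0110. Stepping the class
// above exactly as the disassembler's ITBlockScope does:
//   mask 0b0110 -> current condition eq; Advance() keeps eq, mask becomes 0b1100
//   mask 0b1100 -> current condition eq; Advance() yields ne, mask becomes 0b1000
//   mask 0b1000 -> current condition ne; LastInITBlock() is true, Advance() clears the mask
// which matches the eq, eq, ne conditions an ITTE EQ block applies.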
class Disassembler {
public:
enum LocationType {
kAnyLocation,
kCodeLocation,
kDataLocation,
kCoprocLocation,
kLoadByteLocation,
kLoadHalfWordLocation,
kLoadWordLocation,
kLoadDoubleWordLocation,
kLoadSignedByteLocation,
kLoadSignedHalfWordLocation,
kLoadSinglePrecisionLocation,
kLoadDoublePrecisionLocation,
kStoreByteLocation,
kStoreHalfWordLocation,
kStoreWordLocation,
kStoreDoubleWordLocation,
kStoreSinglePrecisionLocation,
kStoreDoublePrecisionLocation,
kVld1Location,
kVld2Location,
kVld3Location,
kVld4Location,
kVst1Location,
kVst2Location,
kVst3Location,
kVst4Location
};
class ConditionPrinter {
const ITBlock& it_block_;
Condition cond_;
public:
ConditionPrinter(const ITBlock& it_block, Condition cond)
: it_block_(it_block), cond_(cond) {}
const ITBlock& GetITBlock() const { return it_block_; }
Condition GetCond() const { return cond_; }
friend std::ostream& operator<<(std::ostream& os, ConditionPrinter cond) {
if (cond.it_block_.InITBlock() && cond.cond_.Is(al) &&
!cond.cond_.IsNone()) {
return os << "al";
}
return os << cond.cond_;
}
};
class ImmediatePrinter {
uint32_t imm_;
public:
explicit ImmediatePrinter(uint32_t imm) : imm_(imm) {}
uint32_t GetImm() const { return imm_; }
friend std::ostream& operator<<(std::ostream& os, ImmediatePrinter imm) {
return os << "#" << imm.GetImm();
}
};
class SignedImmediatePrinter {
int32_t imm_;
public:
explicit SignedImmediatePrinter(int32_t imm) : imm_(imm) {}
int32_t GetImm() const { return imm_; }
friend std::ostream& operator<<(std::ostream& os,
SignedImmediatePrinter imm) {
return os << "#" << imm.GetImm();
}
};
class RawImmediatePrinter {
uint32_t imm_;
public:
explicit RawImmediatePrinter(uint32_t imm) : imm_(imm) {}
uint32_t GetImm() const { return imm_; }
friend std::ostream& operator<<(std::ostream& os, RawImmediatePrinter imm) {
return os << imm.GetImm();
}
};
class DtPrinter {
DataType dt_;
DataType default_dt_;
public:
DtPrinter(DataType dt, DataType default_dt)
: dt_(dt), default_dt_(default_dt) {}
DataType GetDt() const { return dt_; }
DataType GetDefaultDt() const { return default_dt_; }
friend std::ostream& operator<<(std::ostream& os, DtPrinter dt) {
if (dt.dt_.Is(dt.default_dt_)) return os;
return os << dt.dt_;
}
};
class IndexedRegisterPrinter {
DRegister reg_;
uint32_t index_;
public:
IndexedRegisterPrinter(DRegister reg, uint32_t index)
: reg_(reg), index_(index) {}
DRegister GetReg() const { return reg_; }
uint32_t GetIndex() const { return index_; }
friend std::ostream& operator<<(std::ostream& os,
IndexedRegisterPrinter reg) {
return os << reg.GetReg() << "[" << reg.GetIndex() << "]";
}
};
// TODO: Merge this class with PrintLabel below. This Location class
// represents a PC-relative offset, not an address.
class Location {
public:
typedef int32_t Offset;
Location(Offset immediate, Offset pc_offset)
: immediate_(immediate), pc_offset_(pc_offset) {}
Offset GetImmediate() const { return immediate_; }
Offset GetPCOffset() const { return pc_offset_; }
private:
Offset immediate_;
Offset pc_offset_;
};
class PrintLabel {
LocationType location_type_;
Location::Offset immediate_;
Location::Offset location_;
public:
PrintLabel(LocationType location_type,
Location* offset,
Location::Offset position)
: location_type_(location_type),
immediate_(offset->GetImmediate()),
location_(static_cast<Location::Offset>(
static_cast<int64_t>(offset->GetPCOffset()) +
offset->GetImmediate() + position)) {}
LocationType GetLocationType() const { return location_type_; }
Location::Offset GetLocation() const { return location_; }
Location::Offset GetImmediate() const { return immediate_; }
friend inline std::ostream& operator<<(std::ostream& os,
const PrintLabel& label) {
os << "0x" << std::hex << std::setw(8) << std::setfill('0')
<< label.GetLocation() << std::dec;
return os;
}
};
class PrintMemOperand {
LocationType location_type_;
const MemOperand& operand_;
public:
PrintMemOperand(LocationType location_type, const MemOperand& operand)
: location_type_(location_type), operand_(operand) {}
LocationType GetLocationType() const { return location_type_; }
const MemOperand& GetOperand() const { return operand_; }
};
class PrintAlignedMemOperand {
LocationType location_type_;
const AlignedMemOperand& operand_;
public:
PrintAlignedMemOperand(LocationType location_type,
const AlignedMemOperand& operand)
: location_type_(location_type), operand_(operand) {}
LocationType GetLocationType() const { return location_type_; }
const AlignedMemOperand& GetOperand() const { return operand_; }
};
class DisassemblerStream {
std::ostream& os_;
InstructionType current_instruction_type_;
InstructionAttribute current_instruction_attributes_;
public:
explicit DisassemblerStream(std::ostream& os) // NOLINT(runtime/references)
: os_(os),
current_instruction_type_(kUndefInstructionType),
current_instruction_attributes_(kNoAttribute) {}
virtual ~DisassemblerStream() {}
std::ostream& os() const { return os_; }
void SetCurrentInstruction(
InstructionType current_instruction_type,
InstructionAttribute current_instruction_attributes) {
current_instruction_type_ = current_instruction_type;
current_instruction_attributes_ = current_instruction_attributes;
}
InstructionType GetCurrentInstructionType() const {
return current_instruction_type_;
}
InstructionAttribute GetCurrentInstructionAttributes() const {
return current_instruction_attributes_;
}
bool Has(InstructionAttribute attributes) const {
return (current_instruction_attributes_ & attributes) == attributes;
}
template <typename T>
DisassemblerStream& operator<<(T value) {
os_ << value;
return *this;
}
virtual DisassemblerStream& operator<<(const char* string) {
os_ << string;
return *this;
}
virtual DisassemblerStream& operator<<(const ConditionPrinter& cond) {
os_ << cond;
return *this;
}
virtual DisassemblerStream& operator<<(Condition cond) {
os_ << cond;
return *this;
}
virtual DisassemblerStream& operator<<(const EncodingSize& size) {
os_ << size;
return *this;
}
virtual DisassemblerStream& operator<<(const ImmediatePrinter& imm) {
os_ << imm;
return *this;
}
virtual DisassemblerStream& operator<<(const SignedImmediatePrinter& imm) {
os_ << imm;
return *this;
}
virtual DisassemblerStream& operator<<(const RawImmediatePrinter& imm) {
os_ << imm;
return *this;
}
virtual DisassemblerStream& operator<<(const DtPrinter& dt) {
os_ << dt;
return *this;
}
virtual DisassemblerStream& operator<<(const DataType& type) {
os_ << type;
return *this;
}
virtual DisassemblerStream& operator<<(Shift shift) {
os_ << shift;
return *this;
}
virtual DisassemblerStream& operator<<(Sign sign) {
os_ << sign;
return *this;
}
virtual DisassemblerStream& operator<<(Alignment alignment) {
os_ << alignment;
return *this;
}
virtual DisassemblerStream& operator<<(const PrintLabel& label) {
os_ << label;
return *this;
}
virtual DisassemblerStream& operator<<(const WriteBack& write_back) {
os_ << write_back;
return *this;
}
virtual DisassemblerStream& operator<<(const NeonImmediate& immediate) {
os_ << immediate;
return *this;
}
virtual DisassemblerStream& operator<<(Register reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(SRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(DRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(QRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(const RegisterOrAPSR_nzcv reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(SpecialRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(MaskedSpecialRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(SpecialFPRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(BankedRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(const RegisterList& list) {
os_ << list;
return *this;
}
virtual DisassemblerStream& operator<<(const SRegisterList& list) {
os_ << list;
return *this;
}
virtual DisassemblerStream& operator<<(const DRegisterList& list) {
os_ << list;
return *this;
}
virtual DisassemblerStream& operator<<(const NeonRegisterList& list) {
os_ << list;
return *this;
}
virtual DisassemblerStream& operator<<(const DRegisterLane& reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(const IndexedRegisterPrinter& reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(Coprocessor coproc) {
os_ << coproc;
return *this;
}
virtual DisassemblerStream& operator<<(CRegister reg) {
os_ << reg;
return *this;
}
virtual DisassemblerStream& operator<<(Endianness endian_specifier) {
os_ << endian_specifier;
return *this;
}
virtual DisassemblerStream& operator<<(MemoryBarrier option) {
os_ << option;
return *this;
}
virtual DisassemblerStream& operator<<(InterruptFlags iflags) {
os_ << iflags;
return *this;
}
virtual DisassemblerStream& operator<<(const Operand& operand) {
if (operand.IsImmediate()) {
if (Has(kBitwise)) {
return *this << "#0x" << std::hex << operand.GetImmediate()
<< std::dec;
}
return *this << "#" << operand.GetImmediate();
}
if (operand.IsImmediateShiftedRegister()) {
if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
(operand.GetShiftAmount() == 0)) {
return *this << operand.GetBaseRegister();
}
if (operand.GetShift().IsRRX()) {
return *this << operand.GetBaseRegister() << ", rrx";
}
return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
<< " #" << operand.GetShiftAmount();
}
if (operand.IsRegisterShiftedRegister()) {
return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
<< " " << operand.GetShiftRegister();
}
VIXL_UNREACHABLE();
return *this;
}
virtual DisassemblerStream& operator<<(const SOperand& operand) {
if (operand.IsImmediate()) {
return *this << operand.GetNeonImmediate();
}
return *this << operand.GetRegister();
}
virtual DisassemblerStream& operator<<(const DOperand& operand) {
if (operand.IsImmediate()) {
return *this << operand.GetNeonImmediate();
}
return *this << operand.GetRegister();
}
virtual DisassemblerStream& operator<<(const QOperand& operand) {
if (operand.IsImmediate()) {
return *this << operand.GetNeonImmediate();
}
return *this << operand.GetRegister();
}
virtual DisassemblerStream& operator<<(const MemOperand& operand) {
*this << "[" << operand.GetBaseRegister();
if (operand.GetAddrMode() == PostIndex) {
*this << "]";
if (operand.IsRegisterOnly()) return *this << "!";
}
if (operand.IsImmediate()) {
if ((operand.GetOffsetImmediate() != 0) ||
operand.GetSign().IsMinus() ||
((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
if (operand.GetOffsetImmediate() == 0) {
*this << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
} else {
*this << ", #" << operand.GetOffsetImmediate();
}
}
} else if (operand.IsPlainRegister()) {
*this << ", " << operand.GetSign() << operand.GetOffsetRegister();
} else if (operand.IsShiftedRegister()) {
*this << ", " << operand.GetSign() << operand.GetOffsetRegister()
<< ImmediateShiftOperand(operand.GetShift(),
operand.GetShiftAmount());
} else {
VIXL_UNREACHABLE();
return *this;
}
if (operand.GetAddrMode() == Offset) {
*this << "]";
} else if (operand.GetAddrMode() == PreIndex) {
*this << "]!";
}
return *this;
}
virtual DisassemblerStream& operator<<(const PrintMemOperand& operand) {
return *this << operand.GetOperand();
}
virtual DisassemblerStream& operator<<(const AlignedMemOperand& operand) {
*this << "[" << operand.GetBaseRegister() << operand.GetAlignment()
<< "]";
if (operand.GetAddrMode() == PostIndex) {
if (operand.IsPlainRegister()) {
*this << ", " << operand.GetOffsetRegister();
} else {
*this << "!";
}
}
return *this;
}
virtual DisassemblerStream& operator<<(
const PrintAlignedMemOperand& operand) {
return *this << operand.GetOperand();
}
};
private:
class ITBlockScope {
ITBlock* const it_block_;
bool inside_;
public:
explicit ITBlockScope(ITBlock* it_block)
: it_block_(it_block), inside_(it_block->InITBlock()) {}
~ITBlockScope() {
if (inside_) it_block_->Advance();
}
};
ITBlock it_block_;
DisassemblerStream* os_;
bool owns_os_;
uint32_t code_address_;
// True if the disassembler always outputs instructions with all the
// registers (even if two registers are identical and only one could be
// output).
bool use_short_hand_form_;
public:
explicit Disassembler(std::ostream& os, // NOLINT(runtime/references)
uint32_t code_address = 0)
: os_(new DisassemblerStream(os)),
owns_os_(true),
code_address_(code_address),
use_short_hand_form_(true) {}
explicit Disassembler(DisassemblerStream* os, uint32_t code_address = 0)
: os_(os),
owns_os_(false),
code_address_(code_address),
use_short_hand_form_(true) {}
virtual ~Disassembler() {
if (owns_os_) {
delete os_;
}
}
DisassemblerStream& os() const { return *os_; }
void SetIT(Condition first_condition, uint16_t it_mask) {
it_block_.Set(first_condition, it_mask);
}
const ITBlock& GetITBlock() const { return it_block_; }
bool InITBlock() const { return it_block_.InITBlock(); }
bool OutsideITBlock() const { return it_block_.OutsideITBlock(); }
bool OutsideITBlockOrLast() const { return it_block_.OutsideITBlockOrLast(); }
void CheckNotIT() const { VIXL_ASSERT(it_block_.OutsideITBlock()); }
// Return the current condition depending on the IT state for T32.
Condition CurrentCond() const {
if (it_block_.OutsideITBlock()) return al;
return it_block_.GetCurrentCondition();
}
bool UseShortHandForm() const { return use_short_hand_form_; }
void SetUseShortHandForm(bool use_short_hand_form) {
use_short_hand_form_ = use_short_hand_form;
}
virtual void UnallocatedT32(uint32_t instruction) {
if (T32Size(instruction) == 2) {
os() << "unallocated " << std::hex << std::setw(4) << std::setfill('0')
<< (instruction >> 16) << std::dec;
} else {
os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0')
<< instruction << std::dec;
}
}
virtual void UnallocatedA32(uint32_t instruction) {
os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0')
<< instruction << std::dec;
}
virtual void UnimplementedT32_16(const char* name, uint32_t instruction) {
os() << "unimplemented " << name << " T32:" << std::hex << std::setw(4)
<< std::setfill('0') << (instruction >> 16) << std::dec;
}
virtual void UnimplementedT32_32(const char* name, uint32_t instruction) {
os() << "unimplemented " << name << " T32:" << std::hex << std::setw(8)
<< std::setfill('0') << instruction << std::dec;
}
virtual void UnimplementedA32(const char* name, uint32_t instruction) {
os() << "unimplemented " << name << " ARM:" << std::hex << std::setw(8)
<< std::setfill('0') << instruction << std::dec;
}
virtual void Unpredictable() { os() << " ; unpredictable"; }
virtual void UnpredictableT32(uint32_t /*instr*/) { return Unpredictable(); }
virtual void UnpredictableA32(uint32_t /*instr*/) { return Unpredictable(); }
static bool Is16BitEncoding(uint32_t instr) { return instr < 0xe8000000; }
uint32_t GetCodeAddress() const { return code_address_; }
void SetCodeAddress(uint32_t code_address) { code_address_ = code_address; }
// Start of generated code.
void adc(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void adcs(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void add(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void add(Condition cond, Register rd, const Operand& operand);
void adds(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void adds(Register rd, const Operand& operand);
void addw(Condition cond, Register rd, Register rn, const Operand& operand);
void adr(Condition cond, EncodingSize size, Register rd, Location* location);
void and_(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void ands(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void asr(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void asrs(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void b(Condition cond, EncodingSize size, Location* location);
void bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width);
void bfi(
Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width);
void bic(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void bics(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void bkpt(Condition cond, uint32_t imm);
void bl(Condition cond, Location* location);
void blx(Condition cond, Location* location);
void blx(Condition cond, Register rm);
void bx(Condition cond, Register rm);
void bxj(Condition cond, Register rm);
void cbnz(Register rn, Location* location);
void cbz(Register rn, Location* location);
void clrex(Condition cond);
void clz(Condition cond, Register rd, Register rm);
void cmn(Condition cond,
EncodingSize size,
Register rn,
const Operand& operand);
void cmp(Condition cond,
EncodingSize size,
Register rn,
const Operand& operand);
void crc32b(Condition cond, Register rd, Register rn, Register rm);
void crc32cb(Condition cond, Register rd, Register rn, Register rm);
void crc32ch(Condition cond, Register rd, Register rn, Register rm);
void crc32cw(Condition cond, Register rd, Register rn, Register rm);
void crc32h(Condition cond, Register rd, Register rn, Register rm);
void crc32w(Condition cond, Register rd, Register rn, Register rm);
void dmb(Condition cond, MemoryBarrier option);
void dsb(Condition cond, MemoryBarrier option);
void eor(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void eors(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void fldmdbx(Condition cond,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void fldmiax(Condition cond,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void fstmdbx(Condition cond,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void fstmiax(Condition cond,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void hlt(Condition cond, uint32_t imm);
void hvc(Condition cond, uint32_t imm);
void isb(Condition cond, MemoryBarrier option);
void it(Condition cond, uint16_t mask);
void lda(Condition cond, Register rt, const MemOperand& operand);
void ldab(Condition cond, Register rt, const MemOperand& operand);
void ldaex(Condition cond, Register rt, const MemOperand& operand);
void ldaexb(Condition cond, Register rt, const MemOperand& operand);
void ldaexd(Condition cond,
Register rt,
Register rt2,
const MemOperand& operand);
void ldaexh(Condition cond, Register rt, const MemOperand& operand);
void ldah(Condition cond, Register rt, const MemOperand& operand);
void ldm(Condition cond,
EncodingSize size,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmda(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmdb(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmea(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmed(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmfa(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmfd(Condition cond,
EncodingSize size,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldmib(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void ldr(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void ldr(Condition cond, EncodingSize size, Register rt, Location* location);
void ldrb(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void ldrb(Condition cond, Register rt, Location* location);
void ldrd(Condition cond,
Register rt,
Register rt2,
const MemOperand& operand);
void ldrd(Condition cond, Register rt, Register rt2, Location* location);
void ldrex(Condition cond, Register rt, const MemOperand& operand);
void ldrexb(Condition cond, Register rt, const MemOperand& operand);
void ldrexd(Condition cond,
Register rt,
Register rt2,
const MemOperand& operand);
void ldrexh(Condition cond, Register rt, const MemOperand& operand);
void ldrh(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void ldrh(Condition cond, Register rt, Location* location);
void ldrsb(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void ldrsb(Condition cond, Register rt, Location* location);
void ldrsh(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void ldrsh(Condition cond, Register rt, Location* location);
void lsl(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void lsls(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void lsr(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void lsrs(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void mla(Condition cond, Register rd, Register rn, Register rm, Register ra);
void mlas(Condition cond, Register rd, Register rn, Register rm, Register ra);
void mls(Condition cond, Register rd, Register rn, Register rm, Register ra);
void mov(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void movs(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void movt(Condition cond, Register rd, const Operand& operand);
void movw(Condition cond, Register rd, const Operand& operand);
void mrs(Condition cond, Register rd, SpecialRegister spec_reg);
void msr(Condition cond,
MaskedSpecialRegister spec_reg,
const Operand& operand);
void mul(
Condition cond, EncodingSize size, Register rd, Register rn, Register rm);
void muls(Condition cond, Register rd, Register rn, Register rm);
void mvn(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void mvns(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void nop(Condition cond, EncodingSize size);
void orn(Condition cond, Register rd, Register rn, const Operand& operand);
void orns(Condition cond, Register rd, Register rn, const Operand& operand);
void orr(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void orrs(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void pkhbt(Condition cond, Register rd, Register rn, const Operand& operand);
void pkhtb(Condition cond, Register rd, Register rn, const Operand& operand);
void pld(Condition cond, Location* location);
void pld(Condition cond, const MemOperand& operand);
void pldw(Condition cond, const MemOperand& operand);
void pli(Condition cond, const MemOperand& operand);
void pli(Condition cond, Location* location);
void pop(Condition cond, EncodingSize size, RegisterList registers);
void pop(Condition cond, EncodingSize size, Register rt);
void push(Condition cond, EncodingSize size, RegisterList registers);
void push(Condition cond, EncodingSize size, Register rt);
void qadd(Condition cond, Register rd, Register rm, Register rn);
void qadd16(Condition cond, Register rd, Register rn, Register rm);
void qadd8(Condition cond, Register rd, Register rn, Register rm);
void qasx(Condition cond, Register rd, Register rn, Register rm);
void qdadd(Condition cond, Register rd, Register rm, Register rn);
void qdsub(Condition cond, Register rd, Register rm, Register rn);
void qsax(Condition cond, Register rd, Register rn, Register rm);
void qsub(Condition cond, Register rd, Register rm, Register rn);
void qsub16(Condition cond, Register rd, Register rn, Register rm);
void qsub8(Condition cond, Register rd, Register rn, Register rm);
void rbit(Condition cond, Register rd, Register rm);
void rev(Condition cond, EncodingSize size, Register rd, Register rm);
void rev16(Condition cond, EncodingSize size, Register rd, Register rm);
void revsh(Condition cond, EncodingSize size, Register rd, Register rm);
void ror(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void rors(Condition cond,
EncodingSize size,
Register rd,
Register rm,
const Operand& operand);
void rrx(Condition cond, Register rd, Register rm);
void rrxs(Condition cond, Register rd, Register rm);
void rsb(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void rsbs(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void rsc(Condition cond, Register rd, Register rn, const Operand& operand);
void rscs(Condition cond, Register rd, Register rn, const Operand& operand);
void sadd16(Condition cond, Register rd, Register rn, Register rm);
void sadd8(Condition cond, Register rd, Register rn, Register rm);
void sasx(Condition cond, Register rd, Register rn, Register rm);
void sbc(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void sbcs(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void sbfx(
Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width);
void sdiv(Condition cond, Register rd, Register rn, Register rm);
void sel(Condition cond, Register rd, Register rn, Register rm);
void shadd16(Condition cond, Register rd, Register rn, Register rm);
void shadd8(Condition cond, Register rd, Register rn, Register rm);
void shasx(Condition cond, Register rd, Register rn, Register rm);
void shsax(Condition cond, Register rd, Register rn, Register rm);
void shsub16(Condition cond, Register rd, Register rn, Register rm);
void shsub8(Condition cond, Register rd, Register rn, Register rm);
void smlabb(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlabt(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlad(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smladx(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlal(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlalbb(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlalbt(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlald(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlaldx(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlals(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlaltb(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlaltt(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlatb(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlatt(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlawb(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlawt(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlsd(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlsdx(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smlsld(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smlsldx(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smmla(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smmlar(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smmls(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smmlsr(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void smmul(Condition cond, Register rd, Register rn, Register rm);
void smmulr(Condition cond, Register rd, Register rn, Register rm);
void smuad(Condition cond, Register rd, Register rn, Register rm);
void smuadx(Condition cond, Register rd, Register rn, Register rm);
void smulbb(Condition cond, Register rd, Register rn, Register rm);
void smulbt(Condition cond, Register rd, Register rn, Register rm);
void smull(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smulls(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void smultb(Condition cond, Register rd, Register rn, Register rm);
void smultt(Condition cond, Register rd, Register rn, Register rm);
void smulwb(Condition cond, Register rd, Register rn, Register rm);
void smulwt(Condition cond, Register rd, Register rn, Register rm);
void smusd(Condition cond, Register rd, Register rn, Register rm);
void smusdx(Condition cond, Register rd, Register rn, Register rm);
void ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand);
void ssat16(Condition cond, Register rd, uint32_t imm, Register rn);
void ssax(Condition cond, Register rd, Register rn, Register rm);
void ssub16(Condition cond, Register rd, Register rn, Register rm);
void ssub8(Condition cond, Register rd, Register rn, Register rm);
void stl(Condition cond, Register rt, const MemOperand& operand);
void stlb(Condition cond, Register rt, const MemOperand& operand);
void stlex(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void stlexb(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void stlexd(Condition cond,
Register rd,
Register rt,
Register rt2,
const MemOperand& operand);
void stlexh(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void stlh(Condition cond, Register rt, const MemOperand& operand);
void stm(Condition cond,
EncodingSize size,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmda(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmdb(Condition cond,
EncodingSize size,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmea(Condition cond,
EncodingSize size,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmed(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmfa(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmfd(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void stmib(Condition cond,
Register rn,
WriteBack write_back,
RegisterList registers);
void str(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void strb(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void strd(Condition cond,
Register rt,
Register rt2,
const MemOperand& operand);
void strex(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void strexb(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void strexd(Condition cond,
Register rd,
Register rt,
Register rt2,
const MemOperand& operand);
void strexh(Condition cond,
Register rd,
Register rt,
const MemOperand& operand);
void strh(Condition cond,
EncodingSize size,
Register rt,
const MemOperand& operand);
void sub(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void sub(Condition cond, Register rd, const Operand& operand);
void subs(Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand);
void subs(Register rd, const Operand& operand);
void subw(Condition cond, Register rd, Register rn, const Operand& operand);
void svc(Condition cond, uint32_t imm);
void sxtab(Condition cond, Register rd, Register rn, const Operand& operand);
void sxtab16(Condition cond,
Register rd,
Register rn,
const Operand& operand);
void sxtah(Condition cond, Register rd, Register rn, const Operand& operand);
void sxtb(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void sxtb16(Condition cond, Register rd, const Operand& operand);
void sxth(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void tbb(Condition cond, Register rn, Register rm);
void tbh(Condition cond, Register rn, Register rm);
void teq(Condition cond, Register rn, const Operand& operand);
void tst(Condition cond,
EncodingSize size,
Register rn,
const Operand& operand);
void uadd16(Condition cond, Register rd, Register rn, Register rm);
void uadd8(Condition cond, Register rd, Register rn, Register rm);
void uasx(Condition cond, Register rd, Register rn, Register rm);
void ubfx(
Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width);
void udf(Condition cond, EncodingSize size, uint32_t imm);
void udiv(Condition cond, Register rd, Register rn, Register rm);
void uhadd16(Condition cond, Register rd, Register rn, Register rm);
void uhadd8(Condition cond, Register rd, Register rn, Register rm);
void uhasx(Condition cond, Register rd, Register rn, Register rm);
void uhsax(Condition cond, Register rd, Register rn, Register rm);
void uhsub16(Condition cond, Register rd, Register rn, Register rm);
void uhsub8(Condition cond, Register rd, Register rn, Register rm);
void umaal(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void umlal(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void umlals(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void umull(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void umulls(
Condition cond, Register rdlo, Register rdhi, Register rn, Register rm);
void uqadd16(Condition cond, Register rd, Register rn, Register rm);
void uqadd8(Condition cond, Register rd, Register rn, Register rm);
void uqasx(Condition cond, Register rd, Register rn, Register rm);
void uqsax(Condition cond, Register rd, Register rn, Register rm);
void uqsub16(Condition cond, Register rd, Register rn, Register rm);
void uqsub8(Condition cond, Register rd, Register rn, Register rm);
void usad8(Condition cond, Register rd, Register rn, Register rm);
void usada8(
Condition cond, Register rd, Register rn, Register rm, Register ra);
void usat(Condition cond, Register rd, uint32_t imm, const Operand& operand);
void usat16(Condition cond, Register rd, uint32_t imm, Register rn);
void usax(Condition cond, Register rd, Register rn, Register rm);
void usub16(Condition cond, Register rd, Register rn, Register rm);
void usub8(Condition cond, Register rd, Register rn, Register rm);
void uxtab(Condition cond, Register rd, Register rn, const Operand& operand);
void uxtab16(Condition cond,
Register rd,
Register rn,
const Operand& operand);
void uxtah(Condition cond, Register rd, Register rn, const Operand& operand);
void uxtb(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void uxtb16(Condition cond, Register rd, const Operand& operand);
void uxth(Condition cond,
EncodingSize size,
Register rd,
const Operand& operand);
void vaba(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vaba(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vabal(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vabd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vabd(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vabdl(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vabs(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vabs(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vabs(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vacge(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vacge(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vacgt(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vacgt(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vacle(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vacle(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vaclt(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vaclt(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vadd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vadd(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vadd(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vaddhn(
Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm);
void vaddl(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vaddw(
Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm);
void vand(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
const DOperand& operand);
void vand(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
const QOperand& operand);
void vbic(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
const DOperand& operand);
void vbic(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
const QOperand& operand);
void vbif(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vbif(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vbit(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vbit(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vbsl(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vbsl(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vceq(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vceq(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vceq(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vceq(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vcge(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vcge(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vcge(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vcge(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vcgt(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vcgt(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vcgt(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vcgt(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vcle(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vcle(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vcle(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vcle(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vcls(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vcls(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vclt(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vclt(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vclt(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vclt(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vclz(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vclz(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vcmp(Condition cond, DataType dt, SRegister rd, const SOperand& operand);
void vcmp(Condition cond, DataType dt, DRegister rd, const DOperand& operand);
void vcmpe(Condition cond,
DataType dt,
SRegister rd,
const SOperand& operand);
void vcmpe(Condition cond,
DataType dt,
DRegister rd,
const DOperand& operand);
void vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvt(Condition cond,
DataType dt1,
DataType dt2,
DRegister rd,
DRegister rm,
int32_t fbits);
void vcvt(Condition cond,
DataType dt1,
DataType dt2,
QRegister rd,
QRegister rm,
int32_t fbits);
void vcvt(Condition cond,
DataType dt1,
DataType dt2,
SRegister rd,
SRegister rm,
int32_t fbits);
void vcvt(
Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm);
void vcvt(
Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm);
void vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm);
void vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtb(
Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtb(
Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
void vcvtb(
Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm);
void vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm);
void vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm);
void vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm);
void vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm);
void vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm);
void vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtr(
Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtr(
Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vcvtt(
Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
void vcvtt(
Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
void vcvtt(
Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
void vdiv(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vdiv(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vdup(Condition cond, DataType dt, QRegister rd, Register rt);
void vdup(Condition cond, DataType dt, DRegister rd, Register rt);
void vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm);
void vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm);
void veor(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void veor(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vext(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegister rm,
const DOperand& operand);
void vext(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
QRegister rm,
const QOperand& operand);
void vfma(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vfma(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vfma(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vfms(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vfms(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vfms(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vfnma(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vfnma(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vfnms(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vfnms(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vhadd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vhadd(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vhsub(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vhsub(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vld1(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vld2(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vld3(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vld3(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const MemOperand& operand);
void vld4(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vldm(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vldm(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vldmdb(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vldmdb(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vldmia(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vldmia(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vldr(Condition cond, DataType dt, DRegister rd, Location* location);
void vldr(Condition cond,
DataType dt,
DRegister rd,
const MemOperand& operand);
void vldr(Condition cond, DataType dt, SRegister rd, Location* location);
void vldr(Condition cond,
DataType dt,
SRegister rd,
const MemOperand& operand);
void vmax(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmax(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vmin(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmin(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vmla(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegisterLane rm);
void vmla(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
DRegisterLane rm);
void vmla(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmla(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vmla(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vmlal(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegisterLane rm);
void vmlal(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vmls(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegisterLane rm);
void vmls(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
DRegisterLane rm);
void vmls(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmls(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vmls(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vmlsl(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegisterLane rm);
void vmlsl(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vmov(Condition cond, Register rt, SRegister rn);
void vmov(Condition cond, SRegister rn, Register rt);
void vmov(Condition cond, Register rt, Register rt2, DRegister rm);
void vmov(Condition cond, DRegister rm, Register rt, Register rt2);
void vmov(
Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1);
void vmov(
Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2);
void vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt);
void vmov(Condition cond, DataType dt, DRegister rd, const DOperand& operand);
void vmov(Condition cond, DataType dt, QRegister rd, const QOperand& operand);
void vmov(Condition cond, DataType dt, SRegister rd, const SOperand& operand);
void vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn);
void vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm);
void vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm);
void vmrs(Condition cond, RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg);
void vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt);
void vmul(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegister dm,
unsigned index);
void vmul(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
DRegister dm,
unsigned index);
void vmul(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vmul(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vmul(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vmull(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegister dm,
unsigned index);
void vmull(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vmvn(Condition cond, DataType dt, DRegister rd, const DOperand& operand);
void vmvn(Condition cond, DataType dt, QRegister rd, const QOperand& operand);
void vneg(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vneg(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vneg(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vnmla(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vnmla(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vnmls(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vnmls(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vnmul(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vnmul(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vorn(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
const DOperand& operand);
void vorn(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
const QOperand& operand);
void vorr(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
const DOperand& operand);
void vorr(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
const QOperand& operand);
void vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vpadd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vpmax(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vpmin(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vpop(Condition cond, DataType dt, DRegisterList dreglist);
void vpop(Condition cond, DataType dt, SRegisterList sreglist);
void vpush(Condition cond, DataType dt, DRegisterList dreglist);
void vpush(Condition cond, DataType dt, SRegisterList sreglist);
void vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vqadd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vqadd(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vqdmlal(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vqdmlal(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegister dm,
unsigned index);
void vqdmlsl(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vqdmlsl(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegister dm,
unsigned index);
void vqdmulh(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vqdmulh(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vqdmulh(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegisterLane rm);
void vqdmulh(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
DRegisterLane rm);
void vqdmull(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vqdmull(Condition cond,
DataType dt,
QRegister rd,
DRegister rn,
DRegisterLane rm);
void vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm);
void vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm);
void vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vqrdmulh(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vqrdmulh(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vqrdmulh(Condition cond,
DataType dt,
DRegister rd,
DRegister rn,
DRegisterLane rm);
void vqrdmulh(Condition cond,
DataType dt,
QRegister rd,
QRegister rn,
DRegisterLane rm);
void vqrshl(
Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn);
void vqrshl(
Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn);
void vqrshrn(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vqrshrun(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vqshl(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vqshl(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vqshlu(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vqshlu(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vqshrn(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vqshrun(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vqsub(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vqsub(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vraddhn(
Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm);
void vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vrecps(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vrecps(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vrhadd(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vrhadd(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vrinta(DataType dt, DRegister rd, DRegister rm);
void vrinta(DataType dt, QRegister rd, QRegister rm);
void vrinta(DataType dt, SRegister rd, SRegister rm);
void vrintm(DataType dt, DRegister rd, DRegister rm);
void vrintm(DataType dt, QRegister rd, QRegister rm);
void vrintm(DataType dt, SRegister rd, SRegister rm);
void vrintn(DataType dt, DRegister rd, DRegister rm);
void vrintn(DataType dt, QRegister rd, QRegister rm);
void vrintn(DataType dt, SRegister rd, SRegister rm);
void vrintp(DataType dt, DRegister rd, DRegister rm);
void vrintp(DataType dt, QRegister rd, QRegister rm);
void vrintp(DataType dt, SRegister rd, SRegister rm);
void vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrintx(DataType dt, QRegister rd, QRegister rm);
void vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrintz(DataType dt, QRegister rd, QRegister rm);
void vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vrshl(
Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn);
void vrshl(
Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn);
void vrshr(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vrshr(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vrshrn(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vrsqrts(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vrsqrts(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vrsra(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vrsra(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vrsubhn(
Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm);
void vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vshl(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vshl(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vshll(Condition cond,
DataType dt,
QRegister rd,
DRegister rm,
const DOperand& operand);
void vshr(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vshr(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vshrn(Condition cond,
DataType dt,
DRegister rd,
QRegister rm,
const QOperand& operand);
void vsli(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vsli(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm);
void vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vsra(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vsra(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vsri(Condition cond,
DataType dt,
DRegister rd,
DRegister rm,
const DOperand& operand);
void vsri(Condition cond,
DataType dt,
QRegister rd,
QRegister rm,
const QOperand& operand);
void vst1(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vst2(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vst3(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vst3(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const MemOperand& operand);
void vst4(Condition cond,
DataType dt,
const NeonRegisterList& nreglist,
const AlignedMemOperand& operand);
void vstm(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vstm(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vstmdb(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vstmdb(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vstmia(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
DRegisterList dreglist);
void vstmia(Condition cond,
DataType dt,
Register rn,
WriteBack write_back,
SRegisterList sreglist);
void vstr(Condition cond,
DataType dt,
DRegister rd,
const MemOperand& operand);
void vstr(Condition cond,
DataType dt,
SRegister rd,
const MemOperand& operand);
void vsub(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vsub(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vsub(
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
void vsubhn(
Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm);
void vsubl(
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
void vsubw(
Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm);
void vswp(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vswp(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vtbl(Condition cond,
DataType dt,
DRegister rd,
const NeonRegisterList& nreglist,
DRegister rm);
void vtbx(Condition cond,
DataType dt,
DRegister rd,
const NeonRegisterList& nreglist,
DRegister rm);
void vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vtst(
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
void vtst(
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
void vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm);
void vzip(Condition cond, DataType dt, DRegister rd, DRegister rm);
void vzip(Condition cond, DataType dt, QRegister rd, QRegister rm);
void yield(Condition cond, EncodingSize size);
int T32Size(uint32_t instr);
void DecodeT32(uint32_t instr);
void DecodeA32(uint32_t instr);
};
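// Helpers that map encoded size/type fields of an instruction to a
// DataTypeValue (and, where applicable, a lane index). They are emitted
// together with the generated disassembler above.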
DataTypeValue Dt_L_imm6_1_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_L_imm6_2_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_L_imm6_3_Decode(uint32_t value);
DataTypeValue Dt_L_imm6_4_Decode(uint32_t value);
DataTypeValue Dt_imm6_1_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_imm6_2_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_imm6_3_Decode(uint32_t value);
DataTypeValue Dt_imm6_4_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_op_U_size_1_Decode(uint32_t value);
DataTypeValue Dt_op_size_1_Decode(uint32_t value);
DataTypeValue Dt_op_size_2_Decode(uint32_t value);
DataTypeValue Dt_op_size_3_Decode(uint32_t value);
DataTypeValue Dt_U_imm3H_1_Decode(uint32_t value);
DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane);
DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane);
DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane);
DataTypeValue Dt_B_E_1_Decode(uint32_t value);
DataTypeValue Dt_op_1_Decode1(uint32_t value);
DataTypeValue Dt_op_1_Decode2(uint32_t value);
DataTypeValue Dt_op_2_Decode(uint32_t value);
DataTypeValue Dt_op_3_Decode(uint32_t value);
DataTypeValue Dt_U_sx_1_Decode(uint32_t value);
DataTypeValue Dt_op_U_1_Decode1(uint32_t value);
DataTypeValue Dt_op_U_1_Decode2(uint32_t value);
DataTypeValue Dt_sz_1_Decode(uint32_t value);
DataTypeValue Dt_F_size_1_Decode(uint32_t value);
DataTypeValue Dt_F_size_2_Decode(uint32_t value);
DataTypeValue Dt_F_size_3_Decode(uint32_t value);
DataTypeValue Dt_F_size_4_Decode(uint32_t value);
DataTypeValue Dt_U_size_1_Decode(uint32_t value);
DataTypeValue Dt_U_size_2_Decode(uint32_t value);
DataTypeValue Dt_U_size_3_Decode(uint32_t value);
DataTypeValue Dt_size_1_Decode(uint32_t value);
DataTypeValue Dt_size_2_Decode(uint32_t value);
DataTypeValue Dt_size_3_Decode(uint32_t value);
DataTypeValue Dt_size_4_Decode(uint32_t value);
DataTypeValue Dt_size_5_Decode(uint32_t value);
DataTypeValue Dt_size_6_Decode(uint32_t value);
DataTypeValue Dt_size_7_Decode(uint32_t value);
DataTypeValue Dt_size_8_Decode(uint32_t value);
DataTypeValue Dt_size_9_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_size_10_Decode(uint32_t value);
DataTypeValue Dt_size_11_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_size_12_Decode(uint32_t value, uint32_t type_value);
DataTypeValue Dt_size_13_Decode(uint32_t value);
DataTypeValue Dt_size_14_Decode(uint32_t value);
DataTypeValue Dt_size_15_Decode(uint32_t value);
DataTypeValue Dt_size_16_Decode(uint32_t value);
DataTypeValue Dt_size_17_Decode(uint32_t value);
// End of generated code.
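// A Disassembler that also prints the code address and the raw opcode in front
// of each decoded instruction, together with helpers to walk whole A32 (32-bit
// word) and T32 (16/32-bit halfword) code buffers.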
class PrintDisassembler : public Disassembler {
public:
explicit PrintDisassembler(std::ostream& os, // NOLINT(runtime/references)
uint32_t code_address = 0)
: Disassembler(os, code_address) {}
explicit PrintDisassembler(DisassemblerStream* os, uint32_t code_address = 0)
: Disassembler(os, code_address) {}
virtual void PrintCodeAddress(uint32_t code_address) {
os() << "0x" << std::hex << std::setw(8) << std::setfill('0')
<< code_address << "\t";
}
virtual void PrintOpcode16(uint32_t opcode) {
os() << std::hex << std::setw(4) << std::setfill('0') << opcode << " "
<< std::dec << "\t";
}
virtual void PrintOpcode32(uint32_t opcode) {
os() << std::hex << std::setw(8) << std::setfill('0') << opcode << std::dec
<< "\t";
}
const uint32_t* DecodeA32At(const uint32_t* instruction_address) {
DecodeA32(*instruction_address);
return instruction_address + 1;
}
// Returns the address of the next instruction.
const uint16_t* DecodeT32At(const uint16_t* instruction_address,
const uint16_t* buffer_end);
void DecodeT32(uint32_t instruction);
void DecodeA32(uint32_t instruction);
void DisassembleA32Buffer(const uint32_t* buffer, size_t size_in_bytes);
void DisassembleT32Buffer(const uint16_t* buffer, size_t size_in_bytes);
};
} // namespace aarch32
} // namespace vixl
#endif // VIXL_DISASM_AARCH32_H_
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <stdint.h>
}
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include "utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
bool Shift::IsValidAmount(uint32_t amount) const {
switch (GetType()) {
case LSL:
return amount <= 31;
case ROR:
return (amount > 0) && (amount <= 31);
case LSR:
case ASR:
return (amount > 0) && (amount <= 32);
case RRX:
return amount == 0;
default:
VIXL_UNREACHABLE();
return false;
}
}
std::ostream& operator<<(std::ostream& os, const Register reg) {
switch (reg.GetCode()) {
case 12:
return os << "ip";
case 13:
return os << "sp";
case 14:
return os << "lr";
case 15:
return os << "pc";
default:
return os << "r" << reg.GetCode();
}
}
SRegister VRegister::S() const {
VIXL_ASSERT(GetType() == kSRegister);
return SRegister(GetCode());
}
DRegister VRegister::D() const {
VIXL_ASSERT(GetType() == kDRegister);
return DRegister(GetCode());
}
QRegister VRegister::Q() const {
VIXL_ASSERT(GetType() == kQRegister);
return QRegister(GetCode());
}
Register RegisterList::GetFirstAvailableRegister() const {
for (uint32_t i = 0; i < kNumberOfRegisters; i++) {
if (((list_ >> i) & 1) != 0) return Register(i);
}
return Register();
}
std::ostream& PrintRegisterList(std::ostream& os, // NOLINT(runtime/references)
uint32_t list) {
os << "{";
bool first = true;
int code = 0;
while (list != 0) {
if ((list & 1) != 0) {
if (first) {
first = false;
} else {
os << ",";
}
os << Register(code);
}
list >>= 1;
code++;
}
os << "}";
return os;
}
std::ostream& operator<<(std::ostream& os, RegisterList registers) {
return PrintRegisterList(os, registers.GetList());
}
QRegister VRegisterList::GetFirstAvailableQRegister() const {
for (uint32_t i = 0; i < kNumberOfQRegisters; i++) {
if (((list_ >> (i * 4)) & 0xf) == 0xf) return QRegister(i);
}
return QRegister();
}
DRegister VRegisterList::GetFirstAvailableDRegister() const {
for (uint32_t i = 0; i < kMaxNumberOfDRegisters; i++) {
if (((list_ >> (i * 2)) & 0x3) == 0x3) return DRegister(i);
}
return DRegister();
}
SRegister VRegisterList::GetFirstAvailableSRegister() const {
for (uint32_t i = 0; i < kNumberOfSRegisters; i++) {
if (((list_ >> i) & 0x1) != 0) return SRegister(i);
}
return SRegister();
}
std::ostream& operator<<(std::ostream& os, SRegisterList reglist) {
SRegister first = reglist.GetFirstSRegister();
SRegister last = reglist.GetLastSRegister();
if (first.Is(last))
os << "{" << first << "}";
else
os << "{" << first << "-" << last << "}";
return os;
}
std::ostream& operator<<(std::ostream& os, DRegisterList reglist) {
DRegister first = reglist.GetFirstDRegister();
DRegister last = reglist.GetLastDRegister();
if (first.Is(last))
os << "{" << first << "}";
else
os << "{" << first << "-" << last << "}";
return os;
}
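// Prints a NEON register list such as "{d0,d2,d4}" or "{d0[],d1[]}", honouring
// the list's single/double spacing and its all-lanes ("[]") or single-lane
// ("[n]") transfer markers, and wrapping past the last D register if needed.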
std::ostream& operator<<(std::ostream& os, NeonRegisterList nreglist) {
DRegister first = nreglist.GetFirstDRegister();
int increment = nreglist.IsSingleSpaced() ? 1 : 2;
int count =
nreglist.GetLastDRegister().GetCode() - first.GetCode() + increment;
if (count < 0) count += kMaxNumberOfDRegisters;
os << "{";
bool first_displayed = false;
for (;;) {
if (first_displayed) {
os << ",";
} else {
first_displayed = true;
}
os << first;
if (nreglist.IsTransferOneLane()) {
os << "[" << nreglist.GetTransferLane() << "]";
} else if (nreglist.IsTransferAllLanes()) {
os << "[]";
}
count -= increment;
if (count <= 0) break;
unsigned next = first.GetCode() + increment;
if (next >= kMaxNumberOfDRegisters) next -= kMaxNumberOfDRegisters;
first = DRegister(next);
}
os << "}";
return os;
}
const char* SpecialRegister::GetName() const {
switch (reg_) {
case APSR:
return "APSR";
case SPSR:
return "SPSR";
}
VIXL_UNREACHABLE();
return "??";
}
const char* MaskedSpecialRegister::GetName() const {
switch (reg_) {
case APSR_nzcvq:
return "APSR_nzcvq";
case APSR_g:
return "APSR_g";
case APSR_nzcvqg:
return "APSR_nzcvqg";
case CPSR_c:
return "CPSR_c";
case CPSR_x:
return "CPSR_x";
case CPSR_xc:
return "CPSR_xc";
case CPSR_sc:
return "CPSR_sc";
case CPSR_sx:
return "CPSR_sx";
case CPSR_sxc:
return "CPSR_sxc";
case CPSR_fc:
return "CPSR_fc";
case CPSR_fx:
return "CPSR_fx";
case CPSR_fxc:
return "CPSR_fxc";
case CPSR_fsc:
return "CPSR_fsc";
case CPSR_fsx:
return "CPSR_fsx";
case CPSR_fsxc:
return "CPSR_fsxc";
case SPSR_c:
return "SPSR_c";
case SPSR_x:
return "SPSR_x";
case SPSR_xc:
return "SPSR_xc";
case SPSR_s:
return "SPSR_s";
case SPSR_sc:
return "SPSR_sc";
case SPSR_sx:
return "SPSR_sx";
case SPSR_sxc:
return "SPSR_sxc";
case SPSR_f:
return "SPSR_f";
case SPSR_fc:
return "SPSR_fc";
case SPSR_fx:
return "SPSR_fx";
case SPSR_fxc:
return "SPSR_fxc";
case SPSR_fs:
return "SPSR_fs";
case SPSR_fsc:
return "SPSR_fsc";
case SPSR_fsx:
return "SPSR_fsx";
case SPSR_fsxc:
return "SPSR_fsxc";
}
VIXL_UNREACHABLE();
return "??";
}
const char* BankedRegister::GetName() const {
switch (reg_) {
case R8_usr:
return "R8_usr";
case R9_usr:
return "R9_usr";
case R10_usr:
return "R10_usr";
case R11_usr:
return "R11_usr";
case R12_usr:
return "R12_usr";
case SP_usr:
return "SP_usr";
case LR_usr:
return "LR_usr";
case R8_fiq:
return "R8_fiq";
case R9_fiq:
return "R9_fiq";
case R10_fiq:
return "R10_fiq";
case R11_fiq:
return "R11_fiq";
case R12_fiq:
return "R12_fiq";
case SP_fiq:
return "SP_fiq";
case LR_fiq:
return "LR_fiq";
case LR_irq:
return "LR_irq";
case SP_irq:
return "SP_irq";
case LR_svc:
return "LR_svc";
case SP_svc:
return "SP_svc";
case LR_abt:
return "LR_abt";
case SP_abt:
return "SP_abt";
case LR_und:
return "LR_und";
case SP_und:
return "SP_und";
case LR_mon:
return "LR_mon";
case SP_mon:
return "SP_mon";
case ELR_hyp:
return "ELR_hyp";
case SP_hyp:
return "SP_hyp";
case SPSR_fiq:
return "SPSR_fiq";
case SPSR_irq:
return "SPSR_irq";
case SPSR_svc:
return "SPSR_svc";
case SPSR_abt:
return "SPSR_abt";
case SPSR_und:
return "SPSR_und";
case SPSR_mon:
return "SPSR_mon";
case SPSR_hyp:
return "SPSR_hyp";
}
VIXL_UNREACHABLE();
return "??";
}
const char* SpecialFPRegister::GetName() const {
switch (reg_) {
case FPSID:
return "FPSID";
case FPSCR:
return "FPSCR";
case MVFR2:
return "MVFR2";
case MVFR1:
return "MVFR1";
case MVFR0:
return "MVFR0";
case FPEXC:
return "FPEXC";
}
VIXL_UNREACHABLE();
return "??";
}
const char* Condition::GetName() const {
switch (condition_) {
case eq:
return "eq";
case ne:
return "ne";
case cs:
return "cs";
case cc:
return "cc";
case mi:
return "mi";
case pl:
return "pl";
case vs:
return "vs";
case vc:
return "vc";
case hi:
return "hi";
case ls:
return "ls";
case ge:
return "ge";
case lt:
return "lt";
case gt:
return "gt";
case le:
return "le";
case al:
return "";
case Condition::kNone:
return "";
}
return "<und>";
}
const char* Shift::GetName() const {
switch (shift_) {
case LSL:
return "lsl";
case LSR:
return "lsr";
case ASR:
return "asr";
case ROR:
return "ror";
case RRX:
return "rrx";
}
VIXL_UNREACHABLE();
return "??";
}
const char* EncodingSize::GetName() const {
switch (size_) {
case Best:
case Narrow:
return "";
case Wide:
return ".w";
}
VIXL_UNREACHABLE();
return "??";
}
const char* DataType::GetName() const {
switch (value_) {
case kDataTypeValueInvalid:
return ".??";
case kDataTypeValueNone:
return "";
case S8:
return ".s8";
case S16:
return ".s16";
case S32:
return ".s32";
case S64:
return ".s64";
case U8:
return ".u8";
case U16:
return ".u16";
case U32:
return ".u32";
case U64:
return ".u64";
case F16:
return ".f16";
case F32:
return ".f32";
case F64:
return ".f64";
case I8:
return ".i8";
case I16:
return ".i16";
case I32:
return ".i32";
case I64:
return ".i64";
case P8:
return ".p8";
case P64:
return ".p64";
case Untyped8:
return ".8";
case Untyped16:
return ".16";
case Untyped32:
return ".32";
case Untyped64:
return ".64";
}
VIXL_UNREACHABLE();
return ".??";
}
const char* MemoryBarrier::GetName() const {
switch (type_) {
case OSHLD:
return "oshld";
case OSHST:
return "oshst";
case OSH:
return "osh";
case NSHLD:
return "nshld";
case NSHST:
return "nshst";
case NSH:
return "nsh";
case ISHLD:
return "ishld";
case ISHST:
return "ishst";
case ISH:
return "ish";
case LD:
return "ld";
case ST:
return "st";
case SY:
return "sy";
}
switch (static_cast<int>(type_)) {
case 0:
return "#0x0";
case 4:
return "#0x4";
case 8:
return "#0x8";
case 0xc:
return "#0xc";
}
VIXL_UNREACHABLE();
return "??";
}
const char* InterruptFlags::GetName() const {
switch (type_) {
case F:
return "f";
case I:
return "i";
case IF:
return "if";
case A:
return "a";
case AF:
return "af";
case AI:
return "ai";
case AIF:
return "aif";
}
VIXL_ASSERT(type_ == 0);
return "";
}
const char* Endianness::GetName() const {
switch (type_) {
case LE:
return "le";
case BE:
return "be";
}
VIXL_UNREACHABLE();
return "??";
}
// Constructor used for disassembly.
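// An encoded shift amount of zero means a shift of 32 for LSR and ASR, and
// turns ROR into RRX.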
ImmediateShiftOperand::ImmediateShiftOperand(int shift_value, int amount_value)
: Shift(shift_value) {
switch (shift_value) {
case LSL:
amount_ = amount_value;
break;
case LSR:
case ASR:
amount_ = (amount_value == 0) ? 32 : amount_value;
break;
case ROR:
amount_ = amount_value;
if (amount_value == 0) SetType(RRX);
break;
default:
VIXL_UNREACHABLE();
SetType(LSL);
amount_ = 0;
break;
}
}
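// A T32 modified immediate is either a byte replicated across the word in one
// of four patterns (selected by the top two bits of the encoding), or an 8-bit
// value with its top bit set, rotated right by 8 to 31 bits. For example,
// 0x00ab00ab encodes as 0x1ab (pattern 1, payload 0xab) and 0xab000000 encodes
// as 0x42b (rotation 8, payload 0x2b with the implicit leading one). If no
// form matches, the constructor returns without setting an encoding value.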
ImmediateT32::ImmediateT32(uint32_t imm) {
// 00000000 00000000 00000000 abcdefgh
if ((imm & ~0xff) == 0) {
SetEncodingValue(imm);
return;
}
if ((imm >> 16) == (imm & 0xffff)) {
if ((imm & 0xff00) == 0) {
// 00000000 abcdefgh 00000000 abcdefgh
SetEncodingValue((imm & 0xff) | (0x1 << 8));
return;
}
if ((imm & 0xff) == 0) {
// abcdefgh 00000000 abcdefgh 00000000
SetEncodingValue(((imm >> 8) & 0xff) | (0x2 << 8));
return;
}
if (((imm >> 8) & 0xff) == (imm & 0xff)) {
// abcdefgh abcdefgh abcdefgh abcdefgh
SetEncodingValue((imm & 0xff) | (0x3 << 8));
return;
}
}
for (int shift = 0; shift < 24; shift++) {
uint32_t imm8 = imm >> (24 - shift);
uint32_t overflow = imm << (8 + shift);
if ((imm8 <= 0xff) && ((imm8 & 0x80) != 0) && (overflow == 0)) {
SetEncodingValue(((shift + 8) << 7) | (imm8 & 0x7F));
return;
}
}
}
static inline uint32_t ror(uint32_t x, int i) {
VIXL_ASSERT((0 < i) && (i < 32));
return (x >> i) | (x << (32 - i));
}
bool ImmediateT32::IsImmediateT32(uint32_t imm) {
/* abcdefgh abcdefgh abcdefgh abcdefgh */
if ((imm ^ ror(imm, 8)) == 0) return true;
/* 00000000 abcdefgh 00000000 abcdefgh */
/* abcdefgh 00000000 abcdefgh 00000000 */
if ((imm ^ ror(imm, 16)) == 0 &&
(((imm & 0xff00) == 0) || ((imm & 0xff) == 0)))
return true;
/* isolate least-significant set bit */
uint32_t lsb = imm & -imm;
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
}
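// Reverse of the constructor above: encoding values below 0x400 select one of
// the four byte-replication patterns; larger values carry a rotation amount in
// the upper bits and a 7-bit payload whose implicit top bit is restored before
// rotating.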
uint32_t ImmediateT32::Decode(uint32_t value) {
uint32_t base = value & 0xff;
switch (value >> 8) {
case 0:
return base;
case 1:
return base | (base << 16);
case 2:
return (base << 8) | (base << 24);
case 3:
return base | (base << 8) | (base << 16) | (base << 24);
default:
base |= 0x80;
return base << (32 - (value >> 7));
}
}
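// An A32 modified immediate is an 8-bit value rotated right by an even amount
// (0 to 30). The encoding stores half the rotation amount above bit 7 and the
// byte in bits 0-7; for example, 0xff000000 encodes as 0x4ff (rotate right by
// 8). If no rotation produces a value that fits in a byte, the constructor
// returns without setting an encoding value.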
ImmediateA32::ImmediateA32(uint32_t imm) {
// Deal with rot = 0 first to avoid undefined shift by 32.
if (imm <= 0xff) {
SetEncodingValue(imm);
return;
}
for (int rot = 2; rot < 32; rot += 2) {
uint32_t imm8 = (imm << rot) | (imm >> (32 - rot));
if (imm8 <= 0xff) {
SetEncodingValue((rot << 7) | imm8);
return;
}
}
}
bool ImmediateA32::IsImmediateA32(uint32_t imm) {
/* fast-out */
if (imm < 256) return true;
/* avoid getting confused by wrapped-around bytes (this transform has no
* effect on pass/fail results) */
if (imm & 0xff000000) imm = ror(imm, 16);
/* copy odd-numbered set bits into even-numbered bits immediately below, so
* that the least-significant set bit is always an even bit */
imm = imm | ((imm >> 1) & 0x55555555);
/* isolate least-significant set bit (always even) */
uint32_t lsb = imm & -imm;
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
}
uint32_t ImmediateA32::Decode(uint32_t value) {
int rotation = (value >> 8) * 2;
VIXL_ASSERT(rotation >= 0);
VIXL_ASSERT(rotation <= 30);
value &= 0xff;
if (rotation == 0) return value;
return (value >> rotation) | (value << (32 - rotation));
}
uint32_t TypeEncodingValue(Shift shift) {
return shift.IsRRX() ? kRRXEncodedValue : shift.GetValue();
}
uint32_t AmountEncodingValue(Shift shift, uint32_t amount) {
switch (shift.GetType()) {
case LSL:
case ROR:
return amount;
case LSR:
case ASR:
return amount % 32;
case RRX:
return 0;
}
return 0;
}
} // namespace aarch32
} // namespace vixl
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_
#define VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include <algorithm>
#include <ostream>
#include "code-buffer-vixl.h"
#include "utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#ifdef __arm__
#define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp")))
#else
#define HARDFLOAT __attribute__((noinline))
#endif
namespace vixl {
namespace aarch32 {
class Operand;
class SOperand;
class DOperand;
class QOperand;
class MemOperand;
class AlignedMemOperand;
enum AddrMode { Offset = 0, PreIndex = 1, PostIndex = 2 };
class CPURegister {
public:
enum RegisterType {
kNoRegister = 0,
kRRegister = 1,
kSRegister = 2,
kDRegister = 3,
kQRegister = 4
};
private:
static const int kCodeBits = 5;
static const int kTypeBits = 4;
static const int kSizeBits = 8;
static const int kCodeShift = 0;
static const int kTypeShift = kCodeShift + kCodeBits;
static const int kSizeShift = kTypeShift + kTypeBits;
static const uint32_t kCodeMask = ((1 << kCodeBits) - 1) << kCodeShift;
static const uint32_t kTypeMask = ((1 << kTypeBits) - 1) << kTypeShift;
static const uint32_t kSizeMask = ((1 << kSizeBits) - 1) << kSizeShift;
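// A register is packed into a single word: the code in the low bits, then the
// type, then the size in bits, using the shifts and masks above.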
uint32_t value_;
public:
CPURegister(RegisterType type, uint32_t code, int size)
: value_((type << kTypeShift) | (code << kCodeShift) |
(size << kSizeShift)) {
#ifdef VIXL_DEBUG
switch (type) {
case kNoRegister:
break;
case kRRegister:
VIXL_ASSERT(code < kNumberOfRegisters);
VIXL_ASSERT(size == kRegSizeInBits);
break;
case kSRegister:
VIXL_ASSERT(code < kNumberOfSRegisters);
VIXL_ASSERT(size == kSRegSizeInBits);
break;
case kDRegister:
VIXL_ASSERT(code < kMaxNumberOfDRegisters);
VIXL_ASSERT(size == kDRegSizeInBits);
break;
case kQRegister:
VIXL_ASSERT(code < kNumberOfQRegisters);
VIXL_ASSERT(size == kQRegSizeInBits);
break;
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
RegisterType GetType() const {
return static_cast<RegisterType>((value_ & kTypeMask) >> kTypeShift);
}
bool IsRegister() const { return GetType() == kRRegister; }
bool IsS() const { return GetType() == kSRegister; }
bool IsD() const { return GetType() == kDRegister; }
bool IsQ() const { return GetType() == kQRegister; }
bool IsVRegister() const { return IsS() || IsD() || IsQ(); }
bool IsFPRegister() const { return IsS() || IsD(); }
uint32_t GetCode() const { return (value_ & kCodeMask) >> kCodeShift; }
uint32_t GetReg() const { return value_; }
int GetSizeInBits() const { return (value_ & kSizeMask) >> kSizeShift; }
int GetRegSizeInBytes() const {
return (GetType() == kNoRegister) ? 0 : (GetSizeInBits() / 8);
}
bool Is64Bits() const { return GetSizeInBits() == 64; }
bool Is128Bits() const { return GetSizeInBits() == 128; }
bool IsSameFormat(CPURegister reg) {
return (value_ & ~kCodeMask) == (reg.value_ & ~kCodeMask);
}
bool Is(CPURegister ref) const { return GetReg() == ref.GetReg(); }
bool IsValid() const { return GetType() != kNoRegister; }
};
class Register : public CPURegister {
public:
Register() : CPURegister(kNoRegister, 0, kRegSizeInBits) {}
explicit Register(uint32_t code)
: CPURegister(kRRegister, code % kNumberOfRegisters, kRegSizeInBits) {
VIXL_ASSERT(GetCode() < kNumberOfRegisters);
}
bool Is(Register ref) const { return GetCode() == ref.GetCode(); }
bool IsLow() const { return GetCode() < kNumberOfT32LowRegisters; }
bool IsLR() const { return GetCode() == kLrCode; }
bool IsPC() const { return GetCode() == kPcCode; }
bool IsSP() const { return GetCode() == kSpCode; }
};
std::ostream& operator<<(std::ostream& os, const Register reg);
class RegisterOrAPSR_nzcv {
uint32_t code_;
public:
explicit RegisterOrAPSR_nzcv(uint32_t code) : code_(code) {
VIXL_ASSERT(code_ < kNumberOfRegisters);
}
bool IsAPSR_nzcv() const { return code_ == kPcCode; }
uint32_t GetCode() const { return code_; }
Register AsRegister() const {
VIXL_ASSERT(!IsAPSR_nzcv());
return Register(code_);
}
};
const RegisterOrAPSR_nzcv APSR_nzcv(kPcCode);
inline std::ostream& operator<<(std::ostream& os,
const RegisterOrAPSR_nzcv reg) {
if (reg.IsAPSR_nzcv()) return os << "APSR_nzcv";
return os << reg.AsRegister();
}
class SRegister;
class DRegister;
class QRegister;
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(kNoRegister, 0, 0) {}
VRegister(RegisterType type, uint32_t code, int size)
: CPURegister(type, code, size) {}
SRegister S() const;
DRegister D() const;
QRegister Q() const;
};
class SRegister : public VRegister {
public:
SRegister() : VRegister(kNoRegister, 0, kSRegSizeInBits) {}
explicit SRegister(uint32_t code)
: VRegister(kSRegister, code, kSRegSizeInBits) {}
uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const {
if (four_bit_field_lowest_bit == 0) {
return ((GetCode() & 0x1) << single_bit_field) |
((GetCode() & 0x1e) >> 1);
}
return ((GetCode() & 0x1) << single_bit_field) |
((GetCode() & 0x1e) << (four_bit_field_lowest_bit - 1));
}
};
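// An S register code is split across the instruction: its low bit is held in a
// standalone single-bit field and its upper four bits in a four-bit field.
// This helper reassembles the code from those two fields.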
inline unsigned ExtractSRegister(uint32_t instr,
int single_bit_field,
int four_bit_field_lowest_bit) {
VIXL_ASSERT(single_bit_field > 0);
if (four_bit_field_lowest_bit == 0) {
return ((instr << 1) & 0x1e) | ((instr >> single_bit_field) & 0x1);
}
return ((instr >> (four_bit_field_lowest_bit - 1)) & 0x1e) |
((instr >> single_bit_field) & 0x1);
}
inline std::ostream& operator<<(std::ostream& os, const SRegister reg) {
return os << "s" << reg.GetCode();
}
class DRegister : public VRegister {
public:
DRegister() : VRegister(kNoRegister, 0, kDRegSizeInBits) {}
explicit DRegister(uint32_t code)
: VRegister(kDRegister, code, kDRegSizeInBits) {}
SRegister GetLane(uint32_t lane) const {
uint32_t lane_count = kDRegSizeInBits / kSRegSizeInBits;
VIXL_ASSERT(lane < lane_count);
VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters);
return SRegister(GetCode() * lane_count + lane);
}
uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const {
VIXL_ASSERT(single_bit_field >= 4);
return ((GetCode() & 0x10) << (single_bit_field - 4)) |
((GetCode() & 0xf) << four_bit_field_lowest_bit);
}
};
inline unsigned ExtractDRegister(uint32_t instr,
int single_bit_field,
int four_bit_field_lowest_bit) {
VIXL_ASSERT(single_bit_field >= 4);
return ((instr >> (single_bit_field - 4)) & 0x10) |
((instr >> four_bit_field_lowest_bit) & 0xf);
}
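// Illustration (field positions chosen purely for the example): d17 has code
// 0x11. Encode(22, 12) places code bit 4 in bit 22 of the instruction and
// code bits 3:0 in bits 15:12, i.e.
//   ((0x11 & 0x10) << 18) | ((0x11 & 0xf) << 12) == (1 << 22) | (1 << 12).
// ExtractDRegister(instr, 22, 12) reverses the split and returns 17.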
inline std::ostream& operator<<(std::ostream& os, const DRegister reg) {
return os << "d" << reg.GetCode();
}
enum DataTypeType {
kDataTypeS = 0x100,
kDataTypeU = 0x200,
kDataTypeF = 0x300,
kDataTypeI = 0x400,
kDataTypeP = 0x500,
kDataTypeUntyped = 0x600
};
const int kDataTypeSizeMask = 0x0ff;
const int kDataTypeTypeMask = 0xf00;
enum DataTypeValue {
kDataTypeValueInvalid = 0x000,
kDataTypeValueNone = 0x001, // value used when dt is ignored.
S8 = kDataTypeS | 8,
S16 = kDataTypeS | 16,
S32 = kDataTypeS | 32,
S64 = kDataTypeS | 64,
U8 = kDataTypeU | 8,
U16 = kDataTypeU | 16,
U32 = kDataTypeU | 32,
U64 = kDataTypeU | 64,
F16 = kDataTypeF | 16,
F32 = kDataTypeF | 32,
F64 = kDataTypeF | 64,
I8 = kDataTypeI | 8,
I16 = kDataTypeI | 16,
I32 = kDataTypeI | 32,
I64 = kDataTypeI | 64,
P8 = kDataTypeP | 8,
P64 = kDataTypeP | 64,
Untyped8 = kDataTypeUntyped | 8,
Untyped16 = kDataTypeUntyped | 16,
Untyped32 = kDataTypeUntyped | 32,
Untyped64 = kDataTypeUntyped | 64
};
class DataType {
DataTypeValue value_;
public:
explicit DataType(uint32_t size)
: value_(static_cast<DataTypeValue>(kDataTypeUntyped | size)) {
VIXL_ASSERT((size == 8) || (size == 16) || (size == 32) || (size == 64));
}
  // Users should be able to use "S8", "S16" and so forth to instantiate this
// class.
DataType(DataTypeValue value) : value_(value) {} // NOLINT(runtime/explicit)
DataTypeValue GetValue() const { return value_; }
DataTypeType GetType() const {
return static_cast<DataTypeType>(value_ & kDataTypeTypeMask);
}
uint32_t GetSize() const { return value_ & kDataTypeSizeMask; }
bool IsSize(uint32_t size) const {
return (value_ & kDataTypeSizeMask) == size;
}
const char* GetName() const;
bool Is(DataType type) const { return value_ == type.value_; }
bool Is(DataTypeValue value) const { return value_ == value; }
bool Is(DataTypeType type) const { return GetType() == type; }
bool IsNoneOr(DataTypeValue value) const {
return (value_ == value) || (value_ == kDataTypeValueNone);
}
bool Is(DataTypeType type, uint32_t size) const {
return value_ == static_cast<DataTypeValue>(type | size);
}
bool IsNoneOr(DataTypeType type, uint32_t size) const {
return Is(type, size) || Is(kDataTypeValueNone);
}
};
inline std::ostream& operator<<(std::ostream& os, DataType dt) {
return os << dt.GetName();
}
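// Illustration: a DataTypeValue combines a DataTypeType with an element size
// in bits. For instance S16 is kDataTypeS | 16 == 0x110, so
// DataType(S16).GetType() == kDataTypeS, DataType(S16).GetSize() == 16 and
// DataType(S16).Is(kDataTypeS, 16) holds.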
class DRegisterLane : public DRegister {
uint32_t lane_;
public:
DRegisterLane(DRegister reg, uint32_t lane)
: DRegister(reg.GetCode()), lane_(lane) {}
DRegisterLane(uint32_t code, uint32_t lane) : DRegister(code), lane_(lane) {}
uint32_t GetLane() const { return lane_; }
uint32_t EncodeX(DataType dt,
int single_bit_field,
int four_bit_field_lowest_bit) const {
VIXL_ASSERT(single_bit_field >= 4);
uint32_t value = lane_ << ((dt.GetSize() == 16) ? 3 : 4) | GetCode();
return ((value & 0x10) << (single_bit_field - 4)) |
((value & 0xf) << four_bit_field_lowest_bit);
}
};
inline unsigned ExtractDRegisterAndLane(uint32_t instr,
DataType dt,
int single_bit_field,
int four_bit_field_lowest_bit,
int* lane) {
VIXL_ASSERT(single_bit_field >= 4);
uint32_t value = ((instr >> (single_bit_field - 4)) & 0x10) |
((instr >> four_bit_field_lowest_bit) & 0xf);
if (dt.GetSize() == 16) {
*lane = value >> 3;
return value & 0x7;
}
*lane = value >> 4;
return value & 0xf;
}
inline std::ostream& operator<<(std::ostream& os, const DRegisterLane lane) {
os << "d" << lane.GetCode() << "[";
if (lane.GetLane() == static_cast<uint32_t>(-1)) return os << "??]";
return os << lane.GetLane() << "]";
}
class QRegister : public VRegister {
public:
QRegister() : VRegister(kNoRegister, 0, kQRegSizeInBits) {}
explicit QRegister(uint32_t code)
: VRegister(kQRegister, code, kQRegSizeInBits) {}
uint32_t Encode(int offset) { return GetCode() << offset; }
DRegister GetDLane(uint32_t lane) const {
uint32_t lane_count = kQRegSizeInBits / kDRegSizeInBits;
VIXL_ASSERT(lane < lane_count);
return DRegister(GetCode() * lane_count + lane);
}
DRegister GetLowDRegister() const { return DRegister(GetCode() * 2); }
DRegister GetHighDRegister() const { return DRegister(1 + GetCode() * 2); }
SRegister GetSLane(uint32_t lane) const {
uint32_t lane_count = kQRegSizeInBits / kSRegSizeInBits;
VIXL_ASSERT(lane < lane_count);
VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters);
return SRegister(GetCode() * lane_count + lane);
}
uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) {
// Encode "code * 2".
VIXL_ASSERT(single_bit_field >= 3);
return ((GetCode() & 0x8) << (single_bit_field - 3)) |
((GetCode() & 0x7) << (four_bit_field_lowest_bit + 1));
}
};
inline unsigned ExtractQRegister(uint32_t instr,
int single_bit_field,
int four_bit_field_lowest_bit) {
VIXL_ASSERT(single_bit_field >= 3);
return ((instr >> (single_bit_field - 3)) & 0x8) |
((instr >> (four_bit_field_lowest_bit + 1)) & 0x7);
}
inline std::ostream& operator<<(std::ostream& os, const QRegister reg) {
return os << "q" << reg.GetCode();
}
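// Illustration (field positions chosen purely for the example): a Q register
// is encoded as the D register with twice its code. For q3, Encode(22, 12)
// yields ((3 & 0x8) << 19) | ((3 & 0x7) << 13) == 6 << 12, the same field
// value as d6, and ExtractQRegister(instr, 22, 12) recovers code 3.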
// clang-format off
#define AARCH32_REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)
// clang-format on
#define DEFINE_REGISTER(N) const Register r##N(N);
AARCH32_REGISTER_CODE_LIST(DEFINE_REGISTER)
#undef DEFINE_REGISTER
#undef AARCH32_REGISTER_CODE_LIST
enum RegNum { kIPRegNum = 12, kSPRegNum = 13, kLRRegNum = 14, kPCRegNum = 15 };
const Register ip(kIPRegNum);
const Register sp(kSPRegNum);
const Register pc(kPCRegNum);
const Register lr(kLRRegNum);
const Register NoReg;
const VRegister NoVReg;
// clang-format off
#define SREGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// clang-format on
#define DEFINE_REGISTER(N) const SRegister s##N(N);
SREGISTER_CODE_LIST(DEFINE_REGISTER)
#undef DEFINE_REGISTER
#undef SREGISTER_CODE_LIST
const SRegister NoSReg;
// clang-format off
#define DREGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// clang-format on
#define DEFINE_REGISTER(N) const DRegister d##N(N);
DREGISTER_CODE_LIST(DEFINE_REGISTER)
#undef DEFINE_REGISTER
#undef DREGISTER_CODE_LIST
const DRegister NoDReg;
// clang-format off
#define QREGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)
// clang-format on
#define DEFINE_REGISTER(N) const QRegister q##N(N);
QREGISTER_CODE_LIST(DEFINE_REGISTER)
#undef DEFINE_REGISTER
#undef QREGISTER_CODE_LIST
const QRegister NoQReg;
class RegisterList {
public:
RegisterList() : list_(0) {}
RegisterList(Register reg) // NOLINT(runtime/explicit)
: list_(RegisterToList(reg)) {}
RegisterList(Register reg1, Register reg2)
: list_(RegisterToList(reg1) | RegisterToList(reg2)) {}
RegisterList(Register reg1, Register reg2, Register reg3)
: list_(RegisterToList(reg1) | RegisterToList(reg2) |
RegisterToList(reg3)) {}
RegisterList(Register reg1, Register reg2, Register reg3, Register reg4)
: list_(RegisterToList(reg1) | RegisterToList(reg2) |
RegisterToList(reg3) | RegisterToList(reg4)) {}
explicit RegisterList(uint32_t list) : list_(list) {}
uint32_t GetList() const { return list_; }
void SetList(uint32_t list) { list_ = list; }
bool Includes(const Register& reg) const {
return (list_ & RegisterToList(reg)) != 0;
}
void Combine(const RegisterList& other) { list_ |= other.GetList(); }
void Combine(const Register& reg) { list_ |= RegisterToList(reg); }
void Remove(const RegisterList& other) { list_ &= ~other.GetList(); }
void Remove(const Register& reg) { list_ &= ~RegisterToList(reg); }
bool Overlaps(const RegisterList& other) const {
return (list_ & other.list_) != 0;
}
bool IsR0toR7orPC() const {
    // True if the list contains no register from r8-r14, i.e. only r0-r7 and pc.
return (list_ & 0x7f00) == 0;
}
bool IsR0toR7orLR() const {
    // True if the list contains no register from r8-r13 and no r15, i.e. only r0-r7 and lr.
return (list_ & 0xbf00) == 0;
}
Register GetFirstAvailableRegister() const;
bool IsEmpty() const { return list_ == 0; }
static RegisterList Union(const RegisterList& list_1,
const RegisterList& list_2) {
return RegisterList(list_1.list_ | list_2.list_);
}
static RegisterList Union(const RegisterList& list_1,
const RegisterList& list_2,
const RegisterList& list_3) {
return Union(list_1, Union(list_2, list_3));
}
static RegisterList Union(const RegisterList& list_1,
const RegisterList& list_2,
const RegisterList& list_3,
const RegisterList& list_4) {
return Union(Union(list_1, list_2), Union(list_3, list_4));
}
static RegisterList Intersection(const RegisterList& list_1,
const RegisterList& list_2) {
return RegisterList(list_1.list_ & list_2.list_);
}
static RegisterList Intersection(const RegisterList& list_1,
const RegisterList& list_2,
const RegisterList& list_3) {
return Intersection(list_1, Intersection(list_2, list_3));
}
static RegisterList Intersection(const RegisterList& list_1,
const RegisterList& list_2,
const RegisterList& list_3,
const RegisterList& list_4) {
return Intersection(Intersection(list_1, list_2),
Intersection(list_3, list_4));
}
private:
static uint32_t RegisterToList(Register reg) {
if (reg.GetType() == CPURegister::kNoRegister) {
return 0;
} else {
return UINT32_C(1) << reg.GetCode();
}
}
// Bitfield representation of all registers in the list
// (1 for r0, 2 for r1, 4 for r2, ...).
uint32_t list_;
};
inline uint32_t GetRegisterListEncoding(const RegisterList& registers,
int first,
int count) {
return (registers.GetList() >> first) & ((1 << count) - 1);
}
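// Illustration: after RegisterList list(r0, r2); list.Combine(lr); the value
// of list.GetList() is 0x4005, so GetRegisterListEncoding(list, 0, 8)
// extracts 0x05 and GetRegisterListEncoding(list, 8, 8) extracts 0x40.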
std::ostream& operator<<(std::ostream& os, RegisterList registers);
class VRegisterList {
public:
VRegisterList() : list_(0) {}
explicit VRegisterList(VRegister reg) : list_(RegisterToList(reg)) {}
VRegisterList(VRegister reg1, VRegister reg2)
: list_(RegisterToList(reg1) | RegisterToList(reg2)) {}
VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3)
: list_(RegisterToList(reg1) | RegisterToList(reg2) |
RegisterToList(reg3)) {}
VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3, VRegister reg4)
: list_(RegisterToList(reg1) | RegisterToList(reg2) |
RegisterToList(reg3) | RegisterToList(reg4)) {}
explicit VRegisterList(uint64_t list) : list_(list) {}
uint64_t GetList() const { return list_; }
void SetList(uint64_t list) { list_ = list; }
// Because differently-sized V registers overlap with one another, there is no
// way to implement a single 'Includes' function in a way that is unsurprising
// for all existing uses.
bool IncludesAllOf(const VRegister& reg) const {
return (list_ & RegisterToList(reg)) == RegisterToList(reg);
}
bool IncludesAliasOf(const VRegister& reg) const {
return (list_ & RegisterToList(reg)) != 0;
}
void Combine(const VRegisterList& other) { list_ |= other.GetList(); }
void Combine(const VRegister& reg) { list_ |= RegisterToList(reg); }
void Remove(const VRegisterList& other) { list_ &= ~other.GetList(); }
void Remove(const VRegister& reg) { list_ &= ~RegisterToList(reg); }
bool Overlaps(const VRegisterList& other) const {
return (list_ & other.list_) != 0;
}
QRegister GetFirstAvailableQRegister() const;
DRegister GetFirstAvailableDRegister() const;
SRegister GetFirstAvailableSRegister() const;
bool IsEmpty() const { return list_ == 0; }
static VRegisterList Union(const VRegisterList& list_1,
const VRegisterList& list_2) {
return VRegisterList(list_1.list_ | list_2.list_);
}
static VRegisterList Union(const VRegisterList& list_1,
const VRegisterList& list_2,
const VRegisterList& list_3) {
return Union(list_1, Union(list_2, list_3));
}
static VRegisterList Union(const VRegisterList& list_1,
const VRegisterList& list_2,
const VRegisterList& list_3,
const VRegisterList& list_4) {
return Union(Union(list_1, list_2), Union(list_3, list_4));
}
static VRegisterList Intersection(const VRegisterList& list_1,
const VRegisterList& list_2) {
return VRegisterList(list_1.list_ & list_2.list_);
}
static VRegisterList Intersection(const VRegisterList& list_1,
const VRegisterList& list_2,
const VRegisterList& list_3) {
return Intersection(list_1, Intersection(list_2, list_3));
}
static VRegisterList Intersection(const VRegisterList& list_1,
const VRegisterList& list_2,
const VRegisterList& list_3,
const VRegisterList& list_4) {
return Intersection(Intersection(list_1, list_2),
Intersection(list_3, list_4));
}
private:
static uint64_t RegisterToList(VRegister reg) {
if (reg.GetType() == CPURegister::kNoRegister) {
return 0;
} else {
switch (reg.GetSizeInBits()) {
case kQRegSizeInBits:
return UINT64_C(0xf) << (reg.GetCode() * 4);
case kDRegSizeInBits:
return UINT64_C(0x3) << (reg.GetCode() * 2);
case kSRegSizeInBits:
return UINT64_C(0x1) << reg.GetCode();
default:
VIXL_UNREACHABLE();
return 0;
}
}
}
// Bitfield representation of all registers in the list.
  // (0x3 for d0, 0xc for d1, 0x30 for d2, ...). We have one, two or four bits
// per register according to their size. This way we can make sure that we
// account for overlapping registers.
// A register is wholly included in this list only if all of its bits are set.
  // A register is aliased by the list if at least one of its bits is set.
// The IncludesAllOf and IncludesAliasOf helpers are provided to make this
// distinction clear.
uint64_t list_;
};
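// Illustration of the overlap-aware encoding above: q0 occupies bits 3:0
// (0xf), d1 occupies bits 3:2 (0xc) and s3 occupies bit 3 (0x8). A list
// built from d1 alone therefore reports IncludesAliasOf(q0) (the bit sets
// overlap) but not IncludesAllOf(q0), because bits 1:0 of q0 are missing.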
class SRegisterList {
SRegister first_;
int length_;
public:
explicit SRegisterList(SRegister reg) : first_(reg.GetCode()), length_(1) {}
SRegisterList(SRegister first, int length)
: first_(first.GetCode()), length_(length) {
VIXL_ASSERT(length >= 0);
}
SRegister GetSRegister(int n) const {
VIXL_ASSERT(n >= 0);
VIXL_ASSERT(n < length_);
return SRegister((first_.GetCode() + n) % kNumberOfSRegisters);
}
const SRegister& GetFirstSRegister() const { return first_; }
SRegister GetLastSRegister() const { return GetSRegister(length_ - 1); }
int GetLength() const { return length_; }
};
std::ostream& operator<<(std::ostream& os, SRegisterList registers);
class DRegisterList {
DRegister first_;
int length_;
public:
explicit DRegisterList(DRegister reg) : first_(reg.GetCode()), length_(1) {}
DRegisterList(DRegister first, int length)
: first_(first.GetCode()), length_(length) {
VIXL_ASSERT(length >= 0);
}
DRegister GetDRegister(int n) const {
VIXL_ASSERT(n >= 0);
VIXL_ASSERT(n < length_);
return DRegister((first_.GetCode() + n) % kMaxNumberOfDRegisters);
}
const DRegister& GetFirstDRegister() const { return first_; }
DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); }
int GetLength() const { return length_; }
};
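// Illustration: register codes wrap modulo kMaxNumberOfDRegisters (32 for
// Advanced SIMD), so DRegisterList(d30, 4) describes d30, d31, d0 and d1,
// with GetLastDRegister() returning d1.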
std::ostream& operator<<(std::ostream& os, DRegisterList registers);
enum SpacingType { kSingle, kDouble };
enum TransferType { kMultipleLanes, kOneLane, kAllLanes };
class NeonRegisterList {
DRegister first_;
SpacingType spacing_;
TransferType type_;
int lane_;
int length_;
public:
NeonRegisterList(DRegister reg, TransferType type)
: first_(reg.GetCode()),
spacing_(kSingle),
type_(type),
lane_(-1),
length_(1) {
VIXL_ASSERT(type_ != kOneLane);
}
NeonRegisterList(DRegister reg, int lane)
: first_(reg.GetCode()),
spacing_(kSingle),
type_(kOneLane),
lane_(lane),
length_(1) {
VIXL_ASSERT((lane_ >= 0) && (lane_ < 8));
}
NeonRegisterList(DRegister first,
DRegister last,
SpacingType spacing,
TransferType type)
: first_(first.GetCode()), spacing_(spacing), type_(type), lane_(-1) {
VIXL_ASSERT(type != kOneLane);
VIXL_ASSERT(first.GetCode() <= last.GetCode());
int range = last.GetCode() - first.GetCode();
VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2));
length_ = (IsDoubleSpaced() ? (range / 2) : range) + 1;
VIXL_ASSERT(length_ <= 4);
}
NeonRegisterList(DRegister first,
DRegister last,
SpacingType spacing,
int lane)
: first_(first.GetCode()),
spacing_(spacing),
type_(kOneLane),
lane_(lane) {
VIXL_ASSERT((lane >= 0) && (lane < 8));
VIXL_ASSERT(first.GetCode() <= last.GetCode());
int range = last.GetCode() - first.GetCode();
VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2));
length_ = (IsDoubleSpaced() ? (range / 2) : range) + 1;
VIXL_ASSERT(length_ <= 4);
}
DRegister GetDRegister(int n) const {
VIXL_ASSERT(n >= 0);
VIXL_ASSERT(n < length_);
unsigned code = first_.GetCode() + (IsDoubleSpaced() ? (2 * n) : n);
VIXL_ASSERT(code < kMaxNumberOfDRegisters);
return DRegister(code);
}
const DRegister& GetFirstDRegister() const { return first_; }
DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); }
int GetLength() const { return length_; }
bool IsSingleSpaced() const { return spacing_ == kSingle; }
bool IsDoubleSpaced() const { return spacing_ == kDouble; }
bool IsTransferAllLanes() const { return type_ == kAllLanes; }
bool IsTransferOneLane() const { return type_ == kOneLane; }
bool IsTransferMultipleLanes() const { return type_ == kMultipleLanes; }
int GetTransferLane() const { return lane_; }
};
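// Illustration: NeonRegisterList(d0, d6, kDouble, kAllLanes) is the
// double-spaced list {d0, d2, d4, d6}: the code range is 6, so
// length_ == (6 / 2) + 1 == 4 and GetDRegister(n) advances two codes at a
// time.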
std::ostream& operator<<(std::ostream& os, NeonRegisterList registers);
enum SpecialRegisterType { APSR = 0, CPSR = 0, SPSR = 1 };
class SpecialRegister {
uint32_t reg_;
public:
explicit SpecialRegister(uint32_t reg) : reg_(reg) {}
SpecialRegister(SpecialRegisterType reg) // NOLINT(runtime/explicit)
: reg_(reg) {}
uint32_t GetReg() const { return reg_; }
const char* GetName() const;
bool Is(SpecialRegister value) const { return reg_ == value.reg_; }
bool Is(uint32_t value) const { return reg_ == value; }
bool IsNot(uint32_t value) const { return reg_ != value; }
};
inline std::ostream& operator<<(std::ostream& os, SpecialRegister reg) {
return os << reg.GetName();
}
enum BankedRegisterType {
R8_usr = 0x00,
R9_usr = 0x01,
R10_usr = 0x02,
R11_usr = 0x03,
R12_usr = 0x04,
SP_usr = 0x05,
LR_usr = 0x06,
R8_fiq = 0x08,
R9_fiq = 0x09,
R10_fiq = 0x0a,
R11_fiq = 0x0b,
R12_fiq = 0x0c,
SP_fiq = 0x0d,
LR_fiq = 0x0e,
LR_irq = 0x10,
SP_irq = 0x11,
LR_svc = 0x12,
SP_svc = 0x13,
LR_abt = 0x14,
SP_abt = 0x15,
LR_und = 0x16,
SP_und = 0x17,
LR_mon = 0x1c,
SP_mon = 0x1d,
ELR_hyp = 0x1e,
SP_hyp = 0x1f,
SPSR_fiq = 0x2e,
SPSR_irq = 0x30,
SPSR_svc = 0x32,
SPSR_abt = 0x34,
SPSR_und = 0x36,
SPSR_mon = 0x3c,
SPSR_hyp = 0x3e
};
class BankedRegister {
uint32_t reg_;
public:
explicit BankedRegister(unsigned reg) : reg_(reg) {}
BankedRegister(BankedRegisterType reg) // NOLINT(runtime/explicit)
: reg_(reg) {}
uint32_t GetCode() const { return reg_; }
const char* GetName() const;
};
inline std::ostream& operator<<(std::ostream& os, BankedRegister reg) {
return os << reg.GetName();
}
enum MaskedSpecialRegisterType {
APSR_nzcvq = 0x08,
APSR_g = 0x04,
APSR_nzcvqg = 0x0c,
CPSR_c = 0x01,
CPSR_x = 0x02,
CPSR_xc = 0x03,
CPSR_s = APSR_g,
CPSR_sc = 0x05,
CPSR_sx = 0x06,
CPSR_sxc = 0x07,
CPSR_f = APSR_nzcvq,
CPSR_fc = 0x09,
CPSR_fx = 0x0a,
CPSR_fxc = 0x0b,
CPSR_fs = APSR_nzcvqg,
CPSR_fsc = 0x0d,
CPSR_fsx = 0x0e,
CPSR_fsxc = 0x0f,
SPSR_c = 0x11,
SPSR_x = 0x12,
SPSR_xc = 0x13,
SPSR_s = 0x14,
SPSR_sc = 0x15,
SPSR_sx = 0x16,
SPSR_sxc = 0x17,
SPSR_f = 0x18,
SPSR_fc = 0x19,
SPSR_fx = 0x1a,
SPSR_fxc = 0x1b,
SPSR_fs = 0x1c,
SPSR_fsc = 0x1d,
SPSR_fsx = 0x1e,
SPSR_fsxc = 0x1f
};
class MaskedSpecialRegister {
uint32_t reg_;
public:
explicit MaskedSpecialRegister(uint32_t reg) : reg_(reg) {
VIXL_ASSERT(reg <= SPSR_fsxc);
}
MaskedSpecialRegister(
MaskedSpecialRegisterType reg) // NOLINT(runtime/explicit)
: reg_(reg) {}
uint32_t GetReg() const { return reg_; }
const char* GetName() const;
bool Is(MaskedSpecialRegister value) const { return reg_ == value.reg_; }
bool Is(uint32_t value) const { return reg_ == value; }
bool IsNot(uint32_t value) const { return reg_ != value; }
};
inline std::ostream& operator<<(std::ostream& os, MaskedSpecialRegister reg) {
return os << reg.GetName();
}
enum SpecialFPRegisterType {
FPSID = 0x0,
FPSCR = 0x1,
MVFR2 = 0x5,
MVFR1 = 0x6,
MVFR0 = 0x7,
FPEXC = 0x8
};
class SpecialFPRegister {
uint32_t reg_;
public:
explicit SpecialFPRegister(uint32_t reg) : reg_(reg) {
#ifdef VIXL_DEBUG
switch (reg) {
case FPSID:
case FPSCR:
case MVFR2:
case MVFR1:
case MVFR0:
case FPEXC:
break;
default:
VIXL_UNREACHABLE();
}
#endif
}
SpecialFPRegister(SpecialFPRegisterType reg) // NOLINT(runtime/explicit)
: reg_(reg) {}
uint32_t GetReg() const { return reg_; }
const char* GetName() const;
bool Is(SpecialFPRegister value) const { return reg_ == value.reg_; }
bool Is(uint32_t value) const { return reg_ == value; }
bool IsNot(uint32_t value) const { return reg_ != value; }
};
inline std::ostream& operator<<(std::ostream& os, SpecialFPRegister reg) {
return os << reg.GetName();
}
class CRegister {
uint32_t code_;
public:
explicit CRegister(uint32_t code) : code_(code) {
VIXL_ASSERT(code < kNumberOfRegisters);
}
uint32_t GetCode() const { return code_; }
bool Is(CRegister value) const { return code_ == value.code_; }
};
inline std::ostream& operator<<(std::ostream& os, const CRegister reg) {
return os << "c" << reg.GetCode();
}
// clang-format off
#define CREGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)
// clang-format on
#define DEFINE_CREGISTER(N) const CRegister c##N(N);
CREGISTER_CODE_LIST(DEFINE_CREGISTER)
enum CoprocessorName { p10 = 10, p11 = 11, p14 = 14, p15 = 15 };
class Coprocessor {
uint32_t coproc_;
public:
explicit Coprocessor(uint32_t coproc) : coproc_(coproc) {}
Coprocessor(CoprocessorName coproc) // NOLINT(runtime/explicit)
: coproc_(static_cast<uint32_t>(coproc)) {}
bool Is(Coprocessor coproc) const { return coproc_ == coproc.coproc_; }
bool Is(CoprocessorName coproc) const { return coproc_ == coproc; }
uint32_t GetCoprocessor() const { return coproc_; }
};
inline std::ostream& operator<<(std::ostream& os, Coprocessor coproc) {
return os << "p" << coproc.GetCoprocessor();
}
enum ConditionType {
eq = 0,
ne = 1,
cs = 2,
cc = 3,
mi = 4,
pl = 5,
vs = 6,
vc = 7,
hi = 8,
ls = 9,
ge = 10,
lt = 11,
gt = 12,
le = 13,
al = 14,
hs = cs,
lo = cc
};
class Condition {
uint32_t condition_;
static const uint32_t kNever = 15;
static const uint32_t kMask = 0xf;
static const uint32_t kNone = 0x10 | al;
public:
static const Condition None() { return Condition(kNone); }
static const Condition Never() { return Condition(kNever); }
explicit Condition(uint32_t condition) : condition_(condition) {
VIXL_ASSERT(condition <= kNone);
}
// Users should be able to use "eq", "ne" and so forth to instantiate this
// class.
Condition(ConditionType condition) // NOLINT(runtime/explicit)
: condition_(condition) {}
uint32_t GetCondition() const { return condition_ & kMask; }
bool IsNone() const { return condition_ == kNone; }
const char* GetName() const;
bool Is(Condition value) const { return condition_ == value.condition_; }
bool Is(uint32_t value) const { return condition_ == value; }
bool IsNot(uint32_t value) const { return condition_ != value; }
bool IsNever() const { return condition_ == kNever; }
bool IsNotNever() const { return condition_ != kNever; }
Condition Negate() const {
VIXL_ASSERT(IsNot(al) && IsNot(kNever));
return Condition(condition_ ^ 1);
}
};
inline std::ostream& operator<<(std::ostream& os, Condition condition) {
return os << condition.GetName();
}
enum SignType { plus, minus };
class Sign {
public:
Sign() : sign_(plus) {}
Sign(SignType sign) : sign_(sign) {} // NOLINT(runtime/explicit)
const char* GetName() const { return (IsPlus() ? "" : "-"); }
bool IsPlus() const { return sign_ == plus; }
bool IsMinus() const { return sign_ == minus; }
int32_t ApplyTo(uint32_t value) { return IsPlus() ? value : -value; }
private:
SignType sign_;
};
inline std::ostream& operator<<(std::ostream& os, Sign sign) {
return os << sign.GetName();
}
enum ShiftType { LSL = 0x0, LSR = 0x1, ASR = 0x2, ROR = 0x3, RRX = 0x4 };
class Shift {
public:
Shift() : shift_(LSL) {}
Shift(ShiftType shift) : shift_(shift) {} // NOLINT(runtime/explicit)
explicit Shift(uint32_t shift) : shift_(static_cast<ShiftType>(shift)) {}
const Shift& GetShift() const { return *this; }
ShiftType GetType() const { return shift_; }
uint32_t GetValue() const { return shift_; }
const char* GetName() const;
bool IsLSL() const { return shift_ == LSL; }
bool IsLSR() const { return shift_ == LSR; }
bool IsASR() const { return shift_ == ASR; }
bool IsROR() const { return shift_ == ROR; }
bool IsRRX() const { return shift_ == RRX; }
bool Is(Shift value) const { return shift_ == value.shift_; }
bool IsNot(Shift value) const { return shift_ != value.shift_; }
bool IsValidAmount(uint32_t amount) const;
static const Shift NoShift;
protected:
void SetType(ShiftType s) { shift_ = s; }
private:
ShiftType shift_;
};
inline std::ostream& operator<<(std::ostream& os, Shift shift) {
return os << shift.GetName();
}
class ImmediateShiftOperand : public Shift {
public:
// Constructor used for assembly.
ImmediateShiftOperand(Shift shift, uint32_t amount)
: Shift(shift), amount_(amount) {
#ifdef VIXL_DEBUG
switch (shift.GetType()) {
case LSL:
VIXL_ASSERT(amount <= 31);
break;
case ROR:
VIXL_ASSERT(amount > 0);
VIXL_ASSERT(amount <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(amount > 0);
VIXL_ASSERT(amount <= 32);
break;
case RRX:
VIXL_ASSERT(amount == 0);
break;
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
// Constructor used for disassembly.
ImmediateShiftOperand(int shift, int amount);
uint32_t GetAmount() const { return amount_; }
bool Is(const ImmediateShiftOperand& rhs) const {
return amount_ == (rhs.amount_) && Shift::Is(*this);
}
private:
uint32_t amount_;
};
inline std::ostream& operator<<(std::ostream& os,
ImmediateShiftOperand const& shift_operand) {
if (shift_operand.IsLSL() && shift_operand.GetAmount() == 0) return os;
if (shift_operand.IsRRX()) return os << ", rrx";
return os << ", " << shift_operand.GetName() << " #"
<< shift_operand.GetAmount();
}
class RegisterShiftOperand : public Shift {
public:
RegisterShiftOperand(ShiftType shift, Register shift_register)
: Shift(shift), shift_register_(shift_register) {
VIXL_ASSERT(!IsRRX() && shift_register_.IsValid());
}
const Register GetShiftRegister() const { return shift_register_; }
bool Is(const RegisterShiftOperand& rhs) const {
return shift_register_.Is(rhs.shift_register_) && Shift::Is(*this);
}
private:
Register shift_register_;
};
inline std::ostream& operator<<(std::ostream& s,
const RegisterShiftOperand& shift_operand) {
return s << shift_operand.GetName() << " "
<< shift_operand.GetShiftRegister();
}
enum EncodingSizeType { Best, Narrow, Wide };
class EncodingSize {
uint32_t size_;
public:
explicit EncodingSize(uint32_t size) : size_(size) {}
EncodingSize(EncodingSizeType size) // NOLINT(runtime/explicit)
: size_(size) {}
uint32_t GetSize() const { return size_; }
const char* GetName() const;
bool IsBest() const { return size_ == Best; }
bool IsNarrow() const { return size_ == Narrow; }
bool IsWide() const { return size_ == Wide; }
};
inline std::ostream& operator<<(std::ostream& os, EncodingSize size) {
return os << size.GetName();
}
enum WriteBackValue { NO_WRITE_BACK, WRITE_BACK };
class WriteBack {
WriteBackValue value_;
public:
WriteBack(WriteBackValue value) // NOLINT(runtime/explicit)
: value_(value) {}
explicit WriteBack(int value)
: value_((value == 0) ? NO_WRITE_BACK : WRITE_BACK) {}
uint32_t GetWriteBackUint32() const { return (value_ == WRITE_BACK) ? 1 : 0; }
bool DoesWriteBack() const { return value_ == WRITE_BACK; }
};
inline std::ostream& operator<<(std::ostream& os, WriteBack write_back) {
if (write_back.DoesWriteBack()) return os << "!";
return os;
}
class EncodingValue {
bool valid_;
uint32_t encoding_value_;
public:
EncodingValue() {
valid_ = false;
encoding_value_ = 0;
}
bool IsValid() const { return valid_; }
uint32_t GetEncodingValue() const { return encoding_value_; }
void SetEncodingValue(uint32_t encoding_value) {
valid_ = true;
encoding_value_ = encoding_value;
}
};
class EncodingValueAndImmediate : public EncodingValue {
uint32_t encoded_immediate_;
public:
EncodingValueAndImmediate() { encoded_immediate_ = 0; }
uint32_t GetEncodedImmediate() const { return encoded_immediate_; }
void SetEncodedImmediate(uint32_t encoded_immediate) {
encoded_immediate_ = encoded_immediate;
}
};
class ImmediateT32 : public EncodingValue {
public:
explicit ImmediateT32(uint32_t imm);
static bool IsImmediateT32(uint32_t imm);
static uint32_t Decode(uint32_t value);
};
class ImmediateA32 : public EncodingValue {
public:
explicit ImmediateA32(uint32_t imm);
static bool IsImmediateA32(uint32_t imm);
static uint32_t Decode(uint32_t value);
};
// Return the encoding value of a shift type.
uint32_t TypeEncodingValue(Shift shift);
// Return the encoding value for a shift amount depending on the shift type.
uint32_t AmountEncodingValue(Shift shift, uint32_t amount);
enum MemoryBarrierType {
OSHLD = 0x1,
OSHST = 0x2,
OSH = 0x3,
NSHLD = 0x5,
NSHST = 0x6,
NSH = 0x7,
ISHLD = 0x9,
ISHST = 0xa,
ISH = 0xb,
LD = 0xd,
ST = 0xe,
SY = 0xf
};
class MemoryBarrier {
MemoryBarrierType type_;
public:
MemoryBarrier(MemoryBarrierType type) // NOLINT(runtime/explicit)
: type_(type) {}
MemoryBarrier(uint32_t type) // NOLINT(runtime/explicit)
: type_(static_cast<MemoryBarrierType>(type)) {
VIXL_ASSERT((type & 0x3) != 0);
}
MemoryBarrierType GetType() const { return type_; }
const char* GetName() const;
};
inline std::ostream& operator<<(std::ostream& os, MemoryBarrier option) {
return os << option.GetName();
}
enum InterruptFlagsType {
F = 0x1,
I = 0x2,
IF = 0x3,
A = 0x4,
AF = 0x5,
AI = 0x6,
AIF = 0x7
};
class InterruptFlags {
InterruptFlagsType type_;
public:
InterruptFlags(InterruptFlagsType type) // NOLINT(runtime/explicit)
: type_(type) {}
InterruptFlags(uint32_t type) // NOLINT(runtime/explicit)
: type_(static_cast<InterruptFlagsType>(type)) {
VIXL_ASSERT(type <= 7);
}
InterruptFlagsType GetType() const { return type_; }
const char* GetName() const;
};
inline std::ostream& operator<<(std::ostream& os, InterruptFlags option) {
return os << option.GetName();
}
enum EndiannessType { LE = 0, BE = 1 };
class Endianness {
EndiannessType type_;
public:
Endianness(EndiannessType type) : type_(type) {} // NOLINT(runtime/explicit)
Endianness(uint32_t type) // NOLINT(runtime/explicit)
: type_(static_cast<EndiannessType>(type)) {
VIXL_ASSERT(type <= 1);
}
EndiannessType GetType() const { return type_; }
const char* GetName() const;
};
inline std::ostream& operator<<(std::ostream& os, Endianness endian_specifier) {
return os << endian_specifier.GetName();
}
enum AlignmentType {
k16BitAlign = 0,
k32BitAlign = 1,
k64BitAlign = 2,
k128BitAlign = 3,
k256BitAlign = 4,
kNoAlignment = 5,
kBadAlignment = 6
};
class Alignment {
AlignmentType align_;
public:
Alignment(AlignmentType align) // NOLINT(runtime/explicit)
: align_(align) {}
Alignment(uint32_t align) // NOLINT(runtime/explicit)
: align_(static_cast<AlignmentType>(align)) {
VIXL_ASSERT(align <= static_cast<uint32_t>(k256BitAlign));
}
AlignmentType GetType() const { return align_; }
bool Is(AlignmentType type) { return align_ == type; }
};
inline std::ostream& operator<<(std::ostream& os, Alignment align) {
if (align.GetType() == kBadAlignment) return os << " :??";
if (align.GetType() == kNoAlignment) return os;
return os << " :" << (0x10 << static_cast<uint32_t>(align.GetType()));
}
// Structure containing information on forward references.
struct ReferenceInfo {
int size;
int min_offset;
int max_offset;
int alignment; // As a power of two.
enum { kAlignPc, kDontAlignPc } pc_needs_aligning;
};
} // namespace aarch32
} // namespace vixl
#endif // VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "location-aarch32.h"
#include "assembler-aarch32.h"
#include "macro-assembler-aarch32.h"
namespace vixl {
namespace aarch32 {
bool Location::Needs16BitPadding(int32_t location) const {
if (!HasForwardReferences()) return false;
const ForwardRef& last_ref = GetLastForwardReference();
int32_t min_location_last_ref = last_ref.GetMinLocation();
VIXL_ASSERT(min_location_last_ref - location <= 2);
return (min_location_last_ref > location);
}
void Location::ResolveReferences(internal::AssemblerBase* assembler) {
// Iterate over references and call EncodeLocationFor on each of them.
for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) {
const ForwardRef& reference = *it.Current();
VIXL_ASSERT(reference.LocationIsEncodable(location_));
int32_t from = reference.GetLocation();
EncodeLocationFor(assembler, from, reference.op());
}
forward_.clear();
}
static bool Is16BitEncoding(uint16_t instr) {
return instr < (kLowestT32_32Opcode >> 16);
}
void Location::EncodeLocationFor(internal::AssemblerBase* assembler,
int32_t from,
const Location::EmitOperator* encoder) {
if (encoder->IsUsingT32()) {
uint16_t* instr_ptr =
assembler->GetBuffer()->GetOffsetAddress<uint16_t*>(from);
if (Is16BitEncoding(instr_ptr[0])) {
      // The Encode methods always deal with uint32_t values, so we need
      // to cast the halfword explicitly.
uint32_t instr = static_cast<uint32_t>(instr_ptr[0]);
instr = encoder->Encode(instr, from, this);
// The Encode method should not ever set the top 16 bits.
VIXL_ASSERT((instr & ~0xffff) == 0);
instr_ptr[0] = static_cast<uint16_t>(instr);
} else {
uint32_t instr =
instr_ptr[1] | (static_cast<uint32_t>(instr_ptr[0]) << 16);
instr = encoder->Encode(instr, from, this);
instr_ptr[0] = static_cast<uint16_t>(instr >> 16);
instr_ptr[1] = static_cast<uint16_t>(instr);
}
} else {
uint32_t* instr_ptr =
assembler->GetBuffer()->GetOffsetAddress<uint32_t*>(from);
instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this);
}
}
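// Note on the T32 path above: a 32-bit T32 instruction is stored as two
// consecutive 16-bit halfwords with the most significant halfword first,
// which is why the re-encoded value is written back as
// instr_ptr[0] = instr >> 16 and instr_ptr[1] = instr & 0xffff.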
void Location::AddForwardRef(int32_t instr_location,
const EmitOperator& op,
const ReferenceInfo* info) {
VIXL_ASSERT(referenced_);
int32_t from = instr_location + (op.IsUsingT32() ? kT32PcDelta : kA32PcDelta);
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc)
from = AlignDown(from, 4);
int32_t min_object_location = from + info->min_offset;
int32_t max_object_location = from + info->max_offset;
forward_.insert(ForwardRef(&op,
instr_location,
info->size,
min_object_location,
max_object_location,
info->alignment));
}
int Location::GetMaxAlignment() const {
int max_alignment = GetPoolObjectAlignment();
for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
it.Advance()) {
const ForwardRef& reference = *it.Current();
if (reference.GetAlignment() > max_alignment)
max_alignment = reference.GetAlignment();
}
return max_alignment;
}
int Location::GetMinLocation() const {
int32_t min_location = 0;
for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
it.Advance()) {
const ForwardRef& reference = *it.Current();
if (reference.GetMinLocation() > min_location)
min_location = reference.GetMinLocation();
}
return min_location;
}
void Label::UpdatePoolObject(PoolObject<int32_t>* object) {
VIXL_ASSERT(forward_.size() == 1);
const ForwardRef& reference = forward_.Front();
object->Update(reference.GetMinLocation(),
reference.GetMaxLocation(),
reference.GetAlignment());
}
void Label::EmitPoolObject(MacroAssemblerInterface* masm) {
MacroAssembler* macro_assembler = static_cast<MacroAssembler*>(masm);
// Add a new branch to this label.
macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes);
ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler,
kMaxInstructionSizeInBytes,
ExactAssemblyScope::kMaximumSize);
macro_assembler->b(this);
}
void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) {
Assembler* assembler = static_cast<Assembler*>(masm->AsAssemblerBase());
assembler->GetBuffer()->EnsureSpaceFor(GetSize());
assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize());
}
}
}
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
#define VIXL_AARCH32_LABEL_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <list>
#include "invalset-vixl.h"
#include "pool-manager.h"
#include "utils-vixl.h"
#include "constants-aarch32.h"
#include "instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
class MacroAssembler;
class Location : public LocationBase<int32_t> {
friend class Assembler;
friend class MacroAssembler;
public:
// Unbound location that can be used with the assembler bind() method and
// with the assembler methods for generating instructions, but will never
// be handled by the pool manager.
Location()
: LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
referenced_(false) {}
typedef int32_t Offset;
~Location() {
#ifdef VIXL_DEBUG
if (IsReferenced() && !IsBound()) {
VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
}
#endif
}
bool IsReferenced() const { return referenced_; }
private:
class EmitOperator {
public:
explicit EmitOperator(InstructionSet isa) : isa_(isa) {
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == A32);
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == T32);
#endif
}
virtual ~EmitOperator() {}
virtual uint32_t Encode(uint32_t /*instr*/,
Location::Offset /*pc*/,
const Location* /*label*/) const {
return 0;
}
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
bool IsUsingT32() const { return false; }
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
bool IsUsingT32() const { return true; }
#else
bool IsUsingT32() const { return isa_ == T32; }
#endif
private:
InstructionSet isa_;
};
protected:
class ForwardRef : public ForwardReference<int32_t> {
public:
// Default constructor for InvalSet.
ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
ForwardRef(const Location::EmitOperator* op,
int32_t location,
int size,
int32_t min_object_location,
int32_t max_object_location,
int object_alignment = 1)
: ForwardReference<int32_t>(location,
size,
min_object_location,
max_object_location,
object_alignment),
op_(op) {}
const Location::EmitOperator* op() const { return op_; }
// We must provide comparison operators to work with InvalSet.
bool operator==(const ForwardRef& other) const {
return GetLocation() == other.GetLocation();
}
bool operator<(const ForwardRef& other) const {
return GetLocation() < other.GetLocation();
}
bool operator<=(const ForwardRef& other) const {
return GetLocation() <= other.GetLocation();
}
bool operator>(const ForwardRef& other) const {
return GetLocation() > other.GetLocation();
}
private:
const Location::EmitOperator* op_;
};
static const int kNPreallocatedElements = 4;
// The following parameters will not affect ForwardRefList in practice, as we
// resolve all references at once and clear the list, so we do not need to
// remove individual elements by invalidating them.
static const int32_t kInvalidLinkKey = INT32_MAX;
static const size_t kReclaimFrom = 512;
static const size_t kReclaimFactor = 2;
typedef InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>
ForwardRefListBase;
typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
class ForwardRefList : public ForwardRefListBase {
public:
ForwardRefList() : ForwardRefListBase() {}
using ForwardRefListBase::Back;
using ForwardRefListBase::Front;
};
class ForwardRefListIterator : public ForwardRefListIteratorBase {
public:
explicit ForwardRefListIterator(Location* location)
: ForwardRefListIteratorBase(&location->forward_) {}
// TODO: Remove these and use the STL-like interface instead. We'll need a
// const_iterator implemented for this.
using ForwardRefListIteratorBase::Advance;
using ForwardRefListIteratorBase::Current;
};
// For InvalSet::GetKey() and InvalSet::SetKey().
friend class InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>;
private:
virtual void ResolveReferences(internal::AssemblerBase* assembler)
VIXL_OVERRIDE;
void SetReferenced() { referenced_ = true; }
bool HasForwardReferences() const { return !forward_.empty(); }
ForwardRef GetLastForwardReference() const {
VIXL_ASSERT(HasForwardReferences());
return forward_.Back();
}
// Add forward reference to this object. Called from the assembler.
void AddForwardRef(int32_t instr_location,
const EmitOperator& op,
const ReferenceInfo* info);
// Check if we need to add padding when binding this object, in order to
// meet the minimum location requirement.
bool Needs16BitPadding(int location) const;
void EncodeLocationFor(internal::AssemblerBase* assembler,
int32_t from,
const Location::EmitOperator* encoder);
// True if the label has been used at least once.
bool referenced_;
protected:
// Types passed to LocationBase. Must be distinct for unbound Locations (not
  // relevant for bound locations, as they don't have a corresponding
// PoolObject).
static const int kRawLocation = 0; // Will not be used by the pool manager.
static const int kVeneerType = 1;
static const int kLiteralType = 2;
// Contains the references to the unbound label
ForwardRefList forward_;
// To be used only by derived classes.
Location(uint32_t type, int size, int alignment)
: LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
// To be used only by derived classes.
explicit Location(Offset location)
: LocationBase<int32_t>(location), referenced_(false) {}
virtual int GetMaxAlignment() const VIXL_OVERRIDE;
virtual int GetMinLocation() const VIXL_OVERRIDE;
private:
// Included to make the class concrete, however should never be called.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
USE(masm);
VIXL_UNREACHABLE();
}
};
class Label : public Location {
static const int kVeneerSize = 4;
// Use an alignment of 1 for all architectures. Even though we can bind an
// unused label, because of the way the MacroAssembler works we can always be
// sure to have the correct buffer alignment for the instruction set we are
// using, so we do not need to enforce additional alignment requirements
// here.
// TODO: Consider modifying the interface of the pool manager to pass an
// optional additional alignment to Bind() in order to handle cases where the
// buffer could be unaligned.
static const int kVeneerAlignment = 1;
public:
Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
explicit Label(Offset location) : Location(location) {}
private:
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return false;
}
virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
return false;
}
virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
return true;
}
virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
return 1 * KBytes;
}
};
class RawLiteral : public Location {
// Some load instructions require alignment to 4 bytes. Since we do
// not know what instructions will reference a literal after we place
// it, we enforce a 4 byte alignment for literals that are 4 bytes or
// larger.
static const int kLiteralAlignment = 4;
public:
enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
enum DeletionPolicy {
kDeletedOnPlacementByPool,
kDeletedOnPoolDestruction,
kManuallyDeleted
};
RawLiteral(const void* addr,
int size,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(placement_policy == kManuallyPlaced),
deletion_policy_(deletion_policy) {
// We can't have manually placed literals that are not manually deleted.
VIXL_ASSERT(!IsManuallyPlaced() ||
(GetDeletionPolicy() == kManuallyDeleted));
}
RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(false),
deletion_policy_(deletion_policy) {}
const void* GetDataAddress() const { return addr_; }
int GetSize() const { return GetPoolObjectSizeInBytes(); }
bool IsManuallyPlaced() const { return manually_placed_; }
private:
DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPlacementByPool;
}
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPoolDestruction;
}
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
// Data address before it's moved into the code buffer.
const void* const addr_;
  // When this flag is true, the literal will be placed manually.
bool manually_placed_;
  // Specifies when the literal is to be removed from memory.
  // It can be delete'd:
  //   when moved into the code buffer: kDeletedOnPlacementByPool
  //   when the pool is delete'd: kDeletedOnPoolDestruction
  //   or left to the application: kManuallyDeleted.
DeletionPolicy deletion_policy_;
friend class MacroAssembler;
};
template <typename T>
class Literal : public RawLiteral {
public:
explicit Literal(const T& value,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
value_(value) {}
explicit Literal(const T& value, DeletionPolicy deletion_policy)
: RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
void UpdateValue(const T& value, CodeBuffer* buffer) {
value_ = value;
if (IsBound()) {
buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
}
}
private:
T value_;
};
class StringLiteral : public RawLiteral {
public:
explicit StringLiteral(const char* str,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(str,
static_cast<int>(strlen(str) + 1),
placement_policy,
deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
: RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
};
} // namespace aarch32
// Required InvalSet template specialisations.
#define INVAL_SET_TEMPLATE_PARAMETERS \
aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
int32_t, aarch32::Location::kInvalidLinkKey, \
aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
template <>
inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
const aarch32::Location::ForwardRef& element) {
return element.GetLocation();
}
template <>
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
aarch32::Location::ForwardRef* element, int32_t key) {
element->SetLocationToInvalidateOnly(key);
}
#undef INVAL_SET_TEMPLATE_PARAMETERS
} // namespace vixl
#endif // VIXL_AARCH32_LABEL_AARCH32_H_
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "aarch32/macro-assembler-aarch32.h"
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define CONTEXT_SCOPE \
ContextScope context(this, __FILE__ ":" TOSTRING(__LINE__))
namespace vixl {
namespace aarch32 {
ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck(
MacroAssembler* masm, size_t size, SizePolicy size_policy)
: ExactAssemblyScope(masm,
size,
size_policy,
ExactAssemblyScope::kIgnorePools) {}
void UseScratchRegisterScope::Open(MacroAssembler* masm) {
VIXL_ASSERT(masm_ == NULL);
VIXL_ASSERT(masm != NULL);
masm_ = masm;
old_available_ = masm_->GetScratchRegisterList()->GetList();
old_available_vfp_ = masm_->GetScratchVRegisterList()->GetList();
parent_ = masm->GetCurrentScratchRegisterScope();
masm->SetCurrentScratchRegisterScope(this);
}
void UseScratchRegisterScope::Close() {
if (masm_ != NULL) {
// Ensure that scopes nest perfectly, and do not outlive their parents.
// This is a run-time check because the order of destruction of objects in
// the _same_ scope is implementation-defined, and is likely to change in
// optimised builds.
VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this);
masm_->SetCurrentScratchRegisterScope(parent_);
masm_->GetScratchRegisterList()->SetList(old_available_);
masm_->GetScratchVRegisterList()->SetList(old_available_vfp_);
masm_ = NULL;
}
}
bool UseScratchRegisterScope::IsAvailable(const Register& reg) const {
VIXL_ASSERT(masm_ != NULL);
VIXL_ASSERT(reg.IsValid());
return masm_->GetScratchRegisterList()->Includes(reg);
}
bool UseScratchRegisterScope::IsAvailable(const VRegister& reg) const {
VIXL_ASSERT(masm_ != NULL);
VIXL_ASSERT(reg.IsValid());
return masm_->GetScratchVRegisterList()->IncludesAllOf(reg);
}
Register UseScratchRegisterScope::Acquire() {
VIXL_ASSERT(masm_ != NULL);
Register reg = masm_->GetScratchRegisterList()->GetFirstAvailableRegister();
VIXL_CHECK(reg.IsValid());
masm_->GetScratchRegisterList()->Remove(reg);
return reg;
}
VRegister UseScratchRegisterScope::AcquireV(unsigned size_in_bits) {
switch (size_in_bits) {
case kSRegSizeInBits:
return AcquireS();
case kDRegSizeInBits:
return AcquireD();
case kQRegSizeInBits:
return AcquireQ();
default:
VIXL_UNREACHABLE();
return NoVReg;
}
}
QRegister UseScratchRegisterScope::AcquireQ() {
VIXL_ASSERT(masm_ != NULL);
QRegister reg =
masm_->GetScratchVRegisterList()->GetFirstAvailableQRegister();
VIXL_CHECK(reg.IsValid());
masm_->GetScratchVRegisterList()->Remove(reg);
return reg;
}
DRegister UseScratchRegisterScope::AcquireD() {
VIXL_ASSERT(masm_ != NULL);
DRegister reg =
masm_->GetScratchVRegisterList()->GetFirstAvailableDRegister();
VIXL_CHECK(reg.IsValid());
masm_->GetScratchVRegisterList()->Remove(reg);
return reg;
}
SRegister UseScratchRegisterScope::AcquireS() {
VIXL_ASSERT(masm_ != NULL);
SRegister reg =
masm_->GetScratchVRegisterList()->GetFirstAvailableSRegister();
VIXL_CHECK(reg.IsValid());
masm_->GetScratchVRegisterList()->Remove(reg);
return reg;
}
void UseScratchRegisterScope::Release(const Register& reg) {
VIXL_ASSERT(masm_ != NULL);
VIXL_ASSERT(reg.IsValid());
VIXL_ASSERT(!masm_->GetScratchRegisterList()->Includes(reg));
masm_->GetScratchRegisterList()->Combine(reg);
}
void UseScratchRegisterScope::Release(const VRegister& reg) {
VIXL_ASSERT(masm_ != NULL);
VIXL_ASSERT(reg.IsValid());
VIXL_ASSERT(!masm_->GetScratchVRegisterList()->IncludesAliasOf(reg));
masm_->GetScratchVRegisterList()->Combine(reg);
}
void UseScratchRegisterScope::Include(const RegisterList& list) {
VIXL_ASSERT(masm_ != NULL);
RegisterList excluded_registers(sp, lr, pc);
uint32_t mask = list.GetList() & ~excluded_registers.GetList();
RegisterList* available = masm_->GetScratchRegisterList();
available->SetList(available->GetList() | mask);
}
void UseScratchRegisterScope::Include(const VRegisterList& list) {
VIXL_ASSERT(masm_ != NULL);
VRegisterList* available = masm_->GetScratchVRegisterList();
available->SetList(available->GetList() | list.GetList());
}
void UseScratchRegisterScope::Exclude(const RegisterList& list) {
VIXL_ASSERT(masm_ != NULL);
RegisterList* available = masm_->GetScratchRegisterList();
available->SetList(available->GetList() & ~list.GetList());
}
void UseScratchRegisterScope::Exclude(const VRegisterList& list) {
VIXL_ASSERT(masm_ != NULL);
VRegisterList* available = masm_->GetScratchVRegisterList();
available->SetList(available->GetList() & ~list.GetList());
}
void UseScratchRegisterScope::Exclude(const Operand& operand) {
if (operand.IsImmediateShiftedRegister()) {
Exclude(operand.GetBaseRegister());
} else if (operand.IsRegisterShiftedRegister()) {
Exclude(operand.GetBaseRegister(), operand.GetShiftRegister());
} else {
VIXL_ASSERT(operand.IsImmediate());
}
}
void UseScratchRegisterScope::ExcludeAll() {
VIXL_ASSERT(masm_ != NULL);
masm_->GetScratchRegisterList()->SetList(0);
masm_->GetScratchVRegisterList()->SetList(0);
}
void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) {
// We skip the check when the pools are blocked.
if (ArePoolsBlocked()) return;
VIXL_ASSERT(IsUint32(size_arg));
uint32_t size = static_cast<uint32_t>(size_arg);
if (pool_manager_.MustEmit(GetCursorOffset(), size)) {
int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size);
VIXL_ASSERT(new_pc == GetCursorOffset());
USE(new_pc);
}
}
void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond,
Register tmp,
uint32_t imm) {
if (IsUintN(16, imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
mov(cond, tmp, imm & 0xffff);
return;
}
if (IsUsingT32()) {
if (ImmediateT32::IsImmediateT32(~imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
mvn(cond, tmp, ~imm);
return;
}
} else {
if (ImmediateA32::IsImmediateA32(~imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
mvn(cond, tmp, ~imm);
return;
}
}
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
mov(cond, tmp, imm & 0xffff);
movt(cond, tmp, imm >> 16);
}
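// Builds a MemOperand equivalent to [base, #offset], materialising into
// `scratch` whatever part of the offset cannot be encoded directly;
// `extra_offset_mask` describes the offset bits the load/store itself can
// encode.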
MemOperand MacroAssembler::MemOperandComputationHelper(
Condition cond,
Register scratch,
Register base,
uint32_t offset,
uint32_t extra_offset_mask) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(scratch));
VIXL_ASSERT(!AliasesAvailableScratchRegister(base));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
// Check for the simple pass-through case.
if ((offset & extra_offset_mask) == offset) return MemOperand(base, offset);
MacroEmissionCheckScope guard(this);
ITScope it_scope(this, &cond, guard);
uint32_t load_store_offset = offset & extra_offset_mask;
uint32_t add_offset = offset & ~extra_offset_mask;
if ((add_offset != 0) &&
(IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
load_store_offset = 0;
add_offset = offset;
}
if (base.IsPC()) {
// Special handling for PC bases. We must read the PC in the first
// instruction (and only in that instruction), and we must also take care to
// keep the same address calculation as loads and stores. For T32, that
// means using something like ADR, which uses AlignDown(PC, 4).
// We don't handle positive offsets from PC because the intention is not
// clear; does the user expect the offset from the current
// GetCursorOffset(), or to allow a certain amount of space after the
// instruction?
VIXL_ASSERT((offset & 0x80000000) != 0);
if (IsUsingT32()) {
// T32: make the first instruction "SUB (immediate, from PC)" -- an alias
// of ADR -- to get behaviour like loads and stores. This ADR can handle
// at least as much offset as the load_store_offset so it can replace it.
uint32_t sub_pc_offset = (-offset) & 0xfff;
load_store_offset = (offset + sub_pc_offset) & extra_offset_mask;
add_offset = (offset + sub_pc_offset) & ~extra_offset_mask;
ExactAssemblyScope scope(this, k32BitT32InstructionSizeInBytes);
sub(cond, scratch, base, sub_pc_offset);
if (add_offset == 0) return MemOperand(scratch, load_store_offset);
// The rest of the offset can be generated in the usual way.
base = scratch;
}
// A32 can use any SUB instruction, so we don't have to do anything special
// here except to ensure that we read the PC first.
}
add(cond, scratch, base, add_offset);
return MemOperand(scratch, load_store_offset);
}
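// Returns the mask of immediate-offset bits that the given load/store
// instruction type can encode in the given addressing mode.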
uint32_t MacroAssembler::GetOffsetMask(InstructionType type,
AddrMode addrmode) {
switch (type) {
case kLdr:
case kLdrb:
case kStr:
case kStrb:
if (IsUsingA32() || (addrmode == Offset)) {
return 0xfff;
} else {
return 0xff;
}
case kLdrsb:
case kLdrh:
case kLdrsh:
case kStrh:
if (IsUsingT32() && (addrmode == Offset)) {
return 0xfff;
} else {
return 0xff;
}
case kVldr:
case kVstr:
return 0x3fc;
case kLdrd:
case kStrd:
if (IsUsingA32()) {
return 0xff;
} else {
return 0x3fc;
}
default:
VIXL_UNREACHABLE();
return 0;
}
}
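// Printf trampolines: one variant per combination of core (R) and double (D)
// arguments, so that up to four values can be forwarded to printf with the
// correct calling convention.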
HARDFLOAT void PrintfTrampolineRRRR(
const char* format, uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRRRD(
const char* format, uint32_t a, uint32_t b, uint32_t c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRRDR(
const char* format, uint32_t a, uint32_t b, double c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRRDD(
const char* format, uint32_t a, uint32_t b, double c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRDRR(
const char* format, uint32_t a, double b, uint32_t c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRDRD(
const char* format, uint32_t a, double b, uint32_t c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRDDR(
const char* format, uint32_t a, double b, double c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineRDDD(
const char* format, uint32_t a, double b, double c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDRRR(
const char* format, double a, uint32_t b, uint32_t c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDRRD(
const char* format, double a, uint32_t b, uint32_t c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDRDR(
const char* format, double a, uint32_t b, double c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDRDD(
const char* format, double a, uint32_t b, double c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDDRR(
const char* format, double a, double b, uint32_t c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDDRD(
const char* format, double a, double b, uint32_t c, double d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDDDR(
const char* format, double a, double b, double c, uint32_t d) {
printf(format, a, b, c, d);
}
HARDFLOAT void PrintfTrampolineDDDD(
const char* format, double a, double b, double c, double d) {
printf(format, a, b, c, d);
}
void MacroAssembler::Printf(const char* format,
CPURegister reg1,
CPURegister reg2,
CPURegister reg3,
CPURegister reg4) {
// Exclude all registers from the available scratch registers, so
// that we are able to use ip below.
// TODO: Refactor this function to use UseScratchRegisterScope
// for temporary registers below.
UseScratchRegisterScope scratch(this);
scratch.ExcludeAll();
if (generate_simulator_code_) {
PushRegister(reg4);
PushRegister(reg3);
PushRegister(reg2);
PushRegister(reg1);
Push(RegisterList(r0, r1));
StringLiteral* format_literal =
new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
Adr(r0, format_literal);
uint32_t args = (reg4.GetType() << 12) | (reg3.GetType() << 8) |
(reg2.GetType() << 4) | reg1.GetType();
Mov(r1, args);
Hvc(kPrintfCode);
Pop(RegisterList(r0, r1));
int size = reg4.GetRegSizeInBytes() + reg3.GetRegSizeInBytes() +
reg2.GetRegSizeInBytes() + reg1.GetRegSizeInBytes();
Drop(size);
} else {
// Generate on a native platform => 32-bit environment.
// Preserve core registers r0-r3, r12, r14, and also r5, which is used below
// to re-align the stack.
const uint32_t saved_registers_mask =
kCallerSavedRegistersMask | (1 << r5.GetCode());
Push(RegisterList(saved_registers_mask));
// Push VFP registers.
Vpush(Untyped64, DRegisterList(d0, 8));
if (Has32DRegs()) Vpush(Untyped64, DRegisterList(d16, 16));
// Find one register which has been saved and which doesn't need to be
// printed.
RegisterList available_registers(kCallerSavedRegistersMask);
if (reg1.GetType() == CPURegister::kRRegister) {
available_registers.Remove(Register(reg1.GetCode()));
}
if (reg2.GetType() == CPURegister::kRRegister) {
available_registers.Remove(Register(reg2.GetCode()));
}
if (reg3.GetType() == CPURegister::kRRegister) {
available_registers.Remove(Register(reg3.GetCode()));
}
if (reg4.GetType() == CPURegister::kRRegister) {
available_registers.Remove(Register(reg4.GetCode()));
}
Register tmp = available_registers.GetFirstAvailableRegister();
VIXL_ASSERT(tmp.GetType() == CPURegister::kRRegister);
// Push the flags.
Mrs(tmp, APSR);
Push(tmp);
Vmrs(RegisterOrAPSR_nzcv(tmp.GetCode()), FPSCR);
Push(tmp);
// Push the registers to print on the stack.
PushRegister(reg4);
PushRegister(reg3);
PushRegister(reg2);
PushRegister(reg1);
int core_count = 1;
int vfp_count = 0;
uint32_t printf_type = 0;
// Pop the registers to print and store them into r1-r3 and/or d0-d3.
// Reg4 may stay on the stack if all the registers to print are core
// registers.
PreparePrintfArgument(reg1, &core_count, &vfp_count, &printf_type);
PreparePrintfArgument(reg2, &core_count, &vfp_count, &printf_type);
PreparePrintfArgument(reg3, &core_count, &vfp_count, &printf_type);
PreparePrintfArgument(reg4, &core_count, &vfp_count, &printf_type);
// Ensure that the stack is aligned on 8 bytes.
And(r5, sp, 0x7);
if (core_count == 5) {
// One 32-bit argument (reg4) has been left on the stack => align the
// stack before the argument.
Pop(r0);
Sub(sp, sp, r5);
Push(r0);
} else {
Sub(sp, sp, r5);
}
// Select the right trampoline depending on the arguments.
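// Bit (i - 1) of printf_type was set by PreparePrintfArgument when argument
// i is passed as a double, so the sixteen cases below cover every R/D
// combination.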
uintptr_t address;
switch (printf_type) {
case 0:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
break;
case 1:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRR);
break;
case 2:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRR);
break;
case 3:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRR);
break;
case 4:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDR);
break;
case 5:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDR);
break;
case 6:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDR);
break;
case 7:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDR);
break;
case 8:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRD);
break;
case 9:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRD);
break;
case 10:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRD);
break;
case 11:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRD);
break;
case 12:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDD);
break;
case 13:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDD);
break;
case 14:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDD);
break;
case 15:
address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDD);
break;
default:
VIXL_UNREACHABLE();
address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
break;
}
StringLiteral* format_literal =
new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
Adr(r0, format_literal);
Mov(ip, Operand::From(address));
Blx(ip);
// If register reg4 was left on the stack => skip it.
if (core_count == 5) Drop(kRegSizeInBytes);
// Restore the stack as it was before alignment.
Add(sp, sp, r5);
// Restore the flags.
Pop(tmp);
Vmsr(FPSCR, tmp);
Pop(tmp);
Msr(APSR_nzcvqg, tmp);
// Restore the registers.
if (Has32DRegs()) Vpop(Untyped64, DRegisterList(d16, 16));
Vpop(Untyped64, DRegisterList(d0, 8));
Pop(RegisterList(saved_registers_mask));
}
}
void MacroAssembler::PushRegister(CPURegister reg) {
switch (reg.GetType()) {
case CPURegister::kNoRegister:
break;
case CPURegister::kRRegister:
Push(Register(reg.GetCode()));
break;
case CPURegister::kSRegister:
Vpush(Untyped32, SRegisterList(SRegister(reg.GetCode())));
break;
case CPURegister::kDRegister:
Vpush(Untyped64, DRegisterList(DRegister(reg.GetCode())));
break;
case CPURegister::kQRegister:
VIXL_UNIMPLEMENTED();
break;
}
}
void MacroAssembler::PreparePrintfArgument(CPURegister reg,
int* core_count,
int* vfp_count,
uint32_t* printf_type) {
switch (reg.GetType()) {
case CPURegister::kNoRegister:
break;
case CPURegister::kRRegister:
VIXL_ASSERT(*core_count <= 4);
if (*core_count < 4) Pop(Register(*core_count));
*core_count += 1;
break;
case CPURegister::kSRegister:
VIXL_ASSERT(*vfp_count < 4);
*printf_type |= 1 << (*core_count + *vfp_count - 1);
Vpop(Untyped32, SRegisterList(SRegister(*vfp_count * 2)));
Vcvt(F64, F32, DRegister(*vfp_count), SRegister(*vfp_count * 2));
*vfp_count += 1;
break;
case CPURegister::kDRegister:
VIXL_ASSERT(*vfp_count < 4);
*printf_type |= 1 << (*core_count + *vfp_count - 1);
Vpop(Untyped64, DRegisterList(DRegister(*vfp_count)));
*vfp_count += 1;
break;
case CPURegister::kQRegister:
VIXL_UNIMPLEMENTED();
break;
}
}
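// The Delegate methods below are called when an operand combination cannot
// be encoded directly. Each one synthesises an equivalent sequence, usually
// through a scratch register, and otherwise falls back to
// Assembler::Delegate.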
void MacroAssembler::Delegate(InstructionType type,
InstructionCondROp instruction,
Condition cond,
Register rn,
const Operand& operand) {
VIXL_ASSERT((type == kMovt) || (type == kSxtb16) || (type == kTeq) ||
(type == kUxtb16));
if (type == kMovt) {
VIXL_ABORT_WITH_MSG("`Movt` expects a 16-bit immediate.\n");
}
// This delegate only supports teq with immediates.
CONTEXT_SCOPE;
if ((type == kTeq) && operand.IsImmediate()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
HandleOutOfBoundsImmediate(cond, scratch, operand.GetImmediate());
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
teq(cond, rn, scratch);
return;
}
Assembler::Delegate(type, instruction, cond, rn, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondSizeROp instruction,
Condition cond,
EncodingSize size,
Register rn,
const Operand& operand) {
CONTEXT_SCOPE;
VIXL_ASSERT(size.IsBest());
VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) ||
(type == kMovs) || (type == kMvn) || (type == kMvns) ||
(type == kSxtb) || (type == kSxth) || (type == kTst) ||
(type == kUxtb) || (type == kUxth));
if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
VIXL_ASSERT((type != kMov) || (type != kMovs));
InstructionCondRROp shiftop = NULL;
switch (operand.GetShift().GetType()) {
case LSL:
shiftop = &Assembler::lsl;
break;
case LSR:
shiftop = &Assembler::lsr;
break;
case ASR:
shiftop = &Assembler::asr;
break;
case RRX:
// A RegisterShiftedRegister operand cannot have a shift of type RRX.
VIXL_UNREACHABLE();
break;
case ROR:
shiftop = &Assembler::ror;
break;
default:
VIXL_UNREACHABLE();
}
if (shiftop != NULL) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
(this->*shiftop)(cond,
scratch,
operand.GetBaseRegister(),
operand.GetShiftRegister());
(this->*instruction)(cond, size, rn, scratch);
return;
}
}
if (operand.IsImmediate()) {
uint32_t imm = operand.GetImmediate();
switch (type) {
case kMov:
case kMovs:
if (!rn.IsPC()) {
// Immediate is too large, but not using PC, so handle with mov{t}.
HandleOutOfBoundsImmediate(cond, rn, imm);
if (type == kMovs) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
tst(cond, rn, rn);
}
return;
} else if (type == kMov) {
VIXL_ASSERT(IsUsingA32() || cond.Is(al));
// Immediate is too large and using PC, so handle using a temporary
// register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
HandleOutOfBoundsImmediate(al, scratch, imm);
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
bx(cond, scratch);
return;
}
break;
case kCmn:
case kCmp:
if (IsUsingA32() || !rn.IsPC()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
HandleOutOfBoundsImmediate(cond, scratch, imm);
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rn, scratch);
return;
}
break;
case kMvn:
case kMvns:
if (!rn.IsPC()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
HandleOutOfBoundsImmediate(cond, scratch, imm);
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rn, scratch);
return;
}
break;
case kTst:
if (IsUsingA32() || !rn.IsPC()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
HandleOutOfBoundsImmediate(cond, scratch, imm);
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rn, scratch);
return;
}
break;
default: // kSxtb, Sxth, Uxtb, Uxth
break;
}
}
Assembler::Delegate(type, instruction, cond, size, rn, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondRROp instruction,
Condition cond,
Register rd,
Register rn,
const Operand& operand) {
if ((type == kSxtab) || (type == kSxtab16) || (type == kSxtah) ||
(type == kUxtab) || (type == kUxtab16) || (type == kUxtah) ||
(type == kPkhbt) || (type == kPkhtb)) {
UnimplementedDelegate(type);
return;
}
// This delegate only handles the following instructions.
VIXL_ASSERT((type == kOrn) || (type == kOrns) || (type == kRsc) ||
(type == kRscs));
CONTEXT_SCOPE;
// T32 does not support register shifted register operands, emulate it.
if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
InstructionCondRROp shiftop = NULL;
switch (operand.GetShift().GetType()) {
case LSL:
shiftop = &Assembler::lsl;
break;
case LSR:
shiftop = &Assembler::lsr;
break;
case ASR:
shiftop = &Assembler::asr;
break;
case RRX:
// A RegisterShiftedRegister operand cannot have a shift of type RRX.
VIXL_UNREACHABLE();
break;
case ROR:
shiftop = &Assembler::ror;
break;
default:
VIXL_UNREACHABLE();
}
if (shiftop != NULL) {
UseScratchRegisterScope temps(this);
Register rm = operand.GetBaseRegister();
Register rs = operand.GetShiftRegister();
// Try to use rd as a scratch register. We can do this if it aliases rs or
// rm (because we read them in the first instruction), but not rn.
if (!rd.Is(rn)) temps.Include(rd);
Register scratch = temps.Acquire();
// TODO: The scope length was measured empirically. We should analyse the
// worst-case size and add targeted tests.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
(this->*shiftop)(cond, scratch, rm, rs);
(this->*instruction)(cond, rd, rn, scratch);
return;
}
}
// T32 does not have a Rsc instruction, negate the lhs input and turn it into
// an Adc. Adc and Rsc are equivalent using a bitwise NOT:
// adc rd, rn, operand <-> rsc rd, NOT(rn), operand
if (IsUsingT32() && ((type == kRsc) || (type == kRscs))) {
// The RegisterShiftRegister case should have been handled above.
VIXL_ASSERT(!operand.IsRegisterShiftedRegister());
UseScratchRegisterScope temps(this);
// Try to use rd as a scratch register. We can do this if it aliases rn
// (because we read it in the first instruction), but not rm.
temps.Include(rd);
temps.Exclude(operand);
Register negated_rn = temps.Acquire();
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
mvn(cond, negated_rn, rn);
}
if (type == kRsc) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
adc(cond, rd, negated_rn, operand);
return;
}
// TODO: We shouldn't have to specify how much space the next instruction
// needs.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
adcs(cond, rd, negated_rn, operand);
return;
}
if (operand.IsImmediate()) {
// If the immediate can be encoded when inverted, turn Orn into Orr.
// Otherwise rely on HandleOutOfBoundsImmediate to generate a series of
// mov instructions.
int32_t imm = operand.GetSignedImmediate();
if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
switch (type) {
case kOrn:
orr(cond, rd, rn, ~imm);
return;
case kOrns:
orrs(cond, rd, rn, ~imm);
return;
default:
VIXL_UNREACHABLE();
break;
}
}
}
// A32 does not have a Orn instruction, negate the rhs input and turn it into
// a Orr.
if (IsUsingA32() && ((type == kOrn) || (type == kOrns))) {
// TODO: orn r0, r1, imm -> orr r0, r1, neg(imm) if doable
// mvn r0, r2
// orr r0, r1, r0
Register scratch;
UseScratchRegisterScope temps(this);
// Try to use rd as a scratch register. We can do this as long as rd does
// not alias rn (which is read by the final orr); the operand is fully
// consumed by the first instruction (mvn).
if (!rd.Is(rn)) temps.Include(rd);
scratch = temps.Acquire();
{
// TODO: We shouldn't have to specify how much space the next instruction
// needs.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
mvn(cond, scratch, operand);
}
if (type == kOrns) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
orrs(cond, rd, rn, scratch);
return;
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
orr(cond, rd, rn, scratch);
return;
}
if (operand.IsImmediate()) {
UseScratchRegisterScope temps(this);
// Allow using the destination as a scratch register if possible.
if (!rd.Is(rn)) temps.Include(rd);
Register scratch = temps.Acquire();
int32_t imm = operand.GetSignedImmediate();
HandleOutOfBoundsImmediate(cond, scratch, imm);
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, rd, rn, scratch);
return;
}
Assembler::Delegate(type, instruction, cond, rd, rn, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondSizeRL instruction,
Condition cond,
EncodingSize size,
Register rd,
Location* location) {
VIXL_ASSERT((type == kLdr) || (type == kAdr));
CONTEXT_SCOPE;
VIXL_ASSERT(size.IsBest());
if ((type == kLdr) && location->IsBound()) {
CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
UseScratchRegisterScope temps(this);
temps.Include(rd);
uint32_t mask = GetOffsetMask(type, Offset);
ldr(rd, MemOperandComputationHelper(cond, temps.Acquire(), location, mask));
return;
}
Assembler::Delegate(type, instruction, cond, size, rd, location);
}
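// Attempts to emit `instruction` twice, splitting `imm` into a low part
// (imm & mask) and a high part (imm & ~mask); returns false when the split
// cannot help (the high part is not a modified immediate and rn is not PC).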
bool MacroAssembler::GenerateSplitInstruction(
InstructionCondSizeRROp instruction,
Condition cond,
Register rd,
Register rn,
uint32_t imm,
uint32_t mask) {
uint32_t high = imm & ~mask;
if (!IsModifiedImmediate(high) && !rn.IsPC()) return false;
// If high is a modified immediate, we can perform the operation with
// only 2 instructions.
// Else, if rn is PC, we want to avoid moving PC into a temporary.
// Therefore, we also use the pattern even if the second call may
// generate 3 instructions.
uint32_t low = imm & mask;
CodeBufferCheckScope scope(this,
(rn.IsPC() ? 4 : 2) * kMaxInstructionSizeInBytes);
(this->*instruction)(cond, Best, rd, rn, low);
(this->*instruction)(cond, Best, rd, rd, high);
return true;
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondSizeRROp instruction,
Condition cond,
EncodingSize size,
Register rd,
Register rn,
const Operand& operand) {
VIXL_ASSERT(
(type == kAdc) || (type == kAdcs) || (type == kAdd) || (type == kAdds) ||
(type == kAnd) || (type == kAnds) || (type == kAsr) || (type == kAsrs) ||
(type == kBic) || (type == kBics) || (type == kEor) || (type == kEors) ||
(type == kLsl) || (type == kLsls) || (type == kLsr) || (type == kLsrs) ||
(type == kOrr) || (type == kOrrs) || (type == kRor) || (type == kRors) ||
(type == kRsb) || (type == kRsbs) || (type == kSbc) || (type == kSbcs) ||
(type == kSub) || (type == kSubs));
CONTEXT_SCOPE;
VIXL_ASSERT(size.IsBest());
if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
InstructionCondRROp shiftop = NULL;
switch (operand.GetShift().GetType()) {
case LSL:
shiftop = &Assembler::lsl;
break;
case LSR:
shiftop = &Assembler::lsr;
break;
case ASR:
shiftop = &Assembler::asr;
break;
case RRX:
// A RegisterShiftedRegister operand cannot have a shift of type RRX.
VIXL_UNREACHABLE();
break;
case ROR:
shiftop = &Assembler::ror;
break;
default:
VIXL_UNREACHABLE();
}
if (shiftop != NULL) {
UseScratchRegisterScope temps(this);
Register rm = operand.GetBaseRegister();
Register rs = operand.GetShiftRegister();
// Try to use rd as a scratch register. We can do this if it aliases rs or
// rm (because we read them in the first instruction), but not rn.
if (!rd.Is(rn)) temps.Include(rd);
Register scratch = temps.Acquire();
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
(this->*shiftop)(cond, scratch, rm, rs);
(this->*instruction)(cond, size, rd, rn, scratch);
return;
}
}
if (operand.IsImmediate()) {
int32_t imm = operand.GetSignedImmediate();
if (ImmediateT32::IsImmediateT32(~imm)) {
if (IsUsingT32()) {
switch (type) {
case kOrr:
orn(cond, rd, rn, ~imm);
return;
case kOrrs:
orns(cond, rd, rn, ~imm);
return;
default:
break;
}
}
}
if (imm < 0) {
InstructionCondSizeRROp asmcb = NULL;
// Add and sub are equivalent using an arithmetic negation:
// add rd, rn, #imm <-> sub rd, rn, - #imm
// Add and sub with carry are equivalent using a bitwise NOT:
// adc rd, rn, #imm <-> sbc rd, rn, NOT #imm
switch (type) {
case kAdd:
asmcb = &Assembler::sub;
imm = -imm;
break;
case kAdds:
asmcb = &Assembler::subs;
imm = -imm;
break;
case kSub:
asmcb = &Assembler::add;
imm = -imm;
break;
case kSubs:
asmcb = &Assembler::adds;
imm = -imm;
break;
case kAdc:
asmcb = &Assembler::sbc;
imm = ~imm;
break;
case kAdcs:
asmcb = &Assembler::sbcs;
imm = ~imm;
break;
case kSbc:
asmcb = &Assembler::adc;
imm = ~imm;
break;
case kSbcs:
asmcb = &Assembler::adcs;
imm = ~imm;
break;
default:
break;
}
if (asmcb != NULL) {
CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes);
(this->*asmcb)(cond, size, rd, rn, Operand(imm));
return;
}
}
// When rn is PC, only handle negative offsets. The correct way to handle
// positive offsets isn't clear; does the user want the offset from the
// start of the macro, or from the end (to allow a certain amount of space)?
// When type is Add or Sub, imm is always positive (imm < 0 has just been
// handled and imm == 0 would have been generated without the need of a
// delegate). Therefore, only add to PC is forbidden here.
if ((((type == kAdd) && !rn.IsPC()) || (type == kSub)) &&
(IsUsingA32() || (!rd.IsPC() && !rn.IsPC()))) {
VIXL_ASSERT(imm > 0);
// Try to break the constant into two modified immediates.
// For T32 also try to break the constant into one imm12 and one modified
// immediate. Count the trailing zeroes and get the biggest even value.
int trailing_zeroes = CountTrailingZeros(imm) & ~1u;
uint32_t mask = ((trailing_zeroes < 4) && IsUsingT32())
? 0xfff
: (0xff << trailing_zeroes);
if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) {
return;
}
InstructionCondSizeRROp asmcb = NULL;
switch (type) {
case kAdd:
asmcb = &Assembler::sub;
break;
case kSub:
asmcb = &Assembler::add;
break;
default:
VIXL_UNREACHABLE();
}
if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) {
return;
}
}
UseScratchRegisterScope temps(this);
// Allow using the destination as a scratch register if possible.
if (!rd.Is(rn)) temps.Include(rd);
if (rn.IsPC()) {
// If we're reading the PC, we need to do it in the first instruction,
// otherwise we'll read the wrong value. We rely on this to handle the
// long-range PC-relative MemOperands which can result from user-managed
// literals.
// Only handle negative offsets. The correct way to handle positive
// offsets isn't clear; does the user want the offset from the start of
// the macro, or from the end (to allow a certain amount of space)?
bool offset_is_negative_or_zero = (imm <= 0);
switch (type) {
case kAdd:
case kAdds:
offset_is_negative_or_zero = (imm <= 0);
break;
case kSub:
case kSubs:
offset_is_negative_or_zero = (imm >= 0);
break;
case kAdc:
case kAdcs:
offset_is_negative_or_zero = (imm < 0);
break;
case kSbc:
case kSbcs:
offset_is_negative_or_zero = (imm > 0);
break;
default:
break;
}
if (offset_is_negative_or_zero) {
{
rn = temps.Acquire();
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
mov(cond, rn, pc);
}
// Recurse rather than falling through, to try to get the immediate into
// a single instruction.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rd, rn, operand);
return;
}
} else {
Register scratch = temps.Acquire();
// TODO: The scope length was measured empirically. We should analyse the
// worst-case size and add targeted tests.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
mov(cond, scratch, operand.GetImmediate());
(this->*instruction)(cond, size, rd, rn, scratch);
return;
}
}
Assembler::Delegate(type, instruction, cond, size, rd, rn, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionRL instruction,
Register rn,
Location* location) {
VIXL_ASSERT((type == kCbz) || (type == kCbnz));
CONTEXT_SCOPE;
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
if (IsUsingA32()) {
if (type == kCbz) {
VIXL_ABORT_WITH_MSG("Cbz is only available for T32.\n");
} else {
VIXL_ABORT_WITH_MSG("Cbnz is only available for T32.\n");
}
} else if (rn.IsLow()) {
switch (type) {
case kCbnz: {
Label done;
cbz(rn, &done);
b(location);
Bind(&done);
return;
}
case kCbz: {
Label done;
cbnz(rn, &done);
b(location);
Bind(&done);
return;
}
default:
break;
}
}
Assembler::Delegate(type, instruction, rn, location);
}
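// Returns true if every byte of `imm` is either 0x00 or 0xff, i.e. the value
// can be generated with the byte-mask (I64) form of vmov.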
template <typename T>
static inline bool IsI64BitPattern(T imm) {
for (T mask = 0xff << ((sizeof(T) - 1) * 8); mask != 0; mask >>= 8) {
if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
}
return true;
}
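// Returns true if all bytes of `imm` are identical, so the value can be
// generated with a single vmov.i8.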
template <typename T>
static inline bool IsI8BitPattern(T imm) {
uint8_t imm8 = imm & 0xff;
for (unsigned rep = sizeof(T) - 1; rep > 0; rep--) {
imm >>= 8;
if ((imm & 0xff) != imm8) return false;
}
return true;
}
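// Returns true if ~imm32 fits one of the vmvn.i32 immediate encodings, so a
// vmov of `imm32` can be replaced by a single vmvn.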
static inline bool CanBeInverted(uint32_t imm32) {
uint32_t fill8 = 0;
if ((imm32 & 0xffffff00) == 0xffffff00) {
// 11111111 11111111 11111111 abcdefgh
return true;
}
if (((imm32 & 0xff) == 0) || ((imm32 & 0xff) == 0xff)) {
fill8 = imm32 & 0xff;
imm32 >>= 8;
if ((imm32 >> 8) == 0xffff) {
// 11111111 11111111 abcdefgh 00000000
// or 11111111 11111111 abcdefgh 11111111
return true;
}
if ((imm32 & 0xff) == fill8) {
imm32 >>= 8;
if ((imm32 >> 8) == 0xff) {
// 11111111 abcdefgh 00000000 00000000
// or 11111111 abcdefgh 11111111 11111111
return true;
}
if ((fill8 == 0xff) && ((imm32 & 0xff) == 0xff)) {
// abcdefgh 11111111 11111111 11111111
return true;
}
}
}
return false;
}
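// Replicates `imm` into every lane of the wider type RES, e.g.
// replicate<uint64_t>(0xff0000ff) == 0xff0000ffff0000ff.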
template <typename RES, typename T>
static inline RES replicate(T imm) {
VIXL_ASSERT((sizeof(RES) > sizeof(T)) &&
(((sizeof(RES) / sizeof(T)) * sizeof(T)) == sizeof(RES)));
RES res = imm;
for (unsigned i = sizeof(RES) / sizeof(T) - 1; i > 0; i--) {
res = (res << (sizeof(T) * 8)) | imm;
}
return res;
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtSSop instruction,
Condition cond,
DataType dt,
SRegister rd,
const SOperand& operand) {
CONTEXT_SCOPE;
if (type == kVmov) {
if (operand.IsImmediate() && dt.Is(F32)) {
const NeonImmediate& neon_imm = operand.GetNeonImmediate();
if (neon_imm.CanConvert<float>()) {
// movw ip, imm16
// movt ip, imm16
// vmov s0, ip
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
float f = neon_imm.GetImmediate<float>();
// TODO: The scope length was measured empirically. We should analyse
// the worst-case size and add targeted tests.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
mov(cond, scratch, FloatToRawbits(f));
vmov(cond, rd, scratch);
return;
}
}
}
Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtDDop instruction,
Condition cond,
DataType dt,
DRegister rd,
const DOperand& operand) {
CONTEXT_SCOPE;
if (type == kVmov) {
if (operand.IsImmediate()) {
const NeonImmediate& neon_imm = operand.GetNeonImmediate();
switch (dt.GetValue()) {
case I32:
if (neon_imm.CanConvert<uint32_t>()) {
uint32_t imm = neon_imm.GetImmediate<uint32_t>();
// vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
// vmov.i32 d0, 0xff0000ff will translate into
// vmov.i64 d0, 0xff0000ffff0000ff
if (IsI64BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I64, rd, replicate<uint64_t>(imm));
return;
}
// vmov.i32 d0, 0xffab0000 will translate into
// vmvn.i32 d0, 0x0054ffff
if (cond.Is(al) && CanBeInverted(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmvn(I32, rd, ~imm);
return;
}
}
break;
case I16:
if (neon_imm.CanConvert<uint16_t>()) {
uint16_t imm = neon_imm.GetImmediate<uint16_t>();
// vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
}
break;
case I64:
if (neon_imm.CanConvert<uint64_t>()) {
uint64_t imm = neon_imm.GetImmediate<uint64_t>();
// vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
// mov ip, lo(imm64)
// vdup d0, ip
// vdup is preferred to 'vmov d0[0]' as d0[1] does not need to be
// preserved.
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
// TODO: The scope length was measured empirically. We should
// analyse the worst-case size and add targeted tests.
CodeBufferCheckScope scope(this,
2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vdup(cond, Untyped32, rd, scratch);
}
// mov ip, hi(imm64)
// vmov d0[1], ip
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
// TODO: The scope length was measured empirically. We should
// analyse the worst-case size and add targeted tests.
CodeBufferCheckScope scope(this,
2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, Untyped32, DRegisterLane(rd, 1), scratch);
}
return;
}
break;
default:
break;
}
VIXL_ASSERT(!dt.Is(I8)); // I8 cases should have been handled already.
if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
// mov ip, imm32
// vdup.16 d0, ip
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
}
DataTypeValue vdup_dt = Untyped32;
switch (dt.GetValue()) {
case I16:
vdup_dt = Untyped16;
break;
case I32:
vdup_dt = Untyped32;
break;
default:
VIXL_UNREACHABLE();
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vdup(cond, vdup_dt, rd, scratch);
return;
}
if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
float f = neon_imm.GetImmediate<float>();
// Punt to vmov.i32
// TODO: The scope length was guessed based on the double case below. We
// should analyse the worst-case size and add targeted tests.
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
vmov(cond, I32, rd, FloatToRawbits(f));
return;
}
if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
// Punt to vmov.i64
double d = neon_imm.GetImmediate<double>();
// TODO: The scope length was measured empirically. We should analyse
// the worst-case size and add targeted tests.
CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
vmov(cond, I64, rd, DoubleToRawbits(d));
return;
}
}
}
Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtQQop instruction,
Condition cond,
DataType dt,
QRegister rd,
const QOperand& operand) {
CONTEXT_SCOPE;
if (type == kVmov) {
if (operand.IsImmediate()) {
const NeonImmediate& neon_imm = operand.GetNeonImmediate();
switch (dt.GetValue()) {
case I32:
if (neon_imm.CanConvert<uint32_t>()) {
uint32_t imm = neon_imm.GetImmediate<uint32_t>();
// vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
// vmov.i32 d0, 0xff0000ff will translate into
// vmov.i64 d0, 0xff0000ffff0000ff
if (IsI64BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I64, rd, replicate<uint64_t>(imm));
return;
}
// vmov.i32 d0, 0xffab0000 will translate into
// vmvn.i32 d0, 0x0054ffff
if (CanBeInverted(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmvn(cond, I32, rd, ~imm);
return;
}
}
break;
case I16:
if (neon_imm.CanConvert<uint16_t>()) {
uint16_t imm = neon_imm.GetImmediate<uint16_t>();
// vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
}
break;
case I64:
if (neon_imm.CanConvert<uint64_t>()) {
uint64_t imm = neon_imm.GetImmediate<uint64_t>();
// vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
if (IsI8BitPattern(imm)) {
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, I8, rd, imm & 0xff);
return;
}
// mov ip, lo(imm64)
// vdup q0, ip
// vdup is preferred to 'vmov d0[0]' as d0[1-3] don't need to be
// preserved.
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
CodeBufferCheckScope scope(this,
2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vdup(cond, Untyped32, rd, scratch);
}
// mov ip, hi(imm64)
// vmov.i32 d0[1], ip
// vmov d1, d0
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
CodeBufferCheckScope scope(this,
2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond,
Untyped32,
DRegisterLane(rd.GetLowDRegister(), 1),
scratch);
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
}
return;
}
break;
default:
break;
}
VIXL_ASSERT(!dt.Is(I8)); // I8 cases should have been handled already.
if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
// mov ip, imm32
// vdup.16 d0, ip
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
}
DataTypeValue vdup_dt = Untyped32;
switch (dt.GetValue()) {
case I16:
vdup_dt = Untyped16;
break;
case I32:
vdup_dt = Untyped32;
break;
default:
VIXL_UNREACHABLE();
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
vdup(cond, vdup_dt, rd, scratch);
return;
}
if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
// Punt to vmov.i32
float f = neon_imm.GetImmediate<float>();
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
vmov(cond, I32, rd, FloatToRawbits(f));
return;
}
if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
// Use vmov to create the double in the low D register, then duplicate
// it into the high D register.
double d = neon_imm.GetImmediate<double>();
CodeBufferCheckScope scope(this, 7 * kMaxInstructionSizeInBytes);
vmov(cond, F64, rd.GetLowDRegister(), d);
vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
return;
}
}
}
Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondRL instruction,
Condition cond,
Register rt,
Location* location) {
VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) ||
(type == kLdrsh));
CONTEXT_SCOPE;
if (location->IsBound()) {
CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
UseScratchRegisterScope temps(this);
temps.Include(rt);
Register scratch = temps.Acquire();
uint32_t mask = GetOffsetMask(type, Offset);
switch (type) {
case kLdrb:
ldrb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
return;
case kLdrh:
ldrh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
return;
case kLdrsb:
ldrsb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
return;
case kLdrsh:
ldrsh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
return;
default:
VIXL_UNREACHABLE();
}
return;
}
Assembler::Delegate(type, instruction, cond, rt, location);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondRRL instruction,
Condition cond,
Register rt,
Register rt2,
Location* location) {
VIXL_ASSERT(type == kLdrd);
CONTEXT_SCOPE;
if (location->IsBound()) {
CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
UseScratchRegisterScope temps(this);
temps.Include(rt, rt2);
Register scratch = temps.Acquire();
uint32_t mask = GetOffsetMask(type, Offset);
ldrd(rt, rt2, MemOperandComputationHelper(cond, scratch, location, mask));
return;
}
Assembler::Delegate(type, instruction, cond, rt, rt2, location);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondSizeRMop instruction,
Condition cond,
EncodingSize size,
Register rd,
const MemOperand& operand) {
CONTEXT_SCOPE;
VIXL_ASSERT(size.IsBest());
VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) ||
(type == kLdrsb) || (type == kLdrsh) || (type == kStr) ||
(type == kStrb) || (type == kStrh));
if (operand.IsImmediate()) {
const Register& rn = operand.GetBaseRegister();
AddrMode addrmode = operand.GetAddrMode();
int32_t offset = operand.GetOffsetImmediate();
uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
// Try to maximize the offset used by the MemOperand (load_store_offset).
// Add the part which can't be used by the MemOperand (add_offset).
uint32_t load_store_offset = offset & extra_offset_mask;
uint32_t add_offset = offset & ~extra_offset_mask;
if ((add_offset != 0) &&
(IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
load_store_offset = 0;
add_offset = offset;
}
switch (addrmode) {
case PreIndex:
// Avoid the unpredictable case 'str r0, [r0, imm]!'
if (!rn.Is(rd)) {
// Pre-Indexed case:
// ldr r0, [r1, 12345]! will translate into
// add r1, r1, 12345
// ldr r0, [r1]
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, add_offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
size,
rd,
MemOperand(rn, load_store_offset, PreIndex));
}
return;
}
break;
case Offset: {
UseScratchRegisterScope temps(this);
// Allow using the destination as a scratch register if possible.
if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
!rd.Is(rn)) {
temps.Include(rd);
}
Register scratch = temps.Acquire();
// Offset case:
// ldr r0, [r1, 12345] will translate into
// add r0, r1, 12345
// ldr r0, [r0]
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, scratch, rn, add_offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
size,
rd,
MemOperand(scratch, load_store_offset));
}
return;
}
case PostIndex:
// Avoid the unpredictable case 'ldr r0, [r0], imm'
if (!rn.Is(rd)) {
// Post-indexed case:
// ldr r0, [r1], imm32 will translate into
// ldr r0, [r1]
// movw ip, imm32 & 0xffff
// movt ip, imm32 >> 16
// add r1, r1, ip
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
size,
rd,
MemOperand(rn, load_store_offset, PostIndex));
}
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, add_offset);
}
return;
}
break;
}
} else if (operand.IsPlainRegister()) {
const Register& rn = operand.GetBaseRegister();
AddrMode addrmode = operand.GetAddrMode();
const Register& rm = operand.GetOffsetRegister();
if (rm.IsPC()) {
VIXL_ABORT_WITH_MSG(
"The MacroAssembler does not convert loads and stores with a PC "
"offset register.\n");
}
if (rn.IsPC()) {
if (addrmode == Offset) {
if (IsUsingT32()) {
VIXL_ABORT_WITH_MSG(
"The MacroAssembler does not convert loads and stores with a PC "
"base register for T32.\n");
}
} else {
VIXL_ABORT_WITH_MSG(
"The MacroAssembler does not convert loads and stores with a PC "
"base register in pre-index or post-index mode.\n");
}
}
switch (addrmode) {
case PreIndex:
// Avoid the unpredictable case 'str r0, [r0, imm]!'
if (!rn.Is(rd)) {
// Pre-Indexed case:
// ldr r0, [r1, r2]! will translate into
// add r1, r1, r2
// ldr r0, [r1]
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, rn, rn, rm);
} else {
sub(cond, rn, rn, rm);
}
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
}
return;
}
break;
case Offset: {
UseScratchRegisterScope temps(this);
// Allow using the destination as a scratch register if this is not a
// store.
// Avoid using PC as a temporary as this has side-effects.
if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
!rd.IsPC()) {
temps.Include(rd);
}
Register scratch = temps.Acquire();
// Offset case:
// ldr r0, [r1, r2] will translate into
// add r0, r1, r2
// ldr r0, [r0]
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, scratch, rn, rm);
} else {
sub(cond, scratch, rn, rm);
}
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rd, MemOperand(scratch, Offset));
}
return;
}
case PostIndex:
// Avoid the unpredictable case 'ldr r0, [r0], imm'
if (!rn.Is(rd)) {
// Post-indexed case:
// ldr r0, [r1], r2 will translate into
// ldr r0, [r1]
// add r1, r1, r2
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, rn, rn, rm);
} else {
sub(cond, rn, rn, rm);
}
}
return;
}
break;
}
}
Assembler::Delegate(type, instruction, cond, size, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondRRMop instruction,
Condition cond,
Register rt,
Register rt2,
const MemOperand& operand) {
if ((type == kLdaexd) || (type == kLdrexd) || (type == kStlex) ||
(type == kStlexb) || (type == kStlexh) || (type == kStrex) ||
(type == kStrexb) || (type == kStrexh)) {
UnimplementedDelegate(type);
return;
}
VIXL_ASSERT((type == kLdrd) || (type == kStrd));
CONTEXT_SCOPE;
// TODO: Should we allow these cases?
if (IsUsingA32()) {
// The first register needs to be even.
if ((rt.GetCode() & 1) != 0) {
UnimplementedDelegate(type);
return;
}
// Registers need to be adjacent.
if (((rt.GetCode() + 1) % kNumberOfRegisters) != rt2.GetCode()) {
UnimplementedDelegate(type);
return;
}
// LDRD lr, pc [...] is not allowed.
if (rt.Is(lr)) {
UnimplementedDelegate(type);
return;
}
}
if (operand.IsImmediate()) {
const Register& rn = operand.GetBaseRegister();
AddrMode addrmode = operand.GetAddrMode();
int32_t offset = operand.GetOffsetImmediate();
uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
// Try to maximize the offset used by the MemOperand (load_store_offset).
// Add the part which can't be used by the MemOperand (add_offset).
uint32_t load_store_offset = offset & extra_offset_mask;
uint32_t add_offset = offset & ~extra_offset_mask;
if ((add_offset != 0) &&
(IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
load_store_offset = 0;
add_offset = offset;
}
switch (addrmode) {
case PreIndex: {
// Allow using the destinations as a scratch registers if possible.
UseScratchRegisterScope temps(this);
if (type == kLdrd) {
if (!rt.Is(rn)) temps.Include(rt);
if (!rt2.Is(rn)) temps.Include(rt2);
}
// Pre-Indexed case:
// ldrd r0, r1, [r2, 12345]! will translate into
// add r2, 12345
// ldrd r0, r1, [r2]
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, add_offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
rt,
rt2,
MemOperand(rn, load_store_offset, PreIndex));
}
return;
}
case Offset: {
UseScratchRegisterScope temps(this);
// Allow using the destinations as a scratch registers if possible.
if (type == kLdrd) {
if (!rt.Is(rn)) temps.Include(rt);
if (!rt2.Is(rn)) temps.Include(rt2);
}
Register scratch = temps.Acquire();
// Offset case:
// ldrd r0, r1, [r2, 12345] will translate into
// add r0, r2, 12345
// ldrd r0, r1, [r0]
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, scratch, rn, add_offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
rt,
rt2,
MemOperand(scratch, load_store_offset));
}
return;
}
case PostIndex:
// Avoid the unpredictable case 'ldrd r0, r1, [r0], imm'
if (!rn.Is(rt) && !rn.Is(rt2)) {
// Post-indexed case:
// ldrd r0, r1, [r2], imm32 will translate into
// ldrd r0, r1, [r2]
// movw ip, imm32 & 0xffff
// movt ip, imm32 >> 16
// add r2, ip
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond,
rt,
rt2,
MemOperand(rn, load_store_offset, PostIndex));
}
{
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, add_offset);
}
return;
}
break;
}
}
if (operand.IsPlainRegister()) {
const Register& rn = operand.GetBaseRegister();
const Register& rm = operand.GetOffsetRegister();
AddrMode addrmode = operand.GetAddrMode();
switch (addrmode) {
case PreIndex:
// ldrd r0, r1, [r2, r3]! will translate into
// add r2, r3
// ldrd r0, r1, [r2]
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, rn, rn, rm);
} else {
sub(cond, rn, rn, rm);
}
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
}
return;
case PostIndex:
// ldrd r0, r1, [r2], r3 will translate into
// ldrd r0, r1, [r2]
// add r2, r3
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, rn, rn, rm);
} else {
sub(cond, rn, rn, rm);
}
}
return;
case Offset: {
UseScratchRegisterScope temps(this);
// Allow using the destinations as a scratch registers if possible.
if (type == kLdrd) {
if (!rt.Is(rn)) temps.Include(rt);
if (!rt2.Is(rn)) temps.Include(rt2);
}
Register scratch = temps.Acquire();
// Offset case:
// ldrd r0, r1, [r2, r3] will translate into
// add r0, r2, r3
// ldrd r0, r1, [r0]
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
if (operand.GetSign().IsPlus()) {
add(cond, scratch, rn, rm);
} else {
sub(cond, scratch, rn, rm);
}
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, rt, rt2, MemOperand(scratch, Offset));
}
return;
}
}
}
Assembler::Delegate(type, instruction, cond, rt, rt2, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtSMop instruction,
Condition cond,
DataType dt,
SRegister rd,
const MemOperand& operand) {
CONTEXT_SCOPE;
if (operand.IsImmediate()) {
const Register& rn = operand.GetBaseRegister();
AddrMode addrmode = operand.GetAddrMode();
int32_t offset = operand.GetOffsetImmediate();
VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0));
if (rn.IsPC()) {
VIXL_ABORT_WITH_MSG(
"The MacroAssembler does not convert vldr or vstr with a PC base "
"register.\n");
}
switch (addrmode) {
case PreIndex:
// Pre-Indexed case:
// vldr.32 s0, [r1, 12345]! will translate into
// add r1, 12345
// vldr.32 s0, [r1]
if (offset != 0) {
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
}
return;
case Offset: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Offset case:
// vldr.32 s0, [r1, 12345] will translate into
// add ip, r1, 12345
// vldr.32 s0, [ip]
{
VIXL_ASSERT(offset != 0);
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, scratch, rn, offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
}
return;
}
case PostIndex:
// Post-indexed case:
// vldr.32 s0, [r1], imm32 will translate into
// vldr.32 s0, [r1]
// movw ip, imm32 & 0xffff
// movt ip, imm32 >> 16
// add r1, ip
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
}
if (offset != 0) {
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, offset);
}
return;
}
}
Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtDMop instruction,
Condition cond,
DataType dt,
DRegister rd,
const MemOperand& operand) {
CONTEXT_SCOPE;
if (operand.IsImmediate()) {
const Register& rn = operand.GetBaseRegister();
AddrMode addrmode = operand.GetAddrMode();
int32_t offset = operand.GetOffsetImmediate();
VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0));
if (rn.IsPC()) {
VIXL_ABORT_WITH_MSG(
"The MacroAssembler does not convert vldr or vstr with a PC base "
"register.\n");
}
switch (addrmode) {
case PreIndex:
// Pre-Indexed case:
// vldr.64 d0, [r1, 12345]! will translate into
// add r1, 12345
// vldr.64 d0, [r1]
if (offset != 0) {
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
}
return;
case Offset: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Offset case:
// vldr.64 d0, [r1, 12345] will translate into
// add ip, r1, 12345
// vldr.64 d0, [ip]
{
VIXL_ASSERT(offset != 0);
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, scratch, rn, offset);
}
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
}
return;
}
case PostIndex:
// Post-indexed case:
// vldr.64 d0, [r1], imm32 will translate into
// vldr.64 d0, [r1]
// movw ip, imm32 & 0xffff
// movt ip, imm32 >> 16
// add r1, ip
{
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
(this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
}
if (offset != 0) {
CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
add(cond, rn, rn, offset);
}
return;
}
}
Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondMsrOp instruction,
Condition cond,
MaskedSpecialRegister spec_reg,
const Operand& operand) {
USE(type);
VIXL_ASSERT(type == kMsr);
if (operand.IsImmediate()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
{
CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
mov(cond, scratch, operand);
}
CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
msr(cond, spec_reg, scratch);
return;
}
Assembler::Delegate(type, instruction, cond, spec_reg, operand);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtDL instruction,
Condition cond,
DataType dt,
DRegister rd,
Location* location) {
VIXL_ASSERT(type == kVldr);
CONTEXT_SCOPE;
if (location->IsBound()) {
CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
uint32_t mask = GetOffsetMask(type, Offset);
vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
return;
}
Assembler::Delegate(type, instruction, cond, dt, rd, location);
}
void MacroAssembler::Delegate(InstructionType type,
InstructionCondDtSL instruction,
Condition cond,
DataType dt,
SRegister rd,
Location* location) {
VIXL_ASSERT(type == kVldr);
CONTEXT_SCOPE;
if (location->IsBound()) {
CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
uint32_t mask = GetOffsetMask(type, Offset);
vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
return;
}
Assembler::Delegate(type, instruction, cond, dt, rd, location);
}
#undef CONTEXT_SCOPE
#undef TOSTRING
#undef STRINGIFY
// Start of generated code.
// End of generated code.
} // namespace aarch32
} // namespace vixl
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <inttypes.h>
#include <stdint.h>
}
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <iostream>
#include "../utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/operands-aarch32.h"
namespace vixl {
namespace aarch32 {
// Operand
std::ostream& operator<<(std::ostream& os, const Operand& operand) {
if (operand.IsImmediate()) {
return os << "#" << operand.GetImmediate();
}
if (operand.IsImmediateShiftedRegister()) {
if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
(operand.GetShiftAmount() == 0)) {
return os << operand.GetBaseRegister();
}
if (operand.GetShift().IsRRX()) {
return os << operand.GetBaseRegister() << ", rrx";
}
return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " #"
<< operand.GetShiftAmount();
}
if (operand.IsRegisterShiftedRegister()) {
return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " "
<< operand.GetShiftRegister();
}
VIXL_UNREACHABLE();
return os;
}
std::ostream& operator<<(std::ostream& os, const NeonImmediate& neon_imm) {
if (neon_imm.IsDouble()) {
if (neon_imm.imm_.d_ == 0) {
if (copysign(1.0, neon_imm.imm_.d_) < 0.0) {
return os << "#-0.0";
}
return os << "#0.0";
}
return os << "#" << std::setprecision(9) << neon_imm.imm_.d_;
}
if (neon_imm.IsFloat()) {
if (neon_imm.imm_.f_ == 0) {
      if (copysign(1.0, neon_imm.imm_.f_) < 0.0) return os << "#-0.0";
return os << "#0.0";
}
return os << "#" << std::setprecision(9) << neon_imm.imm_.f_;
}
if (neon_imm.IsInteger64()) {
return os << "#0x" << std::hex << std::setw(16) << std::setfill('0')
<< neon_imm.imm_.u64_ << std::dec;
}
return os << "#" << neon_imm.imm_.u32_;
}
// SOperand
std::ostream& operator<<(std::ostream& os, const SOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
// DOperand
std::ostream& operator<<(std::ostream& os, const DOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
// QOperand
std::ostream& operator<<(std::ostream& os, const QOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
ImmediateVbic::ImmediateVbic(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if (dt.GetValue() == I16) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x9);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xb);
SetEncodedImmediate(immediate >> 8);
}
} else if (dt.GetValue() == I32) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x1);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x3);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x5);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x7);
SetEncodedImmediate(immediate >> 24);
}
}
}
}
DataType ImmediateVbic::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x1:
case 0x3:
case 0x5:
case 0x7:
return I32;
case 0x9:
case 0xb:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVbic::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x1:
case 0x9:
return immediate;
case 0x3:
case 0xb:
return immediate << 8;
case 0x5:
return immediate << 16;
case 0x7:
return immediate << 24;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
ImmediateVmov::ImmediateVmov(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger()) {
switch (dt.GetValue()) {
case I8:
if (neon_imm.CanConvert<uint8_t>()) {
SetEncodingValue(0xe);
SetEncodedImmediate(neon_imm.GetImmediate<uint8_t>());
}
break;
case I16:
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x8);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xa);
SetEncodedImmediate(immediate >> 8);
}
}
break;
case I32:
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x0);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x2);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x4);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x6);
SetEncodedImmediate(immediate >> 24);
} else if ((immediate & ~0xff00) == 0xff) {
SetEncodingValue(0xc);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0xffff) {
SetEncodingValue(0xd);
SetEncodedImmediate(immediate >> 16);
}
}
break;
case I64: {
bool is_valid = true;
uint32_t encoding = 0;
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
uint32_t mask = 0xff000000;
for (uint32_t set_bit = 1 << 3; set_bit != 0; set_bit >>= 1) {
if ((immediate & mask) == mask) {
encoding |= set_bit;
} else if ((immediate & mask) != 0) {
is_valid = false;
break;
}
mask >>= 8;
}
} else {
uint64_t immediate = neon_imm.GetImmediate<uint64_t>();
uint64_t mask = UINT64_C(0xff) << 56;
for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
if ((immediate & mask) == mask) {
encoding |= set_bit;
} else if ((immediate & mask) != 0) {
is_valid = false;
break;
}
mask >>= 8;
}
}
if (is_valid) {
SetEncodingValue(0x1e);
SetEncodedImmediate(encoding);
}
break;
}
default:
break;
}
} else {
switch (dt.GetValue()) {
case F32:
if (neon_imm.IsFloat() || neon_imm.IsDouble()) {
ImmediateVFP vfp(neon_imm.GetImmediate<float>());
if (vfp.IsValid()) {
SetEncodingValue(0xf);
SetEncodedImmediate(vfp.GetEncodingValue());
}
}
break;
default:
break;
}
}
}
DataType ImmediateVmov::DecodeDt(uint32_t cmode) {
switch (cmode & 0xf) {
case 0x0:
case 0x2:
case 0x4:
case 0x6:
case 0xc:
case 0xd:
return I32;
case 0x8:
case 0xa:
return I16;
case 0xe:
return ((cmode & 0x10) == 0) ? I8 : I64;
case 0xf:
if ((cmode & 0x10) == 0) return F32;
break;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVmov::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode & 0xf) {
case 0x8:
case 0x0:
return immediate;
case 0x2:
case 0xa:
return immediate << 8;
case 0x4:
return immediate << 16;
case 0x6:
return immediate << 24;
case 0xc:
return (immediate << 8) | 0xff;
case 0xd:
return (immediate << 16) | 0xffff;
case 0xe: {
if (cmode == 0x1e) {
uint64_t encoding = 0;
for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
encoding <<= 8;
if ((immediate & set_bit) != 0) {
encoding |= 0xff;
}
}
return encoding;
} else {
return immediate;
}
}
case 0xf: {
return ImmediateVFP::Decode<float>(immediate);
}
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
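// A small round-trip sketch of the cmode encoding implemented above, assuming
// DataType can be constructed from the I32 value used in this file; the
// example immediate 0xab00 is arbitrary. Kept out of the build with #if 0.
#if 0
void ImmediateVmovEncodingSketch() {
  // Encode: 0xab00 hits the "(immediate & ~0xff00) == 0" case for I32, so the
  // encoding value (cmode) is 0x2 and the encoded 8-bit payload is 0xab.
  ImmediateVmov encoded(DataType(I32), NeonImmediate(0xab00u));
  VIXL_ASSERT(encoded.IsValid());
  // Decode: cmode 0x2 shifts the 8-bit payload back up by 8 bits.
  NeonImmediate decoded = ImmediateVmov::DecodeImmediate(0x2, 0xab);
  VIXL_ASSERT(decoded.GetImmediate<uint32_t>() == 0xab00);
}
#endif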
ImmediateVmvn::ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
switch (dt.GetValue()) {
case I16:
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x8);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xa);
SetEncodedImmediate(immediate >> 8);
}
break;
case I32:
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x0);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x2);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x4);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x6);
SetEncodedImmediate(immediate >> 24);
} else if ((immediate & ~0xff00) == 0xff) {
SetEncodingValue(0xc);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0xffff) {
SetEncodingValue(0xd);
SetEncodedImmediate(immediate >> 16);
}
break;
default:
break;
}
}
}
DataType ImmediateVmvn::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x0:
case 0x2:
case 0x4:
case 0x6:
case 0xc:
case 0xd:
return I32;
case 0x8:
case 0xa:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVmvn::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x0:
case 0x8:
return immediate;
case 0x2:
case 0xa:
return immediate << 8;
case 0x4:
return immediate << 16;
case 0x6:
return immediate << 24;
case 0xc:
return (immediate << 8) | 0xff;
case 0xd:
return (immediate << 16) | 0xffff;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
ImmediateVorr::ImmediateVorr(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if (dt.GetValue() == I16) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x9);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xb);
SetEncodedImmediate(immediate >> 8);
}
} else if (dt.GetValue() == I32) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x1);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x3);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x5);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x7);
SetEncodedImmediate(immediate >> 24);
}
}
}
}
DataType ImmediateVorr::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x1:
case 0x3:
case 0x5:
case 0x7:
return I32;
case 0x9:
case 0xb:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVorr::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x1:
case 0x9:
return immediate;
case 0x3:
case 0xb:
return immediate << 8;
case 0x5:
return immediate << 16;
case 0x7:
return immediate << 24;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
// MemOperand
std::ostream& operator<<(std::ostream& os, const MemOperand& operand) {
os << "[" << operand.GetBaseRegister();
if (operand.GetAddrMode() == PostIndex) {
os << "]";
if (operand.IsRegisterOnly()) return os << "!";
}
if (operand.IsImmediate()) {
if ((operand.GetOffsetImmediate() != 0) || operand.GetSign().IsMinus() ||
((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
if (operand.GetOffsetImmediate() == 0) {
os << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
} else {
os << ", #" << operand.GetOffsetImmediate();
}
}
} else if (operand.IsPlainRegister()) {
os << ", " << operand.GetSign() << operand.GetOffsetRegister();
} else if (operand.IsShiftedRegister()) {
os << ", " << operand.GetSign() << operand.GetOffsetRegister()
<< ImmediateShiftOperand(operand.GetShift(), operand.GetShiftAmount());
} else {
VIXL_UNREACHABLE();
return os;
}
if (operand.GetAddrMode() == Offset) {
os << "]";
} else if (operand.GetAddrMode() == PreIndex) {
os << "]!";
}
return os;
}
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand) {
os << "[" << operand.GetBaseRegister() << operand.GetAlignment() << "]";
if (operand.GetAddrMode() == PostIndex) {
if (operand.IsPlainRegister()) {
os << ", " << operand.GetOffsetRegister();
} else {
os << "!";
}
}
return os;
}
} // namespace aarch32
} // namespace vixl
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_
#define VIXL_AARCH32_OPERANDS_AARCH32_H_
#include "aarch32/instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
// Operand represents generic set of arguments to pass to an instruction.
//
// Usage: <instr> <Rd> , <Operand>
//
// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.)
// <Rd> is the destination register
// <Operand> is the rest of the arguments to the instruction
//
// <Operand> can be one of:
//
// #<imm> - an unsigned 32-bit immediate value
// <Rm>, <shift> <#amount> - immediate shifted register
// <Rm>, <shift> <Rs> - register shifted register
//
class Operand {
public:
// { #<immediate> }
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
Operand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
// rm
// where rm is the base register
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm),
shift_(LSL),
amount_(0),
rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
}
// rm, <shift>
// where rm is the base register, and
// <shift> is RRX
Operand(Register rm, Shift shift)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rm, <shift> #<amount>
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// <amount> is uint6_t.
Operand(Register rm, Shift shift, uint32_t amount)
: imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
#ifdef VIXL_DEBUG
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
// rm, <shift> rs
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// rs is the shifted register
Operand(Register rm, Shift shift, Register rs)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) {
VIXL_ASSERT(rm_.IsValid() && rs_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
}
// Factory methods creating operands from any integral or pointer type. The
// source must fit into 32 bits.
template <typename T>
static Operand From(T immediate) {
#if __cplusplus >= 201103L
VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value,
"An integral type is required to build an "
"immediate operand.");
#endif
    // Allow either a signed or an unsigned 32-bit integer to be passed, but store it
// as a uint32_t. The signedness information will be lost. We have to add a
// static_cast to make sure the compiler does not complain about implicit 64
// to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit
// value, as long as it can be encoded in 32 bits.
VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate));
return Operand(static_cast<uint32_t>(immediate));
}
template <typename T>
static Operand From(T* address) {
uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address);
VIXL_ASSERT(IsUint32(address_as_integral));
return Operand(static_cast<uint32_t>(address_as_integral));
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsPlainRegister() const {
return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0);
}
bool IsImmediateShiftedRegister() const {
return rm_.IsValid() && !rs_.IsValid();
}
bool IsRegisterShiftedRegister() const {
return rm_.IsValid() && rs_.IsValid();
}
uint32_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return imm_;
}
int32_t GetSignedImmediate() const {
VIXL_ASSERT(IsImmediate());
int32_t result;
memcpy(&result, &imm_, sizeof(result));
return result;
}
Register GetBaseRegister() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return rm_;
}
Shift GetShift() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return shift_;
}
uint32_t GetShiftAmount() const {
VIXL_ASSERT(IsImmediateShiftedRegister());
return amount_;
}
Register GetShiftRegister() const {
VIXL_ASSERT(IsRegisterShiftedRegister());
return rs_;
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
private:
  // Forbid implicitly creating operands around types that cannot be encoded
// into a uint32_t without loss.
#if __cplusplus >= 201103L
Operand(int64_t) = delete; // NOLINT(runtime/explicit)
Operand(uint64_t) = delete; // NOLINT(runtime/explicit)
Operand(float) = delete; // NOLINT(runtime/explicit)
Operand(double) = delete; // NOLINT(runtime/explicit)
#else
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) { // NOLINT
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) { // NOLINT
VIXL_UNREACHABLE();
}
#endif
uint32_t imm_;
Register rm_;
Shift shift_;
uint32_t amount_;
Register rs_;
};
std::ostream& operator<<(std::ostream& os, const Operand& operand);
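// A minimal sketch of the Operand forms documented above, assuming the usual
// aarch32 register aliases (r1, r2) and shift constants from
// instructions-aarch32.h are in scope; kept out of the build with #if 0.
#if 0
void OperandFormsSketch() {
  Operand imm(0xff);                 // #<imm>
  Operand plain(r1);                 // r1
  Operand shifted(r1, LSL, 4);       // r1, lsl #4
  Operand reg_shifted(r1, ASR, r2);  // r1, asr r2
  Operand rotated(r1, RRX);          // r1, rrx
  // From() also accepts a 64-bit value, as long as it is encodable in 32 bits.
  Operand wide = Operand::From(int64_t(0x1234));
}
#endif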
class NeonImmediate {
template <typename T>
struct DataTypeIdentity {
T data_type_;
};
public:
// { #<immediate> }
// where <immediate> is 32 bit number.
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
NeonImmediate(int immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
// { #<immediate> }
// where <immediate> is a 64 bit number
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
// { #<immediate> }
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F32) {}
NeonImmediate(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F64) {}
NeonImmediate(const NeonImmediate& src)
: imm_(src.imm_), immediate_type_(src.immediate_type_) {}
template <typename T>
T GetImmediate() const {
return GetImmediate(DataTypeIdentity<T>());
}
template <typename T>
T GetImmediate(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t));
VIXL_ASSERT(CanConvert<T>());
if (immediate_type_.Is(I64))
return static_cast<T>(imm_.u64_ & static_cast<T>(-1));
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return static_cast<T>(imm_.u32_ & static_cast<T>(-1));
}
uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const {
VIXL_ASSERT(CanConvert<uint64_t>());
if (immediate_type_.Is(I32)) return imm_.u32_;
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return imm_.u64_;
}
float GetImmediate(const DataTypeIdentity<float>&) const {
VIXL_ASSERT(CanConvert<float>());
if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_);
return imm_.f_;
}
double GetImmediate(const DataTypeIdentity<double>&) const {
VIXL_ASSERT(CanConvert<double>());
if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_);
return imm_.d_;
}
bool IsInteger32() const { return immediate_type_.Is(I32); }
bool IsInteger64() const { return immediate_type_.Is(I64); }
  bool IsInteger() const { return IsInteger32() || IsInteger64(); }
bool IsFloat() const { return immediate_type_.Is(F32); }
bool IsDouble() const { return immediate_type_.Is(F64); }
bool IsFloatZero() const {
if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f;
if (immediate_type_.Is(F64)) return imm_.d_ == 0.0;
return false;
}
template <typename T>
bool CanConvert() const {
return CanConvert(DataTypeIdentity<T>());
}
template <typename T>
bool CanConvert(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) < sizeof(uint32_t));
return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint32_t>&) const {
return immediate_type_.Is(I32) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint64_t>&) const {
return IsInteger() || CanConvert<uint32_t>();
}
bool CanConvert(const DataTypeIdentity<float>&) const {
return IsFloat() || IsDouble();
}
bool CanConvert(const DataTypeIdentity<double>&) const {
return IsFloat() || IsDouble();
}
friend std::ostream& operator<<(std::ostream& os,
const NeonImmediate& operand);
private:
union NeonImmediateType {
uint64_t u64_;
double d_;
uint32_t u32_;
float f_;
NeonImmediateType(uint64_t u) : u64_(u) {}
NeonImmediateType(int64_t u) : u64_(u) {}
NeonImmediateType(uint32_t u) : u32_(u) {}
NeonImmediateType(int32_t u) : u32_(u) {}
NeonImmediateType(double d) : d_(d) {}
NeonImmediateType(float f) : f_(f) {}
NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {}
} imm_;
DataType immediate_type_;
};
std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand);
class NeonOperand {
public:
NeonOperand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: imm_(imm),
rm_(NoDReg) {}
NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm) {
VIXL_ASSERT(rm_.IsValid());
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsRegister() const { return rm_.IsValid(); }
bool IsFloatZero() const {
VIXL_ASSERT(IsImmediate());
return imm_.IsFloatZero();
}
const NeonImmediate& GetNeonImmediate() const { return imm_; }
VRegister GetRegister() const {
VIXL_ASSERT(IsRegister());
return rm_;
}
protected:
NeonImmediate imm_;
VRegister rm_;
};
std::ostream& operator<<(std::ostream& os, const NeonOperand& operand);
// SOperand represents either an immediate or a SRegister.
class SOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is 32bit int
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is 32bit float
SOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// where <immediate> is 64bit float
SOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(SRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
SRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister));
return SRegister(rm_.GetCode());
}
};
// DOperand represents either an immediate or a DRegister.
std::ostream& operator<<(std::ostream& os, const SOperand& operand);
class DOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(DRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
DRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister));
return DRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const DOperand& operand);
// QOperand represents either an immediate or a QRegister.
class QOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(QRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {
VIXL_ASSERT(rm_.IsValid());
}
QRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister));
return QRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const QOperand& operand);
class ImmediateVFP : public EncodingValue {
template <typename T>
struct FloatType {
typedef T base_type;
};
public:
explicit ImmediateVFP(const NeonImmediate& neon_imm) {
if (neon_imm.IsFloat()) {
const float imm = neon_imm.GetImmediate<float>();
if (VFP::IsImmFP32(imm)) {
SetEncodingValue(VFP::FP32ToImm8(imm));
}
} else if (neon_imm.IsDouble()) {
const double imm = neon_imm.GetImmediate<double>();
if (VFP::IsImmFP64(imm)) {
SetEncodingValue(VFP::FP64ToImm8(imm));
}
}
}
template <typename T>
static T Decode(uint32_t v) {
return Decode(v, FloatType<T>());
}
static float Decode(uint32_t imm8, const FloatType<float>&) {
return VFP::Imm8ToFP32(imm8);
}
static double Decode(uint32_t imm8, const FloatType<double>&) {
return VFP::Imm8ToFP64(imm8);
}
};
class ImmediateVbic : public EncodingValueAndImmediate {
public:
ImmediateVbic(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVand : public ImmediateVbic {
public:
ImmediateVand(DataType dt, const NeonImmediate neon_imm)
: ImmediateVbic(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
class ImmediateVmov : public EncodingValueAndImmediate {
public:
ImmediateVmov(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVmvn : public EncodingValueAndImmediate {
public:
ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorr : public EncodingValueAndImmediate {
public:
ImmediateVorr(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorn : public ImmediateVorr {
public:
ImmediateVorn(DataType dt, const NeonImmediate& neon_imm)
: ImmediateVorr(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
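// A tiny sketch of the relationship implemented by ImmediateVand and
// ImmediateVorn: they reuse the ImmediateVbic / ImmediateVorr encoding and
// complement the 8-bit encoded payload. Assumes DataType is constructible
// from the I32 value used above; kept out of the build with #if 0.
#if 0
void ComplementedImmediateSketch() {
  ImmediateVbic bic(DataType(I32), NeonImmediate(0xf0u));
  ImmediateVand vand(DataType(I32), NeonImmediate(0xf0u));
  // Same cmode, complemented payload.
  VIXL_ASSERT(bic.GetEncodingValue() == vand.GetEncodingValue());
  VIXL_ASSERT(vand.GetEncodedImmediate() == (~bic.GetEncodedImmediate() & 0xff));
}
#endif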
// MemOperand represents the addressing mode of a load or store instruction.
//
// Usage: <instr> <Rt> , <MemOperand>
//
// where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.),
// <Rt> is general purpose register to be transferred,
// <MemOperand> is the rest of the arguments to the instruction
//
// <MemOperand> can be in one of 3 addressing modes:
//
// [ <Rn>, <offset> ] == offset addressing
// [ <Rn>, <offset> ]! == pre-indexed addressing
// [ <Rn> ], <offset> == post-indexed addressing
//
// where <offset> can be one of:
// - an immediate constant, such as <imm8>, <imm12>
// - an index register <Rm>
// - a shifted index register <Rm>, <shift> #<amount>
//
// The index register may have an associated {+/-} sign,
// which, if omitted, defaults to +.
//
// We have two constructors for the offset:
//
// One takes a signed offset parameter: sign_ is the sign of that offset and
// offset_ is the offset itself.
//
// The other takes a sign and a non-negative offset parameter: sign_ is the
// given sign and offset_ is "sign * offset".
//
// The value of offset_ reflects the effective offset. For an offset_ of 0,
// sign_ can be positive or negative. Otherwise, sign_ always agrees with
// the sign of offset_.
class MemOperand {
public:
// rn
// where rn is the general purpose base register only
explicit MemOperand(Register rn, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode | kMemOperandRegisterOnly) {
VIXL_ASSERT(rn_.IsValid());
}
// rn, #<imm>
// where rn is the general purpose base register,
// <imm> is a 32-bit offset to add to rn
//
// Note: if rn is PC, then this form is equivalent to a "label"
  // Note: the second constructor allows minus zero (-0).
MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(offset),
sign_((offset < 0) ? minus : plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
}
MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(sign.IsPlus() ? offset : -offset),
sign_(sign),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
// With this constructor, the sign must only be specified by "sign".
VIXL_ASSERT(offset >= 0);
}
// rn, {+/-}rm
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, rm
// where rn is the general purpose base register,
// rm is the general purpose index register,
MemOperand(Register rn, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, {+/-}rm, <shift>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, rm, <shift>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, {+/-}rm, <shift> #<amount>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
  // <shift_amount> is the shift amount to apply to the value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
// rn, rm, <shift> #<amount>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
  // <shift_amount> is the shift amount to apply to the value from rm
MemOperand(Register rn,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
Register GetBaseRegister() const { return rn_; }
int32_t GetOffsetImmediate() const { return offset_; }
bool IsOffsetImmediateWithinRange(int min,
int max,
int multiple_of = 1) const {
return (offset_ >= min) && (offset_ <= max) &&
((offset_ % multiple_of) == 0);
}
Sign GetSign() const { return sign_; }
Register GetOffsetRegister() const { return rm_; }
Shift GetShift() const { return shift_; }
unsigned GetShiftAmount() const { return shift_amount_; }
AddrMode GetAddrMode() const {
return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask);
}
bool IsRegisterOnly() const {
return (addrmode_ & kMemOperandRegisterOnly) != 0;
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); }
bool IsPlainRegister() const {
return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0);
}
bool IsShiftedRegister() const { return rm_.IsValid(); }
bool IsImmediateOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid();
}
bool IsImmediateZeroOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0);
}
bool IsRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() &&
(shift_amount_ == 0);
}
bool IsShiftedRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid();
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
bool IsOffset() const { return GetAddrMode() == Offset; }
bool IsPreIndex() const { return GetAddrMode() == PreIndex; }
bool IsPostIndex() const { return GetAddrMode() == PostIndex; }
bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); }
private:
static const int kMemOperandRegisterOnly = 0x1000;
static const int kMemOperandAddrModeMask = 0xfff;
void CheckShift() {
#ifdef VIXL_DEBUG
// Disallow any zero shift other than RRX #0 and LSL #0 .
if ((shift_amount_ == 0) && shift_.IsRRX()) return;
if ((shift_amount_ == 0) && !shift_.IsLSL()) {
VIXL_ABORT_WITH_MSG(
"A shift by 0 is only accepted in "
"the case of lsl and will be treated as "
"no shift.\n");
}
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(shift_amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
Register rn_;
int32_t offset_;
Sign sign_;
Register rm_;
Shift shift_;
uint32_t shift_amount_;
uint32_t addrmode_;
};
std::ostream& operator<<(std::ostream& os, const MemOperand& operand);
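// A minimal sketch of the addressing modes documented above, assuming the
// usual aarch32 register aliases (r0, r1) are in scope; kept out of the build
// with #if 0.
#if 0
void MemOperandFormsSketch() {
  MemOperand offset_mode(r0, 16);             // [r0, #16]
  MemOperand pre_indexed(r0, -4, PreIndex);   // [r0, #-4]!
  MemOperand post_indexed(r0, 8, PostIndex);  // [r0], #8
  MemOperand reg_offset(r0, minus, r1);       // [r0, -r1]
  MemOperand scaled(r0, plus, r1, LSL, 2);    // [r0, +r1, lsl #2]
}
#endif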
class AlignedMemOperand : public MemOperand {
public:
AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset)
: MemOperand(rn, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
AlignedMemOperand(Register rn,
Alignment align,
Register rm,
AddrMode addrmode)
: MemOperand(rn, rm, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
Alignment GetAlignment() const { return align_; }
private:
Alignment align_;
};
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand);
} // namespace aarch32
} // namespace vixl
#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The ABI features are only supported with C++11 or later.
#if __cplusplus >= 201103L
// This should not be defined manually.
#define VIXL_HAS_ABI_SUPPORT
#elif defined(VIXL_HAS_ABI_SUPPORT)
#error "The ABI support requires C++11 or later."
#endif
#ifdef VIXL_HAS_ABI_SUPPORT
#ifndef VIXL_AARCH64_ABI_AARCH64_H_
#define VIXL_AARCH64_ABI_AARCH64_H_
#include <algorithm>
#include <type_traits>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
// Class describing the AArch64 procedure call standard, as defined in "ARM
// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)",
// release 1.0 (AAPCS below).
//
// The stages in the comments match the description in that document.
//
// Stage B does not apply to arguments handled by this class.
class ABI {
public:
explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) {
// Stage A - Initialization
Reset();
}
void Reset() {
NGRN_ = 0;
NSRN_ = 0;
stack_offset_ = 0;
}
int GetStackSpaceRequired() { return stack_offset_; }
// The logic is described in section 5.5 of the AAPCS.
template <typename T>
GenericOperand GetReturnGenericOperand() const {
ABI abi(stack_pointer_);
GenericOperand result = abi.GetNextParameterGenericOperand<T>();
VIXL_ASSERT(result.IsCPURegister());
return result;
}
// The logic is described in section 5.4.2 of the AAPCS.
// The `GenericOperand` returned describes the location reserved for the
// argument from the point of view of the callee.
template <typename T>
GenericOperand GetNextParameterGenericOperand() {
const bool is_floating_point_type = std::is_floating_point<T>::value;
const bool is_integral_type =
std::is_integral<T>::value || std::is_enum<T>::value;
const bool is_pointer_type = std::is_pointer<T>::value;
int type_alignment = std::alignment_of<T>::value;
// We only support basic types.
VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type);
// To ensure we get the correct type of operand when simulating on a 32-bit
// host, force the size of pointer types to the native AArch64 pointer size.
unsigned size = is_pointer_type ? 8 : sizeof(T);
// The size of the 'operand' reserved for the argument.
unsigned operand_size = AlignUp(size, kWRegSizeInBytes);
if (size > 8) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.1
if (is_floating_point_type && (NSRN_ < 8)) {
return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte));
}
// Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
// Stages C.5 and C.6
if (is_floating_point_type) {
VIXL_STATIC_ASSERT(
!is_floating_point_type ||
(std::is_same<T, float>::value || std::is_same<T, double>::value));
int offset = stack_offset_;
stack_offset_ += 8;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
// Stage C.7
if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) {
return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte));
}
// Stage C.8
if (type_alignment == 16) {
NGRN_ = AlignUp(NGRN_, 2);
}
// Stage C.9
if (is_integral_type && (size == 16) && (NGRN_ < 7)) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.10: Unsupported. Caught by the assertions above.
// Stage C.11
NGRN_ = 8;
// Stage C.12
stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8));
// Stage C.13: Unsupported. Caught by the assertions above.
// Stage C.14
VIXL_ASSERT(size <= 8u);
size = std::max(size, 8u);
int offset = stack_offset_;
stack_offset_ += size;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
private:
Register stack_pointer_;
// Next General-purpose Register Number.
int NGRN_;
// Next SIMD and Floating-point Register Number.
int NSRN_;
// The acronym "NSAA" used in the standard refers to the "Next Stacked
// Argument Address". Here we deal with offsets from the stack pointer.
int stack_offset_;
};
template <>
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
return GenericOperand();
}
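// A short sketch of how the helper above walks the arguments of a call such
// as `double f(int, double, void*)`; the names below are illustrative only.
// Kept out of the build with #if 0.
#if 0
inline void AbiSketch() {
  ABI abi;
  GenericOperand arg0 = abi.GetNextParameterGenericOperand<int>();     // w0
  GenericOperand arg1 = abi.GetNextParameterGenericOperand<double>();  // d0
  GenericOperand arg2 = abi.GetNextParameterGenericOperand<void*>();   // x1
  GenericOperand ret = abi.GetReturnGenericOperand<double>();          // d0
  // With three small arguments, nothing spills to the stack.
  VIXL_ASSERT(abi.GetStackSpaceRequired() == 0);
}
#endif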
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_ABI_AARCH64_H_
#endif // VIXL_HAS_ABI_SUPPORT
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../utils-vixl.h"
#include "cpu-aarch64.h"
namespace vixl {
namespace aarch64 {
// Initialise to smallest possible cache size.
unsigned CPU::dcache_line_size_ = 1;
unsigned CPU::icache_line_size_ = 1;
// Currently computes I and D cache line size.
void CPU::SetUp() {
uint32_t cache_type_register = GetCacheType();
  // The cache type register holds information about the caches, including the
  // I and D cache line sizes.
static const int kDCacheLineSizeShift = 16;
static const int kICacheLineSizeShift = 0;
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
// The cache type register holds the size of the I and D caches in words as
// a power of two.
uint32_t dcache_line_size_power_of_two =
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
uint32_t icache_line_size_power_of_two =
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
dcache_line_size_ = 4 << dcache_line_size_power_of_two;
icache_line_size_ = 4 << icache_line_size_power_of_two;
}
uint32_t CPU::GetCacheType() {
#ifdef __aarch64__
uint64_t cache_type_register;
// Copy the content of the cache type register to a core register.
__asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT(runtime/references)
: [ctr] "=r"(cache_type_register));
VIXL_ASSERT(IsUint32(cache_type_register));
return static_cast<uint32_t>(cache_type_register);
#else
// This will lead to a cache with 1 byte long lines, which is fine since
// neither EnsureIAndDCacheCoherency nor the simulator will need this
// information.
return 0;
#endif
}
void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
#ifdef __aarch64__
// Implement the cache synchronisation for all targets where AArch64 is the
  // host, even if we're building the simulator for an AArch64 host. This
// allows for cases where the user wants to simulate code as well as run it
// natively.
if (length == 0) {
return;
}
// The code below assumes user space cache operations are allowed.
// Work out the line sizes for each cache, and use them to determine the
// start addresses.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
uintptr_t dline = start & ~(dsize - 1);
uintptr_t iline = start & ~(isize - 1);
// Cache line sizes are always a power of 2.
VIXL_ASSERT(IsPowerOf2(dsize));
VIXL_ASSERT(IsPowerOf2(isize));
uintptr_t end = start + length;
do {
__asm__ __volatile__(
// Clean each line of the D cache containing the target data.
//
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
" dc cvau, %[dline]\n"
:
: [dline] "r"(dline)
// This code does not write to memory, but the "memory" dependency
// prevents GCC from reordering the code.
: "memory");
dline += dsize;
} while (dline < end);
__asm__ __volatile__(
// Make sure that the data cache operations (above) complete before the
// instruction cache operations (below).
//
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
//
      // The point of unification for an Inner Shareable shareability domain is
      // the point by which the instruction and data caches of all the
      // processors in that Inner Shareable shareability domain are guaranteed
      // to see the same copy of a memory location. See ARM DDI 0406B page
      // B2-12 for more information.
" dsb ish\n"
:
:
: "memory");
do {
__asm__ __volatile__(
// Invalidate each line of the I cache containing the target data.
//
// ic : Instruction Cache maintenance
// i : Invalidate
// va : by Address
// u : to the point of Unification
" ic ivau, %[iline]\n"
:
: [iline] "r"(iline)
: "memory");
iline += isize;
} while (iline < end);
__asm__ __volatile__(
// Make sure that the instruction cache operations (above) take effect
// before the isb (below).
" dsb ish\n"
// Ensure that any instructions already in the pipeline are discarded and
// reloaded from the new data.
// isb : Instruction Synchronisation Barrier
" isb\n"
:
:
: "memory");
#else
// If the host isn't AArch64, we must be using the simulator, so this function
// doesn't have to do anything.
USE(address, length);
#endif
}
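// A minimal sketch of the intended call sequence for freshly generated code,
// assuming `code` points to a writable and executable buffer of `size` bytes;
// kept out of the build with #if 0.
#if 0
void FlushGeneratedCode(void* code, size_t size) {
  // Query the real cache line sizes once (the defaults are 1 byte).
  CPU::SetUp();
  // After writing instructions into `code`, make the I-cache coherent with
  // the D-cache before branching to the new code.
  CPU::EnsureIAndDCacheCoherency(code, size);
}
#endif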
} // namespace aarch64
} // namespace vixl
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_AARCH64_H
#define VIXL_CPU_AARCH64_H
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
class CPU {
public:
// Initialise CPU support.
static void SetUp();
// Ensures the data at a given address and with a given size is the same for
// the I and D caches. I and D caches are not automatically coherent on ARM
// so this operation is required before any dynamically generated code can
// safely run.
static void EnsureIAndDCacheCoherency(void *address, size_t length);
// Handle tagged pointers.
template <typename T>
static T SetPointerTag(T pointer, uint64_t tag) {
VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
return (T)raw;
}
template <typename T>
static uint64_t GetPointerTag(T pointer) {
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
return (raw & kAddressTagMask) >> kAddressTagOffset;
}
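  // A small sketch of the tagging helpers above, assuming the architectural
  // 8-bit address tag in the top byte; kept out of the build with #if 0.
#if 0
  static void PointerTagSketch(int* pointer) {
    int* tagged = SetPointerTag(pointer, 0x5a);  // Plant 0x5a in the tag bits.
    VIXL_ASSERT(GetPointerTag(tagged) == 0x5a);
    USE(tagged);
  }
#endif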
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_CPU_AARCH64_H
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Arm Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../cpu-features.h"
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "decoder-aarch64.h"
#include "cpu-features-auditor-aarch64.h"
namespace vixl {
namespace aarch64 {
// Every instruction must update last_instruction_, even if only to clear it,
// and every instruction must also update seen_ once it has been fully handled.
// This scope makes that simple, and allows early returns in the decode logic.
class CPUFeaturesAuditor::RecordInstructionFeaturesScope {
public:
explicit RecordInstructionFeaturesScope(CPUFeaturesAuditor* auditor)
: auditor_(auditor) {
auditor_->last_instruction_ = CPUFeatures::None();
}
~RecordInstructionFeaturesScope() {
auditor_->seen_.Combine(auditor_->last_instruction_);
}
void Record(const CPUFeatures& features) {
auditor_->last_instruction_.Combine(features);
}
void Record(CPUFeatures::Feature feature0,
CPUFeatures::Feature feature1 = CPUFeatures::kNone,
CPUFeatures::Feature feature2 = CPUFeatures::kNone,
CPUFeatures::Feature feature3 = CPUFeatures::kNone) {
auditor_->last_instruction_.Combine(feature0, feature1, feature2, feature3);
}
// If exactly one of a or b is known to be available, record it. Otherwise,
// record both. This is intended for encodings that can be provided by two
// different features.
void RecordOneOrBothOf(CPUFeatures::Feature a, CPUFeatures::Feature b) {
bool hint_a = auditor_->available_.Has(a);
bool hint_b = auditor_->available_.Has(b);
if (hint_a && !hint_b) {
Record(a);
} else if (hint_b && !hint_a) {
Record(b);
} else {
Record(a, b);
}
}
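  // For example, LoadStoreHelper below passes (kFP, kNEON) for LDR_s/STR_s
  // and friends: those encodings are shared by FP and integer SIMD code, so
  // if exactly one of the two features is listed as available the auditor
  // records just that one, and otherwise it conservatively records both.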
private:
CPUFeaturesAuditor* auditor_;
};
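// A minimal usage sketch of the scope (illustrative only; VisitExample and
// IsHalfPrecision are hypothetical names, not part of this file):
//
//   void CPUFeaturesAuditor::VisitExample(const Instruction* instr) {
//     RecordInstructionFeaturesScope scope(this);  // clears last_instruction_
//     if (!IsHalfPrecision(instr)) return;  // early return: the destructor
//                                           // still folds last_instruction_
//                                           // into seen_
//     scope.Record(CPUFeatures::kFP, CPUFeatures::kFPHalf);
//   }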
void CPUFeaturesAuditor::LoadStoreHelper(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(LoadStoreMask)) {
case LDR_b:
case LDR_q:
case STR_b:
case STR_q:
scope.Record(CPUFeatures::kNEON);
return;
case LDR_h:
case LDR_s:
case LDR_d:
case STR_h:
case STR_s:
case STR_d:
scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::LoadStorePairHelper(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(LoadStorePairMask)) {
case LDP_q:
case STP_q:
scope.Record(CPUFeatures::kNEON);
return;
case LDP_s:
case LDP_d:
case STP_s:
case STP_d: {
scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON);
return;
}
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitAddSubExtended(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitAddSubImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitAddSubShifted(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitAddSubWithCarry(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitRotateRightIntoFlags(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(RotateRightIntoFlagsMask)) {
case RMIF:
scope.Record(CPUFeatures::kFlagM);
return;
}
}
void CPUFeaturesAuditor::VisitEvaluateIntoFlags(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(EvaluateIntoFlagsMask)) {
case SETF8:
case SETF16:
scope.Record(CPUFeatures::kFlagM);
return;
}
}
void CPUFeaturesAuditor::VisitAtomicMemory(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(AtomicMemoryMask)) {
case LDAPRB:
case LDAPRH:
case LDAPR_w:
case LDAPR_x:
scope.Record(CPUFeatures::kRCpc);
return;
default:
// Everything else belongs to the Atomics extension.
scope.Record(CPUFeatures::kAtomics);
return;
}
}
void CPUFeaturesAuditor::VisitBitfield(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitCompareBranch(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitConditionalBranch(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitConditionalCompareImmediate(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitConditionalCompareRegister(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitConditionalSelect(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitCrypto2RegSHA(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitCrypto3RegSHA(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitCryptoAES(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitDataProcessing1Source(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(DataProcessing1SourceMask)) {
case PACIA:
case PACIB:
case PACDA:
case PACDB:
case AUTIA:
case AUTIB:
case AUTDA:
case AUTDB:
case PACIZA:
case PACIZB:
case PACDZA:
case PACDZB:
case AUTIZA:
case AUTIZB:
case AUTDZA:
case AUTDZB:
case XPACI:
case XPACD:
scope.Record(CPUFeatures::kPAuth);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitDataProcessing2Source(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(DataProcessing2SourceMask)) {
case CRC32B:
case CRC32H:
case CRC32W:
case CRC32X:
case CRC32CB:
case CRC32CH:
case CRC32CW:
case CRC32CX:
scope.Record(CPUFeatures::kCRC32);
return;
case PACGA:
scope.Record(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitLoadStoreRCpcUnscaledOffset(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) {
case LDAPURB:
case LDAPURSB_w:
case LDAPURSB_x:
case LDAPURH:
case LDAPURSH_w:
case LDAPURSH_x:
case LDAPUR_w:
case LDAPURSW:
case LDAPUR_x:
scope.Record(CPUFeatures::kRCpc);
VIXL_FALLTHROUGH();
case STLURB:
case STLURH:
case STLUR_w:
case STLUR_x:
scope.Record(CPUFeatures::kRCpcImm);
return;
}
}
void CPUFeaturesAuditor::VisitLoadStorePAC(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
scope.Record(CPUFeatures::kPAuth);
}
void CPUFeaturesAuditor::VisitDataProcessing3Source(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitException(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitExtract(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitFPCompare(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPCompareMask)) {
case FCMP_h:
case FCMP_h_zero:
case FCMPE_h:
case FCMPE_h_zero:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitFPConditionalCompare(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPConditionalCompareMask)) {
case FCCMP_h:
case FCCMPE_h:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitFPConditionalSelect(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
if (instr->Mask(FPConditionalSelectMask) == FCSEL_h) {
scope.Record(CPUFeatures::kFPHalf);
}
}
void CPUFeaturesAuditor::VisitFPDataProcessing1Source(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPDataProcessing1SourceMask)) {
case FMOV_h:
case FABS_h:
case FNEG_h:
case FSQRT_h:
case FRINTN_h:
case FRINTP_h:
case FRINTM_h:
case FRINTZ_h:
case FRINTA_h:
case FRINTX_h:
case FRINTI_h:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
// This category includes some half-precision FCVT instructions that do
// not require FPHalf.
return;
}
}
void CPUFeaturesAuditor::VisitFPDataProcessing2Source(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPDataProcessing2SourceMask)) {
case FMUL_h:
case FDIV_h:
case FADD_h:
case FSUB_h:
case FMAX_h:
case FMIN_h:
case FMAXNM_h:
case FMINNM_h:
case FNMUL_h:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitFPDataProcessing3Source(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPDataProcessing3SourceMask)) {
case FMADD_h:
case FMSUB_h:
case FNMADD_h:
case FNMSUB_h:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitFPFixedPointConvert(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPFixedPointConvertMask)) {
case FCVTZS_wh_fixed:
case FCVTZS_xh_fixed:
case FCVTZU_wh_fixed:
case FCVTZU_xh_fixed:
case SCVTF_hw_fixed:
case SCVTF_hx_fixed:
case UCVTF_hw_fixed:
case UCVTF_hx_fixed:
scope.Record(CPUFeatures::kFPHalf);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitFPImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
if (instr->Mask(FPImmediateMask) == FMOV_h_imm) {
scope.Record(CPUFeatures::kFPHalf);
}
}
void CPUFeaturesAuditor::VisitFPIntegerConvert(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require FP.
scope.Record(CPUFeatures::kFP);
switch (instr->Mask(FPIntegerConvertMask)) {
case FCVTAS_wh:
case FCVTAS_xh:
case FCVTAU_wh:
case FCVTAU_xh:
case FCVTMS_wh:
case FCVTMS_xh:
case FCVTMU_wh:
case FCVTMU_xh:
case FCVTNS_wh:
case FCVTNS_xh:
case FCVTNU_wh:
case FCVTNU_xh:
case FCVTPS_wh:
case FCVTPS_xh:
case FCVTPU_wh:
case FCVTPU_xh:
case FCVTZS_wh:
case FCVTZS_xh:
case FCVTZU_wh:
case FCVTZU_xh:
case FMOV_hw:
case FMOV_hx:
case FMOV_wh:
case FMOV_xh:
case SCVTF_hw:
case SCVTF_hx:
case UCVTF_hw:
case UCVTF_hx:
scope.Record(CPUFeatures::kFPHalf);
return;
case FMOV_d1_x:
case FMOV_x_d1:
scope.Record(CPUFeatures::kNEON);
return;
case FJCVTZS:
scope.Record(CPUFeatures::kJSCVT);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitLoadLiteral(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(LoadLiteralMask)) {
case LDR_s_lit:
case LDR_d_lit:
scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON);
return;
case LDR_q_lit:
scope.Record(CPUFeatures::kNEON);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitLoadStoreExclusive(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(LoadStoreExclusiveMask)) {
case CAS_w:
case CASA_w:
case CASL_w:
case CASAL_w:
case CAS_x:
case CASA_x:
case CASL_x:
case CASAL_x:
case CASB:
case CASAB:
case CASLB:
case CASALB:
case CASH:
case CASAH:
case CASLH:
case CASALH:
case CASP_w:
case CASPA_w:
case CASPL_w:
case CASPAL_w:
case CASP_x:
case CASPA_x:
case CASPL_x:
case CASPAL_x:
scope.Record(CPUFeatures::kAtomics);
return;
case STLLRB:
case LDLARB:
case STLLRH:
case LDLARH:
case STLLR_w:
case LDLAR_w:
case STLLR_x:
case LDLAR_x:
scope.Record(CPUFeatures::kLORegions);
return;
default:
// No special CPU features.
return;
}
}
void CPUFeaturesAuditor::VisitLoadStorePairNonTemporal(
const Instruction* instr) {
LoadStorePairHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStorePairOffset(const Instruction* instr) {
LoadStorePairHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStorePairPostIndex(const Instruction* instr) {
LoadStorePairHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStorePairPreIndex(const Instruction* instr) {
LoadStorePairHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStorePostIndex(const Instruction* instr) {
LoadStoreHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStorePreIndex(const Instruction* instr) {
LoadStoreHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStoreRegisterOffset(
const Instruction* instr) {
LoadStoreHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStoreUnscaledOffset(
const Instruction* instr) {
LoadStoreHelper(instr);
}
void CPUFeaturesAuditor::VisitLoadStoreUnsignedOffset(
const Instruction* instr) {
LoadStoreHelper(instr);
}
void CPUFeaturesAuditor::VisitLogicalImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitLogicalShifted(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitMoveWideImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEON2RegMisc(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEON2RegMiscFPMask)) {
case NEON_FABS:
case NEON_FNEG:
case NEON_FSQRT:
case NEON_FCVTL:
case NEON_FCVTN:
case NEON_FCVTXN:
case NEON_FRINTI:
case NEON_FRINTX:
case NEON_FRINTA:
case NEON_FRINTM:
case NEON_FRINTN:
case NEON_FRINTP:
case NEON_FRINTZ:
case NEON_FCVTNS:
case NEON_FCVTNU:
case NEON_FCVTPS:
case NEON_FCVTPU:
case NEON_FCVTMS:
case NEON_FCVTMU:
case NEON_FCVTZS:
case NEON_FCVTZU:
case NEON_FCVTAS:
case NEON_FCVTAU:
case NEON_SCVTF:
case NEON_UCVTF:
case NEON_FRSQRTE:
case NEON_FRECPE:
case NEON_FCMGT_zero:
case NEON_FCMGE_zero:
case NEON_FCMEQ_zero:
case NEON_FCMLE_zero:
case NEON_FCMLT_zero:
scope.Record(CPUFeatures::kFP);
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEON2RegMiscFP16(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEONHalf.
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEON3Different(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEON3Same(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
scope.Record(CPUFeatures::kFP);
}
switch (instr->Mask(NEON3SameFHMMask)) {
case NEON_FMLAL:
case NEON_FMLAL2:
case NEON_FMLSL:
case NEON_FMLSL2:
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM);
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEON3SameExtra(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if ((instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) ||
(instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD)) {
scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma);
if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf);
} else {
switch (instr->Mask(NEON3SameExtraMask)) {
case NEON_SDOT:
case NEON_UDOT:
scope.Record(CPUFeatures::kDotProduct);
return;
case NEON_SQRDMLAH:
case NEON_SQRDMLSH:
scope.Record(CPUFeatures::kRDM);
return;
default:
// No additional features.
return;
}
}
}
void CPUFeaturesAuditor::VisitNEON3SameFP16(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON FP16 support.
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONAcrossLanes(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) {
// FMAXV_H, FMINV_H, FMAXNMV_H, FMINNMV_H
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf);
} else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
// FMAXV, FMINV, FMAXNMV, FMINNMV
scope.Record(CPUFeatures::kFP);
}
}
void CPUFeaturesAuditor::VisitNEONByIndexedElement(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONByIndexedElementMask)) {
case NEON_SDOT_byelement:
case NEON_UDOT_byelement:
scope.Record(CPUFeatures::kDotProduct);
return;
case NEON_SQRDMLAH_byelement:
case NEON_SQRDMLSH_byelement:
scope.Record(CPUFeatures::kRDM);
return;
default:
// Fall through to check other instructions.
break;
}
switch (instr->Mask(NEONByIndexedElementFPLongMask)) {
case NEON_FMLAL_H_byelement:
case NEON_FMLAL2_H_byelement:
case NEON_FMLSL_H_byelement:
case NEON_FMLSL2_H_byelement:
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM);
return;
default:
// Fall through to check other instructions.
break;
}
switch (instr->Mask(NEONByIndexedElementFPMask)) {
case NEON_FMLA_H_byelement:
case NEON_FMLS_H_byelement:
case NEON_FMUL_H_byelement:
case NEON_FMULX_H_byelement:
scope.Record(CPUFeatures::kNEONHalf);
VIXL_FALLTHROUGH();
case NEON_FMLA_byelement:
case NEON_FMLS_byelement:
case NEON_FMUL_byelement:
case NEON_FMULX_byelement:
scope.Record(CPUFeatures::kFP);
return;
default:
switch (instr->Mask(NEONByIndexedElementFPComplexMask)) {
case NEON_FCMLA_byelement:
scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma);
if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf);
return;
}
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONCopy(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONExtract(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStruct(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStructPostIndex(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStruct(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStructPostIndex(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONModifiedImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if (instr->GetNEONCmode() == 0xf) {
// FMOV (vector, immediate), double-, single- or half-precision.
scope.Record(CPUFeatures::kFP);
if (instr->ExtractBit(11)) scope.Record(CPUFeatures::kNEONHalf);
}
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONPerm(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalar2RegMisc(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
case NEON_FRECPE_scalar:
case NEON_FRECPX_scalar:
case NEON_FRSQRTE_scalar:
case NEON_FCMGT_zero_scalar:
case NEON_FCMGE_zero_scalar:
case NEON_FCMEQ_zero_scalar:
case NEON_FCMLE_zero_scalar:
case NEON_FCMLT_zero_scalar:
case NEON_SCVTF_scalar:
case NEON_UCVTF_scalar:
case NEON_FCVTNS_scalar:
case NEON_FCVTNU_scalar:
case NEON_FCVTPS_scalar:
case NEON_FCVTPU_scalar:
case NEON_FCVTMS_scalar:
case NEON_FCVTMU_scalar:
case NEON_FCVTZS_scalar:
case NEON_FCVTZU_scalar:
case NEON_FCVTAS_scalar:
case NEON_FCVTAU_scalar:
case NEON_FCVTXN_scalar:
scope.Record(CPUFeatures::kFP);
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEONHalf.
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalar3Diff(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalar3Same(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
scope.Record(CPUFeatures::kFP);
}
}
void CPUFeaturesAuditor::VisitNEONScalar3SameExtra(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON and RDM.
scope.Record(CPUFeatures::kNEON, CPUFeatures::kRDM);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalar3SameFP16(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEONHalf.
scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalarByIndexedElement(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONScalarByIndexedElementMask)) {
case NEON_SQRDMLAH_byelement_scalar:
case NEON_SQRDMLSH_byelement_scalar:
scope.Record(CPUFeatures::kRDM);
return;
default:
switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
case NEON_FMLA_H_byelement_scalar:
case NEON_FMLS_H_byelement_scalar:
case NEON_FMUL_H_byelement_scalar:
case NEON_FMULX_H_byelement_scalar:
scope.Record(CPUFeatures::kNEONHalf);
VIXL_FALLTHROUGH();
case NEON_FMLA_byelement_scalar:
case NEON_FMLS_byelement_scalar:
case NEON_FMUL_byelement_scalar:
case NEON_FMULX_byelement_scalar:
scope.Record(CPUFeatures::kFP);
return;
}
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONScalarCopy(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitNEONScalarPairwise(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONScalarPairwiseMask)) {
case NEON_FMAXNMP_h_scalar:
case NEON_FADDP_h_scalar:
case NEON_FMAXP_h_scalar:
case NEON_FMINNMP_h_scalar:
case NEON_FMINP_h_scalar:
scope.Record(CPUFeatures::kNEONHalf);
VIXL_FALLTHROUGH();
case NEON_FADDP_scalar:
case NEON_FMAXP_scalar:
case NEON_FMAXNMP_scalar:
case NEON_FMINP_scalar:
case NEON_FMINNMP_scalar:
scope.Record(CPUFeatures::kFP);
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONScalarShiftImmediate(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONScalarShiftImmediateMask)) {
case NEON_FCVTZS_imm_scalar:
case NEON_FCVTZU_imm_scalar:
case NEON_SCVTF_imm_scalar:
case NEON_UCVTF_imm_scalar:
scope.Record(CPUFeatures::kFP);
// If immh is 0b001x then the data type is FP16, and requires kNEONHalf.
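      // Worked example: immh == 0b0010 or 0b0011 satisfies
      // (immh & 0b1110) == 0b0010, so kNEONHalf is recorded in addition to
      // kFP; immh values encoding single or double precision fail the test
      // and need only kFP.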
if ((instr->GetImmNEONImmh() & 0xe) == 0x2) {
scope.Record(CPUFeatures::kNEONHalf);
}
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONShiftImmediate(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
switch (instr->Mask(NEONShiftImmediateMask)) {
case NEON_SCVTF_imm:
case NEON_UCVTF_imm:
case NEON_FCVTZS_imm:
case NEON_FCVTZU_imm:
scope.Record(CPUFeatures::kFP);
// If immh is 0b001x then the data type is FP16, and requires kNEONHalf.
if ((instr->GetImmNEONImmh() & 0xe) == 0x2) {
scope.Record(CPUFeatures::kNEONHalf);
}
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitNEONTable(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
USE(instr);
}
void CPUFeaturesAuditor::VisitPCRelAddressing(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitSystem(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
CPUFeatures required;
switch (instr->GetInstructionBits()) {
case PACIA1716:
case PACIB1716:
case AUTIA1716:
case AUTIB1716:
case PACIAZ:
case PACIASP:
case PACIBZ:
case PACIBSP:
case AUTIAZ:
case AUTIASP:
case AUTIBZ:
case AUTIBSP:
case XPACLRI:
required.Combine(CPUFeatures::kPAuth);
break;
default:
switch (instr->GetImmHint()) {
case ESB:
required.Combine(CPUFeatures::kRAS);
break;
case BTI:
case BTI_j:
case BTI_c:
case BTI_jc:
required.Combine(CPUFeatures::kBTI);
break;
default:
break;
}
break;
}
// These are all HINT instructions, and behave as NOPs if the corresponding
// features are not implemented, so we record the corresponding features
// only if they are available.
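    // For example, a BTI hint decoded while kBTI is absent from available_
    // executes as a plain NOP, so adding kBTI to seen_ here would be
    // misleading.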
if (available_.Has(required)) scope.Record(required);
} else if (instr->Mask(SystemSysMask) == SYS) {
switch (instr->GetSysOp()) {
// DC instruction variants.
case CVAP:
scope.Record(CPUFeatures::kDCPoP);
break;
case IVAU:
case CVAC:
case CVAU:
case CIVAC:
// No special CPU features.
break;
}
} else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) {
switch (instr->Mask(SystemPStateMask)) {
case CFINV:
scope.Record(CPUFeatures::kFlagM);
break;
case AXFLAG:
case XAFLAG:
scope.Record(CPUFeatures::kAXFlag);
break;
}
}
}
void CPUFeaturesAuditor::VisitTestBranch(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitUnallocated(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitUnconditionalBranch(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
void CPUFeaturesAuditor::VisitUnconditionalBranchToRegister(
const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
case BRAAZ:
case BRABZ:
case BLRAAZ:
case BLRABZ:
case RETAA:
case RETAB:
case BRAA:
case BRAB:
case BLRAA:
case BLRAB:
scope.Record(CPUFeatures::kPAuth);
return;
default:
// No additional features.
return;
}
}
void CPUFeaturesAuditor::VisitUnimplemented(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
USE(instr);
}
} // namespace aarch64
} // namespace vixl
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Arm Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#include <iostream>
#include "../cpu-features.h"
#include "decoder-aarch64.h"
namespace vixl {
namespace aarch64 {
// This visitor records the CPU features that each decoded instruction requires.
// It provides:
// - the set of CPU features required by the most recently decoded instruction,
// - a cumulative set of encountered CPU features,
// - an optional list of 'available' CPU features.
//
// Primarily, this allows the Disassembler and Simulator to share the same CPU
// features logic. However, it can be used standalone to scan code blocks for
// CPU features.
class CPUFeaturesAuditor : public DecoderVisitor {
public:
// Construction arguments:
// - If a decoder is specified, the CPUFeaturesAuditor automatically
// registers itself as a visitor. Otherwise, this can be done manually.
//
// - If an `available` features list is provided, it is used as a hint in
// cases where instructions may be provided by multiple separate features.
// An example of this is FP&SIMD loads and stores: some of these are used
// in both FP and integer SIMD code. If exactly one of those features is
// in `available` when one of these instructions is encountered, then the
// auditor will record that feature. Otherwise, it will record _both_
// features.
explicit CPUFeaturesAuditor(
Decoder* decoder, const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(decoder) {
if (decoder_ != NULL) decoder_->AppendVisitor(this);
}
explicit CPUFeaturesAuditor(
const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(NULL) {}
virtual ~CPUFeaturesAuditor() {
if (decoder_ != NULL) decoder_->RemoveVisitor(this);
}
void ResetSeenFeatures() {
seen_ = CPUFeatures::None();
last_instruction_ = CPUFeatures::None();
}
// Query or set available CPUFeatures.
const CPUFeatures& GetAvailableFeatures() const { return available_; }
void SetAvailableFeatures(const CPUFeatures& available) {
available_ = available;
}
// Query CPUFeatures seen since construction (or the last call to `ResetSeenFeatures()`).
const CPUFeatures& GetSeenFeatures() const { return seen_; }
// Query CPUFeatures from the last instruction visited by this auditor.
const CPUFeatures& GetInstructionFeatures() const {
return last_instruction_;
}
bool InstructionIsAvailable() const {
return available_.Has(last_instruction_);
}
// The common CPUFeatures interface operates on the available_ list.
CPUFeatures* GetCPUFeatures() { return &available_; }
void SetCPUFeatures(const CPUFeatures& available) {
SetAvailableFeatures(available);
}
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
class RecordInstructionFeaturesScope;
void LoadStoreHelper(const Instruction* instr);
void LoadStorePairHelper(const Instruction* instr);
CPUFeatures seen_;
CPUFeatures last_instruction_;
CPUFeatures available_;
Decoder* decoder_;
};
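// A minimal usage sketch (an assumption about a typical caller, not code from
// this commit; `code` stands for a pointer to a decodable Instruction):
//
//   Decoder decoder;
//   CPUFeaturesAuditor auditor(&decoder, CPUFeatures(CPUFeatures::kFP));
//   decoder.Decode(code);  // the auditor visits the instruction as a visitor
//   CPUFeatures required = auditor.GetInstructionFeatures();
//   bool can_run = auditor.InstructionIsAvailable();  // available_.Has(required)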
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "decoder-aarch64.h"
namespace vixl {
namespace aarch64 {
void Decoder::DecodeInstruction(const Instruction* instr) {
if (instr->ExtractBits(28, 27) == 0) {
VisitUnallocated(instr);
} else {
switch (instr->ExtractBits(27, 24)) {
// 0: PC relative addressing.
case 0x0:
DecodePCRelAddressing(instr);
break;
// 1: Add/sub immediate.
case 0x1:
DecodeAddSubImmediate(instr);
break;
// A: Logical shifted register.
// Add/sub with carry.
// Conditional compare register.
// Conditional compare immediate.
// Conditional select.
// Data processing 1 source.
// Data processing 2 source.
// B: Add/sub shifted register.
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
case 0xB:
DecodeDataProcessing(instr);
break;
// 2: Logical immediate.
// Move wide immediate.
case 0x2:
DecodeLogical(instr);
break;
// 3: Bitfield.
// Extract.
case 0x3:
DecodeBitfieldExtract(instr);
break;
// 4: Unconditional branch immediate.
// Exception generation.
// Compare and branch immediate.
// 5: Compare and branch immediate.
// Conditional branch.
// System.
// 6,7: Unconditional branch.
// Test and branch immediate.
case 0x4:
case 0x5:
case 0x6:
case 0x7:
DecodeBranchSystemException(instr);
break;
// 8,9: Load/store register pair post-index.
// Load register literal.
// Load/store register unscaled immediate.
// Load/store register immediate post-index.
// Load/store register immediate pre-index.
// Load/store register offset.
// Load/store exclusive.
// C,D: Load/store register pair offset.
// Load/store register pair pre-index.
// Load/store register unsigned immediate.
// Advanced SIMD.
case 0x8:
case 0x9:
case 0xC:
case 0xD:
DecodeLoadStore(instr);
break;
// E: FP fixed point conversion.
// FP integer conversion.
// FP data processing 1 source.
// FP compare.
// FP immediate.
// FP data processing 2 source.
// FP conditional compare.
// FP conditional select.
// Advanced SIMD.
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
case 0xF:
DecodeFP(instr);
break;
}
}
}
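// Worked example (the encoding is illustrative, not taken from this file):
// 0x91000420 is ADD x0, x1, #1. Bits <28:27> are 0b10, so the instruction is
// not rejected by the top-level check, and bits <27:24> are 0x1, so
// DecodeInstruction dispatches to DecodeAddSubImmediate, which calls
// VisitAddSubImmediate because bit 23 is 0.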
void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.push_back(new_visitor);
}
void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.push_front(new_visitor);
}
void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
VIXL_ASSERT(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
it++;
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
VIXL_ASSERT(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
void Decoder::DecodePCRelAddressing(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
VIXL_ASSERT(instr->ExtractBit(28) == 0x1);
VisitPCRelAddressing(instr);
}
void Decoder::DecodeBranchSystemException(const Instruction* instr) {
VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x4) ||
(instr->ExtractBits(27, 24) == 0x5) ||
(instr->ExtractBits(27, 24) == 0x6) ||
(instr->ExtractBits(27, 24) == 0x7));
switch (instr->ExtractBits(31, 29)) {
case 0:
case 4: {
VisitUnconditionalBranch(instr);
break;
}
case 1:
case 5: {
if (instr->ExtractBit(25) == 0) {
VisitCompareBranch(instr);
} else {
VisitTestBranch(instr);
}
break;
}
case 2: {
if (instr->ExtractBit(25) == 0) {
if ((instr->ExtractBit(24) == 0x1) ||
(instr->Mask(0x01000010) == 0x00000010)) {
VisitUnallocated(instr);
} else {
VisitConditionalBranch(instr);
}
} else {
VisitUnallocated(instr);
}
break;
}
case 6: {
if (instr->ExtractBit(25) == 0) {
if (instr->ExtractBit(24) == 0) {
if ((instr->ExtractBits(4, 2) != 0) ||
(instr->Mask(0x00E0001D) == 0x00200001) ||
(instr->Mask(0x00E0001D) == 0x00400001) ||
(instr->Mask(0x00E0001E) == 0x00200002) ||
(instr->Mask(0x00E0001E) == 0x00400002) ||
(instr->Mask(0x00E0001C) == 0x00600000) ||
(instr->Mask(0x00E0001C) == 0x00800000) ||
(instr->Mask(0x00E0001F) == 0x00A00000) ||
(instr->Mask(0x00C0001C) == 0x00C00000)) {
VisitUnallocated(instr);
} else {
VisitException(instr);
}
} else {
if (instr->ExtractBits(23, 22) == 0) {
const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
if ((instr->ExtractBits(21, 19) == 0x4) ||
(masked_003FF0E0 == 0x00033000) ||
(masked_003FF0E0 == 0x003FF020) ||
(masked_003FF0E0 == 0x003FF060) ||
(masked_003FF0E0 == 0x003FF0E0) ||
(instr->Mask(0x00388000) == 0x00008000) ||
(instr->Mask(0x0038E000) == 0x00000000) ||
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
VisitUnallocated(instr);
} else {
VisitSystem(instr);
}
} else {
VisitUnallocated(instr);
}
}
} else {
if (((instr->ExtractBit(24) == 0x1) &&
(instr->ExtractBits(23, 21) > 0x1)) ||
(instr->ExtractBits(20, 16) != 0x1F) ||
(instr->ExtractBits(15, 10) == 0x1) ||
(instr->ExtractBits(15, 10) > 0x3) ||
(instr->ExtractBits(24, 21) == 0x3) ||
(instr->ExtractBits(24, 22) == 0x3)) {
VisitUnallocated(instr);
} else {
VisitUnconditionalBranchToRegister(instr);
}
}
break;
}
case 3:
case 7: {
VisitUnallocated(instr);
break;
}
}
}
void Decoder::DecodeLoadStore(const Instruction* instr) {
VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x8) ||
(instr->ExtractBits(27, 24) == 0x9) ||
(instr->ExtractBits(27, 24) == 0xC) ||
(instr->ExtractBits(27, 24) == 0xD));
// TODO(all): rearrange the tree to integrate this branch.
if ((instr->ExtractBit(28) == 0) && (instr->ExtractBit(29) == 0) &&
(instr->ExtractBit(26) == 1)) {
DecodeNEONLoadStore(instr);
return;
}
if (instr->ExtractBit(24) == 0) {
if (instr->ExtractBit(28) == 0) {
if (instr->ExtractBit(29) == 0) {
if (instr->ExtractBit(26) == 0) {
VisitLoadStoreExclusive(instr);
} else {
VIXL_UNREACHABLE();
}
} else {
if ((instr->ExtractBits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(23) == 0) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePairNonTemporal(instr);
}
} else {
VisitLoadStorePairPostIndex(instr);
}
}
}
} else {
if (instr->ExtractBit(29) == 0) {
if (instr->Mask(0xC4000000) == 0xC4000000) {
VisitUnallocated(instr);
} else {
VisitLoadLiteral(instr);
}
} else {
if ((instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(21) == 0) {
switch (instr->ExtractBits(11, 10)) {
case 0: {
VisitLoadStoreUnscaledOffset(instr);
break;
}
case 1: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePostIndex(instr);
}
break;
}
case 2: {
// TODO: VisitLoadStoreRegisterOffsetUnpriv.
VisitUnimplemented(instr);
break;
}
case 3: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePreIndex(instr);
}
break;
}
}
} else {
if (instr->ExtractBits(11, 10) == 0x2) {
if (instr->ExtractBit(14) == 0) {
VisitUnallocated(instr);
} else {
VisitLoadStoreRegisterOffset(instr);
}
} else {
if (instr->ExtractBits(11, 10) == 0x0) {
if (instr->ExtractBit(25) == 0) {
if (instr->ExtractBit(26) == 0) {
if ((instr->ExtractBit(15) == 1) &&
((instr->ExtractBits(14, 12) == 0x1) ||
(instr->ExtractBit(13) == 1) ||
(instr->ExtractBits(14, 12) == 0x5) ||
((instr->ExtractBits(14, 12) == 0x4) &&
((instr->ExtractBit(23) == 0) ||
(instr->ExtractBits(23, 22) == 0x3))))) {
VisitUnallocated(instr);
} else {
VisitAtomicMemory(instr);
}
} else {
VisitUnallocated(instr);
}
} else {
VisitUnallocated(instr);
}
} else {
if (instr->ExtractBit(25) == 0) {
if (instr->ExtractBit(26) == 0) {
if (instr->ExtractBits(31, 30) == 0x3) {
VisitLoadStorePAC(instr);
} else {
VisitUnallocated(instr);
}
} else {
VisitUnallocated(instr);
}
} else {
VisitUnallocated(instr);
}
}
}
}
}
}
}
} else {
if (instr->ExtractBit(28) == 0) {
if (instr->ExtractBit(29) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->ExtractBits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(23) == 0) {
VisitLoadStorePairOffset(instr);
} else {
VisitLoadStorePairPreIndex(instr);
}
}
}
} else {
if (instr->ExtractBit(29) == 0) {
if ((instr->ExtractBit(26) == 0) && (instr->ExtractBit(21) == 0) &&
(instr->ExtractBits(11, 10) == 0x0) &&
((instr->ExtractBits(31, 30) < 0x2) ||
((instr->ExtractBits(31, 30) == 0x2) &&
(instr->ExtractBits(23, 22) != 0x3)) ||
((instr->ExtractBits(31, 30) == 0x3) &&
(instr->ExtractBits(23, 22) < 0x2)))) {
VisitLoadStoreRCpcUnscaledOffset(instr);
} else {
VisitUnallocated(instr);
}
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
VisitLoadStoreUnsignedOffset(instr);
}
}
}
}
}
void Decoder::DecodeLogical(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(23) == 0) {
VisitLogicalImmediate(instr);
} else {
if (instr->ExtractBits(30, 29) == 0x1) {
VisitUnallocated(instr);
} else {
VisitMoveWideImmediate(instr);
}
}
}
}
void Decoder::DecodeBitfieldExtract(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else if (instr->ExtractBit(23) == 0) {
if ((instr->Mask(0x80200000) == 0x00200000) ||
(instr->Mask(0x60000000) == 0x60000000)) {
VisitUnallocated(instr);
} else {
VisitBitfield(instr);
}
} else {
if ((instr->Mask(0x60200000) == 0x00200000) ||
(instr->Mask(0x60000000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitExtract(instr);
}
}
}
void Decoder::DecodeAddSubImmediate(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x1);
if (instr->ExtractBit(23) == 1) {
VisitUnallocated(instr);
} else {
VisitAddSubImmediate(instr);
}
}
void Decoder::DecodeDataProcessing(const Instruction* instr) {
VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xA) ||
(instr->ExtractBits(27, 24) == 0xB));
if (instr->ExtractBit(24) == 0) {
if (instr->ExtractBit(28) == 0) {
if (instr->Mask(0x80008000) == 0x00008000) {
VisitUnallocated(instr);
} else {
VisitLogicalShifted(instr);
}
} else {
switch (instr->ExtractBits(23, 21)) {
case 0: {
if (instr->ExtractBits(15, 10) != 0) {
if (instr->ExtractBits(14, 10) == 0x1) {
if (instr->Mask(0xE0000010) == 0xA0000000) {
VisitRotateRightIntoFlags(instr);
} else {
VisitUnallocated(instr);
}
} else {
if (instr->ExtractBits(13, 10) == 0x2) {
if (instr->Mask(0xE01F801F) == 0x2000000D) {
VisitEvaluateIntoFlags(instr);
} else {
VisitUnallocated(instr);
}
} else {
VisitUnallocated(instr);
}
}
} else {
VisitAddSubWithCarry(instr);
}
break;
}
case 2: {
if ((instr->ExtractBit(29) == 0) || (instr->Mask(0x00000410) != 0)) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(11) == 0) {
VisitConditionalCompareRegister(instr);
} else {
VisitConditionalCompareImmediate(instr);
}
}
break;
}
case 4: {
if (instr->Mask(0x20000800) != 0x00000000) {
VisitUnallocated(instr);
} else {
VisitConditionalSelect(instr);
}
break;
}
case 6: {
if (instr->ExtractBit(29) == 0x1) {
VisitUnallocated(instr);
VIXL_FALLTHROUGH();
} else {
if (instr->ExtractBit(30) == 0) {
if ((instr->ExtractBit(15) == 0x1) ||
(instr->ExtractBits(15, 11) == 0) ||
(instr->ExtractBits(15, 12) == 0x1) ||
((instr->ExtractBits(15, 12) == 0x3) &&
(instr->ExtractBit(31) == 0)) ||
(instr->ExtractBits(15, 13) == 0x3) ||
(instr->Mask(0x8000EC00) == 0x00004C00) ||
(instr->Mask(0x8000E800) == 0x80004000) ||
(instr->Mask(0x8000E400) == 0x80004000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing2Source(instr);
}
} else {
if ((instr->ExtractBits(20, 17) != 0) ||
(instr->ExtractBit(15) == 1) ||
((instr->ExtractBit(16) == 1) &&
((instr->ExtractBits(14, 10) > 17) ||
(instr->ExtractBit(31) == 0))) ||
((instr->ExtractBit(16) == 0) &&
((instr->ExtractBits(14, 13) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)))) {
VisitUnallocated(instr);
} else {
VisitDataProcessing1Source(instr);
}
}
break;
}
}
case 1:
case 3:
case 5:
case 7:
VisitUnallocated(instr);
break;
}
}
} else {
if (instr->ExtractBit(28) == 0) {
if (instr->ExtractBit(21) == 0) {
if ((instr->ExtractBits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else {
VisitAddSubShifted(instr);
}
} else {
if ((instr->Mask(0x00C00000) != 0x00000000) ||
(instr->Mask(0x00001400) == 0x00001400) ||
(instr->Mask(0x00001800) == 0x00001800)) {
VisitUnallocated(instr);
} else {
VisitAddSubExtended(instr);
}
}
} else {
if ((instr->ExtractBit(30) == 0x1) ||
(instr->ExtractBits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
(instr->Mask(0x60E00000) == 0x00E00000) ||
(instr->Mask(0x60E00000) == 0x00800000) ||
(instr->Mask(0x60E00000) == 0x00600000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing3Source(instr);
}
}
}
}
void Decoder::DecodeFP(const Instruction* instr) {
VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xE) ||
(instr->ExtractBits(27, 24) == 0xF));
if (instr->ExtractBit(28) == 0) {
DecodeNEONVectorDataProcessing(instr);
} else {
if (instr->ExtractBits(31, 30) == 0x3) {
VisitUnallocated(instr);
} else if (instr->ExtractBits(31, 30) == 0x1) {
DecodeNEONScalarDataProcessing(instr);
} else {
if (instr->ExtractBit(29) == 0) {
if (instr->ExtractBit(24) == 0) {
if (instr->ExtractBit(21) == 0) {
if ((instr->ExtractBits(23, 22) == 0x2) ||
(instr->ExtractBit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x00160000) == 0x00000000) ||
(instr->Mask(0x00160000) == 0x00120000)) {
VisitUnallocated(instr);
} else {
VisitFPFixedPointConvert(instr);
}
} else {
if (instr->ExtractBits(15, 10) == 32) {
VisitUnallocated(instr);
} else if (instr->ExtractBits(15, 10) == 0) {
if ((instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x000E0000) == 0x000C0000) ||
(instr->Mask(0x00160000) == 0x00120000) ||
(instr->Mask(0x00160000) == 0x00140000) ||
(instr->Mask(0x20C40000) == 0x00800000) ||
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
(instr->Mask(0xA0D60000) == 0x00160000) ||
(instr->Mask(0xA0D60000) == 0x80560000) ||
(instr->Mask(0xA0D60000) == 0x80960000)) {
VisitUnallocated(instr);
} else {
VisitFPIntegerConvert(instr);
}
} else if (instr->ExtractBits(14, 10) == 16) {
const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
if ((instr->Mask(0x80180000) != 0) ||
(masked_A0DF8000 == 0x00020000) ||
(masked_A0DF8000 == 0x00030000) ||
(masked_A0DF8000 == 0x00068000) ||
(masked_A0DF8000 == 0x00428000) ||
(masked_A0DF8000 == 0x00430000) ||
(masked_A0DF8000 == 0x00468000) ||
(instr->Mask(0xA0D80000) == 0x00800000) ||
(instr->Mask(0xA0DF0000) == 0x00C30000) ||
(instr->Mask(0xA0DF8000) == 0x00C68000)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing1Source(instr);
}
} else if (instr->ExtractBits(13, 10) == 8) {
if ((instr->ExtractBits(15, 14) != 0) ||
(instr->ExtractBits(2, 0) != 0) ||
(instr->ExtractBit(31) == 1) ||
(instr->ExtractBits(23, 22) == 0x2)) {
VisitUnallocated(instr);
} else {
VisitFPCompare(instr);
}
} else if (instr->ExtractBits(12, 10) == 4) {
if ((instr->ExtractBits(9, 5) != 0) ||
// Valid enc: 01d, 00s, 11h.
(instr->ExtractBits(23, 22) == 0x2) ||
(instr->ExtractBit(31) == 1)) {
VisitUnallocated(instr);
} else {
VisitFPImmediate(instr);
}
} else {
if ((instr->ExtractBits(23, 22) == 0x2) ||
(instr->ExtractBit(31) == 1)) {
VisitUnallocated(instr);
} else {
switch (instr->ExtractBits(11, 10)) {
case 1: {
VisitFPConditionalCompare(instr);
break;
}
case 2: {
if (instr->ExtractBits(15, 12) > 0x8) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing2Source(instr);
}
break;
}
case 3: {
VisitFPConditionalSelect(instr);
break;
}
default:
VIXL_UNREACHABLE();
}
}
}
}
} else {
// Bit 30 == 1 has been handled earlier.
VIXL_ASSERT(instr->ExtractBit(30) == 0);
if ((instr->Mask(0xA0000000) != 0) ||
(instr->ExtractBits(23, 22) == 0x2)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing3Source(instr);
}
}
} else {
VisitUnallocated(instr);
}
}
}
}
void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(29, 25) == 0x6);
if (instr->ExtractBit(31) == 0) {
if ((instr->ExtractBit(24) == 0) && (instr->ExtractBit(21) == 1)) {
VisitUnallocated(instr);
return;
}
if (instr->ExtractBit(23) == 0) {
if (instr->ExtractBits(20, 16) == 0) {
if (instr->ExtractBit(24) == 0) {
VisitNEONLoadStoreMultiStruct(instr);
} else {
VisitNEONLoadStoreSingleStruct(instr);
}
} else {
VisitUnallocated(instr);
}
} else {
if (instr->ExtractBit(24) == 0) {
VisitNEONLoadStoreMultiStructPostIndex(instr);
} else {
VisitNEONLoadStoreSingleStructPostIndex(instr);
}
}
} else {
VisitUnallocated(instr);
}
}
void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(28, 25) == 0x7);
if (instr->ExtractBit(31) == 0) {
if (instr->ExtractBit(24) == 0) {
if (instr->ExtractBit(21) == 0) {
if (instr->ExtractBit(15) == 0) {
if (instr->ExtractBit(10) == 0) {
if (instr->ExtractBit(29) == 0) {
if (instr->ExtractBit(11) == 0) {
VisitNEONTable(instr);
} else {
VisitNEONPerm(instr);
}
} else {
VisitNEONExtract(instr);
}
} else {
if (instr->ExtractBits(23, 22) == 0) {
VisitNEONCopy(instr);
} else if (instr->ExtractBit(14) == 0x0 &&
instr->ExtractBit(22) == 0x1) {
// U + a + opcode.
uint8_t decode_field =
(instr->ExtractBit(29) << 1) | instr->ExtractBit(23);
decode_field = (decode_field << 3) | instr->ExtractBits(13, 11);
switch (decode_field) {
case 0x5:
case 0xB:
case 0xC:
case 0xD:
case 0x11:
case 0x19:
case 0x1B:
case 0x1F:
VisitUnallocated(instr);
break;
default:
VisitNEON3SameFP16(instr);
break;
}
} else {
VisitUnallocated(instr);
}
}
} else if (instr->ExtractBit(10) == 0) {
VisitUnallocated(instr);
} else if ((instr->ExtractBits(14, 11) == 0x3) ||
(instr->ExtractBits(14, 13) == 0x1)) {
// opcode = 0b0011
// opcode = 0b01xx
VisitUnallocated(instr);
} else if (instr->ExtractBit(29) == 0) {
// U == 0
if (instr->ExtractBits(14, 11) == 0x2) {
// opcode = 0b0010
VisitNEON3SameExtra(instr);
} else {
VisitUnallocated(instr);
}
} else {
// U == 1
if ((instr->ExtractBits(14, 11) == 0xd) ||
(instr->ExtractBits(14, 11) == 0xf)) {
// opcode = 0b11x1
VisitUnallocated(instr);
} else {
VisitNEON3SameExtra(instr);
}
}
} else {
if (instr->ExtractBit(10) == 0) {
if (instr->ExtractBit(11) == 0) {
VisitNEON3Different(instr);
} else {
if (instr->ExtractBits(18, 17) == 0) {
if (instr->ExtractBit(20) == 0) {
if (instr->ExtractBit(19) == 0) {
VisitNEON2RegMisc(instr);
} else {
if (instr->ExtractBits(30, 29) == 0x2) {
VisitCryptoAES(instr);
} else {
VisitUnallocated(instr);
}
}
} else {
if (instr->ExtractBit(19) == 0) {
VisitNEONAcrossLanes(instr);
} else {
if (instr->ExtractBit(22) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->ExtractBits(16, 15) == 0x0) ||
(instr->ExtractBits(16, 14) == 0x2) ||
(instr->ExtractBits(16, 15) == 0x2) ||
(instr->ExtractBits(16, 12) == 0x1e) ||
((instr->ExtractBit(23) == 0) &&
((instr->ExtractBits(16, 14) == 0x3) ||
(instr->ExtractBits(16, 12) == 0x1f))) ||
((instr->ExtractBit(23) == 1) &&
(instr->ExtractBits(16, 12) == 0x1c))) {
VisitUnallocated(instr);
} else {
VisitNEON2RegMiscFP16(instr);
}
}
}
}
} else {
VisitUnallocated(instr);
}
}
} else {
VisitNEON3Same(instr);
}
}
} else {
if (instr->ExtractBit(10) == 0) {
VisitNEONByIndexedElement(instr);
} else {
if (instr->ExtractBit(23) == 0) {
if (instr->ExtractBits(22, 19) == 0) {
VisitNEONModifiedImmediate(instr);
} else {
VisitNEONShiftImmediate(instr);
}
} else {
VisitUnallocated(instr);
}
}
}
} else {
VisitUnallocated(instr);
}
}
void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
VIXL_ASSERT(instr->ExtractBits(28, 25) == 0xF);
if (instr->ExtractBit(24) == 0) {
if (instr->ExtractBit(21) == 0) {
if (instr->ExtractBit(15) == 0) {
if (instr->ExtractBit(10) == 0) {
if (instr->ExtractBit(29) == 0) {
if (instr->ExtractBit(11) == 0) {
VisitCrypto3RegSHA(instr);
} else {
VisitUnallocated(instr);
}
} else {
VisitUnallocated(instr);
}
} else {
if (instr->ExtractBits(23, 22) == 0) {
VisitNEONScalarCopy(instr);
} else {
if (instr->Mask(0x00404000) == 0x00400000) {
if ((instr->ExtractBits(13, 11) == 0x6) ||
(instr->ExtractBits(13, 11) < 2) ||
((instr->Mask(0x20800000) == 0x00000000) &&
((instr->ExtractBits(13, 11) < 0x3) ||
(instr->ExtractBits(13, 11) == 0x5))) ||
((instr->Mask(0x20800000) == 0x00800000) &&
(instr->ExtractBits(13, 11) < 0x7)) ||
((instr->Mask(0x20800000) == 0x20000000) &&
((instr->ExtractBits(13, 11) < 0x4) ||
(instr->ExtractBits(13, 11) == 0x7))) ||
((instr->Mask(0x20800000) == 0x20800000) &&
(instr->ExtractBits(12, 11) == 0x3))) {
VisitUnallocated(instr);
} else {
VisitNEONScalar3SameFP16(instr);
}
} else {
VisitUnallocated(instr);
}
}
}
} else {
if (instr->ExtractBit(29) == 0) {
VisitUnallocated(instr);
} else {
if (instr->ExtractBit(10) == 0) {
VisitUnallocated(instr);
} else {
VisitNEONScalar3SameExtra(instr);
}
}
}
} else {
if (instr->ExtractBit(10) == 0) {
if (instr->ExtractBit(11) == 0) {
VisitNEONScalar3Diff(instr);
} else {
if (instr->ExtractBits(18, 17) == 0) {
if (instr->ExtractBit(20) == 0) {
if (instr->ExtractBit(19) == 0) {
VisitNEONScalar2RegMisc(instr);
} else {
if (instr->ExtractBit(29) == 0) {
VisitCrypto2RegSHA(instr);
} else {
VisitUnallocated(instr);
}
}
} else {
if (instr->ExtractBit(19) == 0) {
VisitNEONScalarPairwise(instr);
} else {
if (instr->ExtractBit(22) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->ExtractBits(16, 15) == 0x0) ||
(instr->ExtractBits(16, 14) == 0x2) ||
(instr->ExtractBits(16, 15) == 0x2) ||
(instr->ExtractBits(16, 13) == 0xc) ||
(instr->ExtractBits(16, 12) == 0x1e) ||
((instr->ExtractBit(23) == 0) &&
((instr->ExtractBits(16, 14) == 0x3) ||
(instr->ExtractBits(16, 12) == 0x1f))) ||
((instr->ExtractBit(23) == 1) &&
((instr->ExtractBits(16, 12) == 0xf) ||
(instr->ExtractBits(16, 12) == 0x1c) ||
((instr->ExtractBit(29) == 1) &&
((instr->ExtractBits(16, 12) == 0xe) ||
(instr->ExtractBits(16, 12) == 0x1f)))))) {
VisitUnallocated(instr);
} else {
VisitNEONScalar2RegMiscFP16(instr);
}
}
}
}
} else {
VisitUnallocated(instr);
}
}
} else {
VisitNEONScalar3Same(instr);
}
}
} else {
if (instr->ExtractBit(10) == 0) {
VisitNEONScalarByIndexedElement(instr);
} else {
if (instr->ExtractBit(23) == 0) {
VisitNEONScalarShiftImmediate(instr);
} else {
VisitUnallocated(instr);
}
}
}
}
#define DEFINE_VISITOR_CALLERS(A) \
void Decoder::Visit##A(const Instruction* instr) { \
VIXL_ASSERT(((A##FMask == 0) && (A##Fixed == 0)) || \
(instr->Mask(A##FMask) == A##Fixed)); \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
(*it)->Visit##A(instr); \
} \
}
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} // namespace aarch64
} // namespace vixl
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
#define VIXL_AARCH64_DECODER_AARCH64_H_
#include <list>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST_THAT_RETURN(V) \
V(AddSubExtended) \
V(AddSubImmediate) \
V(AddSubShifted) \
V(AddSubWithCarry) \
V(AtomicMemory) \
V(Bitfield) \
V(CompareBranch) \
V(ConditionalBranch) \
V(ConditionalCompareImmediate) \
V(ConditionalCompareRegister) \
V(ConditionalSelect) \
V(Crypto2RegSHA) \
V(Crypto3RegSHA) \
V(CryptoAES) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(Exception) \
V(Extract) \
V(EvaluateIntoFlags) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPFixedPointConvert) \
V(FPImmediate) \
V(FPIntegerConvert) \
V(LoadLiteral) \
V(LoadStoreExclusive) \
V(LoadStorePAC) \
V(LoadStorePairNonTemporal) \
V(LoadStorePairOffset) \
V(LoadStorePairPostIndex) \
V(LoadStorePairPreIndex) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRCpcUnscaledOffset) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnscaledOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalImmediate) \
V(LogicalShifted) \
V(MoveWideImmediate) \
V(NEON2RegMisc) \
V(NEON2RegMiscFP16) \
V(NEON3Different) \
V(NEON3Same) \
V(NEON3SameExtra) \
V(NEON3SameFP16) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONPerm) \
V(NEONScalar2RegMisc) \
V(NEONScalar2RegMiscFP16) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalar3SameExtra) \
V(NEONScalar3SameFP16) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(PCRelAddressing) \
V(RotateRightIntoFlags) \
V(System) \
V(TestBranch) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister)
#define VISITOR_LIST_THAT_DONT_RETURN(V) \
V(Unallocated) \
V(Unimplemented)
#define VISITOR_LIST(V) \
VISITOR_LIST_THAT_RETURN(V) \
VISITOR_LIST_THAT_DONT_RETURN(V)
namespace vixl {
namespace aarch64 {
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
public:
enum VisitorConstness { kConstVisitor, kNonConstVisitor };
explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
: constness_(constness) {}
virtual ~DecoderVisitor() {}
#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
bool IsConstVisitor() const { return constness_ == kConstVisitor; }
Instruction* MutableInstruction(const Instruction* instr) {
VIXL_ASSERT(!IsConstVisitor());
return const_cast<Instruction*>(instr);
}
private:
const VisitorConstness constness_;
};
class Decoder {
public:
Decoder() {}
// Top-level wrappers around the actual decoding function.
void Decode(const Instruction* instr) {
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
VIXL_ASSERT((*it)->IsConstVisitor());
}
DecodeInstruction(instr);
}
void Decode(Instruction* instr) {
DecodeInstruction(const_cast<const Instruction*>(instr));
}
// Decode all instructions from start (inclusive) to end (exclusive).
template <typename T>
void Decode(T start, T end) {
for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
Decode(instr);
}
}
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in order.
// A visitor can be registered multiple times.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2);
// d.AppendVisitor(V3);
//
// d.Decode(i);
//
// will call in order visitor methods in V2, V1, V2, V3.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
// These helpers register `new_visitor` before or after the first instance of
  // `registered_visitor` in the list.
// So if
// V1, V2, V1, V2
// are registered in this order in the decoder, calls to
// d.InsertVisitorAfter(V3, V1);
// d.InsertVisitorBefore(V4, V2);
// will yield the order
// V1, V3, V4, V2, V1, V2
//
// For more complex modifications of the order of registered visitors, one can
// directly access and modify the list of visitors via the `visitors()'
// accessor.
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove all instances of a previously registered visitor class from the list
// of visitors stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
std::list<DecoderVisitor*>* visitors() { return &visitors_; }
private:
// Decodes an instruction and calls the visitor functions registered with the
// Decoder class.
void DecodeInstruction(const Instruction* instr);
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(const Instruction* instr);
  // Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(const Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(const Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(const Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(const Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(const Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(const Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(const Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeNEONLoadStore(const Instruction* instr);
// Decode the Advanced SIMD (NEON) vector data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0x7.
void DecodeNEONVectorDataProcessing(const Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(const Instruction* instr);
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DECODER_AARCH64_H_
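// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream VIXL sources above): one way
// the Decoder/DecoderVisitor pair declared in this header is typically wired
// together. The "example" namespace, CountingVisitor class and helper
// function are invented for the example; only the VIXL types, VIXL_OVERRIDE
// and the VISITOR_LIST macro come from the headers above. It assumes the
// vixl root directory is on the include path.
#include "aarch64/decoder-aarch64.h"

namespace example {

using vixl::aarch64::Decoder;
using vixl::aarch64::DecoderVisitor;
using vixl::aarch64::Instruction;

// A visitor that simply counts decoded instructions. Every Visit* hook
// required by VISITOR_LIST is given the same trivial body.
class CountingVisitor : public DecoderVisitor {
 public:
  CountingVisitor() : count_(0) {}
#define EXAMPLE_DEFINE_VISITOR(A)                                  \
  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE {  \
    (void)instr;                                                    \
    count_++;                                                       \
  }
  VISITOR_LIST(EXAMPLE_DEFINE_VISITOR)
#undef EXAMPLE_DEFINE_VISITOR
  int GetCount() const { return count_; }

 private:
  int count_;
};

// Decode every instruction in [start, end) and return how many were seen.
// Visitors are called in the order they were appended (see the comments on
// AppendVisitor/PrependVisitor above).
inline int CountInstructions(const Instruction* start, const Instruction* end) {
  Decoder decoder;
  CountingVisitor visitor;
  decoder.AppendVisitor(&visitor);
  decoder.Decode(start, end);
  return visitor.GetCount();
}

}  // namespace example
// ---------------------------------------------------------------------------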
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DISASM_AARCH64_H
#define VIXL_AARCH64_DISASM_AARCH64_H
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "cpu-features-auditor-aarch64.h"
#include "decoder-aarch64.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
class Disassembler : public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(const Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
// TODO: This currently doesn't allow renaming of V registers.
virtual void AppendRegisterNameToOutput(const Instruction* instr,
const CPURegister& reg);
// Prints a PC-relative offset. This is used for example when disassembling
// branches to immediate offsets.
virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
int64_t offset);
// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
const void* addr);
// Same as the above, but for addresses that are not relative to the code
// buffer. They are currently not used by VIXL.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);
public:
// Get/Set the offset that should be added to code addresses when printing
// code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
// helpers.
// Below is an example of how a branch immediate instruction in memory at
// address 0xb010200 would disassemble with different offsets.
// Base address | Disassembly
// 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
// 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
// 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
int64_t CodeRelativeAddress(const void* instr);
private:
void Format(const Instruction* instr,
const char* mnemonic,
const char* format);
void Substitute(const Instruction* instr, const char* string);
int SubstituteField(const Instruction* instr, const char* format);
int SubstituteRegisterField(const Instruction* instr, const char* format);
int SubstituteImmediateField(const Instruction* instr, const char* format);
int SubstituteLiteralField(const Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(const Instruction* instr,
const char* format);
int SubstituteShiftField(const Instruction* instr, const char* format);
int SubstituteExtendField(const Instruction* instr, const char* format);
int SubstituteConditionField(const Instruction* instr, const char* format);
int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
int SubstituteBranchTargetField(const Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);
int SubstituteSysOpField(const Instruction* instr, const char* format);
int SubstituteCrField(const Instruction* instr, const char* format);
bool RdIsZROrSP(const Instruction* instr) const {
return (instr->GetRd() == kZeroRegCode);
}
bool RnIsZROrSP(const Instruction* instr) const {
return (instr->GetRn() == kZeroRegCode);
}
bool RmIsZROrSP(const Instruction* instr) const {
return (instr->GetRm() == kZeroRegCode);
}
bool RaIsZROrSP(const Instruction* instr) const {
return (instr->GetRa() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
int64_t code_address_offset() const { return code_address_offset_; }
protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
void set_code_address_offset(int64_t code_address_offset) {
code_address_offset_ = code_address_offset;
}
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
int64_t code_address_offset_;
};
class PrintDisassembler : public Disassembler {
public:
explicit PrintDisassembler(FILE* stream)
: cpu_features_auditor_(NULL),
cpu_features_prefix_("// Needs: "),
cpu_features_suffix_(""),
stream_(stream) {}
// Convenience helpers for quick disassembly, without having to manually
// create a decoder.
void DisassembleBuffer(const Instruction* start, uint64_t size);
void DisassembleBuffer(const Instruction* start, const Instruction* end);
void Disassemble(const Instruction* instr);
// If a CPUFeaturesAuditor is specified, it will be used to annotate
// disassembly. The CPUFeaturesAuditor is expected to visit the instructions
// _before_ the disassembler, such that the CPUFeatures information is
// available when the disassembler is called.
void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) {
cpu_features_auditor_ = auditor;
}
// Set the prefix to appear before the CPU features annotations.
void SetCPUFeaturesPrefix(const char* prefix) {
VIXL_ASSERT(prefix != NULL);
cpu_features_prefix_ = prefix;
}
// Set the suffix to appear after the CPU features annotations.
void SetCPUFeaturesSuffix(const char* suffix) {
VIXL_ASSERT(suffix != NULL);
cpu_features_suffix_ = suffix;
}
protected:
virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;
CPUFeaturesAuditor* cpu_features_auditor_;
const char* cpu_features_prefix_;
const char* cpu_features_suffix_;
private:
FILE* stream_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DISASM_AARCH64_H
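// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream VIXL sources above): minimal
// use of the PrintDisassembler declared in this header. The helper name and
// parameters are invented for the example; PrintDisassembler bundles its own
// decoder, so a code buffer can be dumped without registering visitors by
// hand. It assumes the vixl root directory is on the include path.
#include <cstdint>
#include <cstdio>

#include "aarch64/disasm-aarch64.h"

// Write the disassembly of a code buffer to stdout, one instruction per line.
inline void DumpCode(const void* buffer, uint64_t size_in_bytes) {
  const vixl::aarch64::Instruction* start =
      vixl::aarch64::Instruction::CastConst(buffer);
  vixl::aarch64::PrintDisassembler disasm(stdout);
  disasm.DisassembleBuffer(start, size_in_bytes);
}
// ---------------------------------------------------------------------------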
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "instructions-aarch64.h"
#include "assembler-aarch64.h"
namespace vixl {
namespace aarch64 {
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((UINT64_C(1) << width) - 1);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
return result;
}
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_b:
case LDR_h:
case LDR_s:
case LDR_d:
case LDR_q:
return true;
default:
return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_b:
case STR_h:
case STR_s:
case STR_d:
case STR_q:
return true;
default:
return false;
}
}
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::GetImmLogical() const {
unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
int32_t n = GetBitN();
int32_t imm_s = GetImmSetBits();
int32_t imm_r = GetImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 32 or 64-bit value, depending on destination register width.
//
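  // Worked example (comment added for illustration): with N = 1,
  // imms = 0b000011 and immr = 0, the element size is 64 and the pattern is
  // the bottom S + 1 = 4 bits (0xf) with no rotation, so the decoded value is
  // 0x000000000000000f. With N = 0, imms = 0b100001 and immr = 0b000001, the
  // element size is 16, the pattern 0x0003 is rotated right by 1 to 0x8001
  // and repeated across the register, giving 0x80018001 for a W register.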
if (n == 1) {
if (imm_s == 0x3f) {
return 0;
}
uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1f) {
return 0;
}
for (int width = 0x20; width >= 0x2; width >>= 1) {
if ((imm_s & width) == 0) {
int mask = width - 1;
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
VIXL_UNREACHABLE();
return 0;
}
uint32_t Instruction::GetImmNEONabcdefgh() const {
return GetImmNEONabc() << 5 | GetImmNEONdefgh();
}
Float16 Instruction::Imm8ToFloat16(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Half: aBbb.cdef.gh00.0000 (16 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint16_t bit7 = (bits >> 7) & 0x1;
uint16_t bit6 = (bits >> 6) & 0x1;
uint16_t bit5_to_0 = bits & 0x3f;
uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6);
return RawbitsToFloat16(result);
}
float Instruction::Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return RawbitsToFloat(result);
}
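// Worked example (comment added for illustration): imm8 = 0x70 has a = 0,
// b = 1, c = d = 1 and e..h = 0, so bit7 = 0, bit6 = 1 and bit5_to_0 = 0x30;
// the expression above gives (31 << 25) | (0x30 << 19) = 0x3f800000, i.e.
// 1.0f.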
Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); }
float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }
double Instruction::Imm8ToFP64(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return RawbitsToDouble(result);
}
double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }
Float16 Instruction::GetImmNEONFP16() const {
return Imm8ToFloat16(GetImmNEONabcdefgh());
}
float Instruction::GetImmNEONFP32() const {
return Imm8ToFP32(GetImmNEONabcdefgh());
}
double Instruction::GetImmNEONFP64() const {
return Imm8ToFP64(GetImmNEONabcdefgh());
}
unsigned CalcLSDataSize(LoadStoreOp op) {
VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeInBytesLog2;
}
}
return size;
}
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
switch (op) {
case STP_q:
case LDP_q:
return kQRegSizeInBytesLog2;
case STP_x:
case LDP_x:
case STP_d:
case LDP_d:
return kXRegSizeInBytesLog2;
default:
return kWRegSizeInBytesLog2;
}
}
int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
switch (branch_type) {
case UncondBranchType:
return ImmUncondBranch_width;
case CondBranchType:
return ImmCondBranch_width;
case CompareBranchType:
return ImmCmpBranch_width;
case TestBranchType:
return ImmTestBranch_width;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
return encoded_max * kInstructionSize;
}
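// For reference (comment added for illustration): an unconditional branch
// immediate is 26 bits wide, so encoded_max is 1 << 25 and the forward range
// is (1 << 25) * 4 bytes = 128 MB; conditional and compare-and-branch
// immediates are 19 bits wide, giving a 1 MB forward range, and test-and-
// branch immediates are 14 bits wide, giving 32 KB.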
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int64_t offset) {
return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
}
const Instruction* Instruction::GetImmPCOffsetTarget() const {
const Instruction* base = this;
ptrdiff_t offset;
if (IsPCRelAddressing()) {
// ADR and ADRP.
offset = GetImmPCRel();
if (Mask(PCRelAddressingMask) == ADRP) {
base = AlignDown(base, kPageSize);
offset *= kPageSize;
} else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
}
} else {
// All PC-relative branches.
VIXL_ASSERT(GetBranchType() != UnknownBranchType);
// Relative branch offsets are instruction-size-aligned.
offset = GetImmBranch() * static_cast<int>(kInstructionSize);
}
return base + offset;
}
int Instruction::GetImmBranch() const {
switch (GetBranchType()) {
case CondBranchType:
return GetImmCondBranch();
case UncondBranchType:
return GetImmUncondBranch();
case CompareBranchType:
return GetImmCmpBranch();
case TestBranchType:
return GetImmTestBranch();
default:
VIXL_UNREACHABLE();
}
return 0;
}
void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
} else {
SetBranchImmTarget(target);
}
}
void Instruction::SetPCRelImmTarget(const Instruction* target) {
ptrdiff_t imm21;
if ((Mask(PCRelAddressingMask) == ADR)) {
imm21 = target - this;
} else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
imm21 = target_page - this_page;
}
Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
void Instruction::SetBranchImmTarget(const Instruction* target) {
VIXL_ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
switch (GetBranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
imm_mask = ImmCondBranch_mask;
break;
}
case UncondBranchType: {
branch_imm = Assembler::ImmUncondBranch(offset);
imm_mask = ImmUncondBranch_mask;
break;
}
case CompareBranchType: {
branch_imm = Assembler::ImmCmpBranch(offset);
imm_mask = ImmCmpBranch_mask;
break;
}
case TestBranchType: {
branch_imm = Assembler::ImmTestBranch(offset);
imm_mask = ImmTestBranch_mask;
break;
}
default:
VIXL_UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(const Instruction* source) {
VIXL_ASSERT(IsWordAligned(source));
ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
}
VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
vform == kFormatH || vform == kFormatS || vform == kFormatD);
switch (vform) {
case kFormat8H:
return kFormat8B;
case kFormat4S:
return kFormat4H;
case kFormat2D:
return kFormat2S;
case kFormatH:
return kFormatB;
case kFormatS:
return kFormatH;
case kFormatD:
return kFormatS;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
vform == kFormatB || vform == kFormatH || vform == kFormatS);
switch (vform) {
case kFormat8B:
return kFormat8H;
case kFormat4H:
return kFormat4S;
case kFormat2S:
return kFormat2D;
case kFormatB:
return kFormatH;
case kFormatH:
return kFormatS;
case kFormatS:
return kFormatD;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatFillQ(VectorFormat vform) {
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return kFormat16B;
case kFormatH:
case kFormat4H:
case kFormat8H:
return kFormat8H;
case kFormatS:
case kFormat2S:
case kFormat4S:
return kFormat4S;
case kFormatD:
case kFormat1D:
case kFormat2D:
return kFormat2D;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
switch (vform) {
case kFormat4H:
return kFormat8B;
case kFormat8H:
return kFormat16B;
case kFormat2S:
return kFormat4H;
case kFormat4S:
return kFormat8H;
case kFormat1D:
return kFormat2S;
case kFormat2D:
return kFormat4S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
switch (vform) {
case kFormat8B:
return kFormat16B;
case kFormat4H:
return kFormat8H;
case kFormat2S:
return kFormat4S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
switch (vform) {
case kFormat16B:
return kFormat8B;
case kFormat8H:
return kFormat4H;
case kFormat4S:
return kFormat2S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromLaneSize(int laneSize) {
switch (laneSize) {
case 8:
return kFormatB;
case 16:
return kFormatH;
case 32:
return kFormatS;
case 64:
return kFormatD;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
return kBRegSize;
case kFormatH:
return kHRegSize;
case kFormatS:
case kFormat2H:
return kSRegSize;
case kFormatD:
return kDRegSize;
case kFormat8B:
case kFormat4H:
case kFormat2S:
case kFormat1D:
return kDRegSize;
default:
return kQRegSize;
}
}
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
return RegisterSizeInBitsFromFormat(vform) / 8;
}
unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 8;
case kFormatH:
case kFormat2H:
case kFormat4H:
case kFormat8H:
return 16;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 32;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 64;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int LaneSizeInBytesFromFormat(VectorFormat vform) {
return LaneSizeInBitsFromFormat(vform) / 8;
}
int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 0;
case kFormatH:
case kFormat2H:
case kFormat4H:
case kFormat8H:
return 1;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 2;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 3;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int LaneCountFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormat16B:
return 16;
case kFormat8B:
case kFormat8H:
return 8;
case kFormat4H:
case kFormat4S:
return 4;
case kFormat2H:
case kFormat2S:
case kFormat2D:
return 2;
case kFormat1D:
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return 1;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int MaxLaneCountFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 16;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 8;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 4;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 2;
default:
VIXL_UNREACHABLE();
return 0;
}
}
// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return false;
default:
return true;
}
}
int64_t MaxIntFromFormat(VectorFormat vform) {
return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
int64_t MinIntFromFormat(VectorFormat vform) {
return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}
uint64_t MaxUintFromFormat(VectorFormat vform) {
return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
} // namespace aarch64
} // namespace vixl
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;
// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;
const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kAtomicAccessGranule = 16;
const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
<< kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
const uint64_t kTTBRMask = UINT64_C(1) << 55;
// Make these moved float constants backwards compatible
// with explicit vixl::aarch64:: namespace references.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;
using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;
using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode { Offset, PreIndex, PostIndex };
enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
Instr GetInstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}
VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
return GetInstructionBits();
}
void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}
int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
return ExtractBit(pos);
}
uint32_t ExtractBits(int msb, int lsb) const {
return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
}
VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
return ExtractBits(msb, lsb);
}
int32_t ExtractSignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return ExtractSignedBitfield32(msb, lsb, bits);
}
VIXL_DEPRECATED("ExtractSignedBits",
int32_t SignedBits(int msb, int lsb) const) {
return ExtractSignedBits(msb, lsb);
}
Instr Mask(uint32_t mask) const {
VIXL_ASSERT(mask != 0);
return GetInstructionBits() & mask;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int GetImmPCRel() const {
uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
uint32_t lo = GetImmPCRelLo();
uint32_t offset = (hi << ImmPCRelLo_width) | lo;
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return ExtractSignedBitfield32(width - 1, 0, offset);
}
VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
// ImmLSPAC is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmLSPACLo and ImmLSPACHi.
int GetImmLSPAC() const {
uint32_t hi = static_cast<uint32_t>(GetImmLSPACHi());
uint32_t lo = GetImmLSPACLo();
uint32_t offset = (hi << ImmLSPACLo_width) | lo;
int width = ImmLSPACLo_width + ImmLSPACHi_width;
return ExtractSignedBitfield32(width - 1, 0, offset) << 3;
}
uint64_t GetImmLogical() const;
VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
return GetImmLogical();
}
unsigned GetImmNEONabcdefgh() const;
VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
return GetImmNEONabcdefgh();
}
Float16 GetImmFP16() const;
float GetImmFP32() const;
VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }
double GetImmFP64() const;
VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }
Float16 GetImmNEONFP16() const;
float GetImmNEONFP32() const;
VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
return GetImmNEONFP32();
}
double GetImmNEONFP64() const;
VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
return GetImmNEONFP64();
}
unsigned GetSizeLS() const {
return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
}
VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }
unsigned GetSizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
return GetSizeLSPair();
}
int GetNEONLSIndex(int access_size_shift) const {
int64_t q = GetNEONQ();
int64_t s = GetNEONS();
int64_t size = GetNEONLSSize();
int64_t index = (q << 3) | (s << 2) | size;
return static_cast<int>(index >> access_size_shift);
}
VIXL_DEPRECATED("GetNEONLSIndex",
int NEONLSIndex(int access_size_shift) const) {
return GetNEONLSIndex(access_size_shift);
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
bool IsLoad() const;
bool IsStore() const;
bool IsLoadLiteral() const {
// This includes PRFM_lit.
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }
bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }
bool IsBti() const {
if (Mask(SystemHintFMask) == SystemHintFixed) {
int imm_hint = GetImmHint();
switch (imm_hint) {
case BTI:
case BTI_c:
case BTI_j:
case BTI_jc:
return true;
}
}
return false;
}
static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchRangeBitwidth",
static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
return GetImmBranchRangeBitwidth(branch_type);
}
static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchForwardRange",
static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
return GetImmBranchForwardRange(branch_type);
}
static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode GetRdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode GetRnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }
ImmBranchType GetBranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
return GetBranchType();
}
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
const Instruction* GetImmPCOffsetTarget() const;
VIXL_DEPRECATED("GetImmPCOffsetTarget",
const Instruction* ImmPCOffsetTarget() const) {
return GetImmPCOffsetTarget();
}
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(const Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
// The range of a load literal instruction, expressed as 'instr +- range'.
// The range is actually the 'positive' range; the branch instruction can
// target [instr - range - kInstructionSize, instr + range].
static const int kLoadLiteralImmBitwidth = 19;
static const int kLoadLiteralRange =
(1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
// Calculate the address of a literal referred to by a load-literal
// instruction, and return it as the specified type.
//
// The literal itself is safely mutable only if the backing buffer is safely
// mutable.
template <typename T>
T GetLiteralAddress() const {
uint64_t base_raw = reinterpret_cast<uint64_t>(this);
int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
uint64_t address_raw = base_raw + offset;
// Cast the address using a C-style cast. A reinterpret_cast would be
// appropriate, but it can't cast one integral type to another.
T address = (T)(address_raw);
// Assert that the address can be represented by the specified type.
VIXL_ASSERT((uint64_t)(address) == address_raw);
return address;
}
template <typename T>
VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
return GetLiteralAddress<T>();
}
uint32_t GetLiteral32() const {
uint32_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
return GetLiteral32();
}
uint64_t GetLiteral64() const {
uint64_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
return GetLiteral64();
}
float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
return GetLiteralFP32();
}
double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
return GetLiteralFP64();
}
Instruction* GetNextInstruction() { return this + kInstructionSize; }
const Instruction* GetNextInstruction() const {
return this + kInstructionSize;
}
VIXL_DEPRECATED("GetNextInstruction",
const Instruction* NextInstruction() const) {
return GetNextInstruction();
}
const Instruction* GetInstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
VIXL_DEPRECATED("GetInstructionAtOffset",
const Instruction* InstructionAtOffset(int64_t offset)
const) {
return GetInstructionAtOffset(offset);
}
template <typename T>
static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
template <typename T>
static const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}
private:
int GetImmBranch() const;
static Float16 Imm8ToFloat16(uint32_t imm8);
static float Imm8ToFP32(uint32_t imm8);
static double Imm8ToFP64(uint32_t imm8);
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
};
// Functions for handling NEON vector format information.
enum VectorFormat {
kFormatUndefined = 0xffffffff,
kFormat8B = NEON_8B,
kFormat16B = NEON_16B,
kFormat4H = NEON_4H,
kFormat8H = NEON_8H,
kFormat2S = NEON_2S,
kFormat4S = NEON_4S,
kFormat1D = NEON_1D,
kFormat2D = NEON_2D,
// Scalar formats. We add the scalar bit to distinguish between scalar and
// vector enumerations; the bit is always set in the encoding of scalar ops
// and always clear for vector ops. Although kFormatD and kFormat1D appear
// to be the same, their meaning is subtly different. The first is a scalar
// operation, the second a vector operation that only affects one lane.
kFormatB = NEON_B | NEONScalar,
kFormatH = NEON_H | NEONScalar,
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar,
// An artificial value, used by simulator trace tests and a few oddball
// instructions (such as FMLAL).
kFormat2H = 0xfffffffe
};
const int kMaxLanesPerVector = 16;
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
// clang-format off
enum NEONFormat {
NF_UNDEF = 0,
NF_8B = 1,
NF_16B = 2,
NF_4H = 3,
NF_8H = 4,
NF_2S = 5,
NF_4S = 6,
NF_1D = 7,
NF_2D = 8,
NF_B = 9,
NF_H = 10,
NF_S = 11,
NF_D = 12
};
// clang-format on
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
// The bit positions in the instruction to consider.
uint8_t bits[kNEONFormatMaxBits];
// Mapping from concatenated bits to format.
NEONFormat map[1 << kNEONFormatMaxBits];
};
class NEONFormatDecoder {
public:
enum SubstitutionMode { kPlaceholder, kFormat };
// Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
// format map.
explicit NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(IntegerFormatMap());
}
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1, format2);
}
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL) {
VIXL_ASSERT(format0 != NULL);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
}
void SetFormatMap(unsigned index, const NEONFormatMap* format) {
VIXL_ASSERT(index <= ArrayLength(formats_));
VIXL_ASSERT(format != NULL);
formats_[index] = format;
}
// Substitute %s in the input string with the placeholder string for each
  // register, i.e. "'B", "'H", etc.
const char* SubstitutePlaceholders(const char* string) {
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}
// Substitute %s in the input string with a new string based on the
// substitution mode.
const char* Substitute(const char* string,
SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
SubstitutionMode mode2 = kFormat) {
snprintf(form_buffer_,
sizeof(form_buffer_),
string,
GetSubstitute(0, mode0),
GetSubstitute(1, mode1),
GetSubstitute(2, mode2));
return form_buffer_;
}
// Append a "2" to a mnemonic string based of the state of the Q bit.
const char* Mnemonic(const char* mnemonic) {
if ((instrbits_ & NEON_Q) != 0) {
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
return mne_buffer_;
}
return mnemonic;
}
VectorFormat GetVectorFormat(int format_index = 0) {
return GetVectorFormat(formats_[format_index]);
}
VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
static const VectorFormat vform[] = {kFormatUndefined,
kFormat8B,
kFormat16B,
kFormat4H,
kFormat8H,
kFormat2S,
kFormat4S,
kFormat1D,
kFormat2D,
kFormatB,
kFormatH,
kFormatS,
kFormatD};
VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
return vform[GetNEONFormat(format_map)];
}
// Built in mappings for common cases.
// The integer format map uses three bits (Q, size<1:0>) to encode the
// "standard" set of NEON integer vector formats.
static const NEONFormatMap* IntegerFormatMap() {
static const NEONFormatMap map =
{{23, 22, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The long integer format map uses two bits (size<1:0>) to encode the
// long set of NEON integer vector formats. These are used in narrow, wide
// and long operations.
static const NEONFormatMap* LongIntegerFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
return &map;
}
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
// formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap* FPFormatMap() {
// The FP format map assumes two bits (Q, size<0>) are used to encode the
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap map = {{22, 30},
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The FP16 format map uses one bit (Q) to encode the NEON vector format:
// NF_4H, NF_8H.
static const NEONFormatMap* FP16FormatMap() {
static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
return &map;
}
// The load/store format map uses three bits (Q, 11, 10) to encode the
// set of NEON vector formats.
static const NEONFormatMap* LoadStoreFormatMap() {
static const NEONFormatMap map =
{{11, 10, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
return &map;
}
// The logical format map uses one bit (Q) to encode the NEON vector format:
// NF_8B, NF_16B.
static const NEONFormatMap* LogicalFormatMap() {
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
return &map;
}
// The triangular format map uses between two and five bits to encode the NEON
// vector format:
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
static const NEONFormatMap* TriangularFormatMap() {
static const NEONFormatMap map =
{{19, 18, 17, 16, 30},
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
return &map;
}
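// Worked example (illustrative): for an instruction with imm5<19:16> = 0b0001
// and Q (bit 30) = 1, PickBits({19, 18, 17, 16, 30}) gives 0b00011 = 3, which
// selects NF_16B from the table above, matching the "xxx11->16B" pattern.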
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
// formats: NF_B, NF_H, NF_S, NF_D.
static const NEONFormatMap* ScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
return &map;
}
// The long scalar format map uses two bits (size<1:0>) to encode the longer
// NEON scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* LongScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
return &map;
}
// The FP scalar format map assumes one bit (size<0>) is used to encode the
// NEON FP scalar formats: NF_S, NF_D.
static const NEONFormatMap* FPScalarFormatMap() {
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
return &map;
}
// The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
// encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* FPScalarPairwiseFormatMap() {
static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
return &map;
}
// The triangular scalar format map uses between one and four bits to encode
// the NEON FP scalar formats:
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
static const NEONFormatMap* TriangularScalarFormatMap() {
static const NEONFormatMap map = {{19, 18, 17, 16},
{NF_UNDEF,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B,
NF_D,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B}};
return &map;
}
private:
// Get a pointer to a string that represents the format or placeholder for
// the specified substitution index, based on the format map and instruction.
const char* GetSubstitute(int index, SubstitutionMode mode) {
if (mode == kFormat) {
return NEONFormatAsString(GetNEONFormat(formats_[index]));
}
VIXL_ASSERT(mode == kPlaceholder);
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}
// Get the NEONFormat enumerated value for bits obtained from the
// instruction based on the specified format mapping.
NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
return format_map->map[PickBits(format_map->bits)];
}
// Convert a NEONFormat into a string.
static const char* NEONFormatAsString(NEONFormat format) {
// clang-format off
static const char* formats[] = {
"undefined",
"8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
"b", "h", "s", "d"
};
// clang-format on
VIXL_ASSERT(format < ArrayLength(formats));
return formats[format];
}
// Convert a NEONFormat into a register placeholder string.
static const char* NEONFormatAsPlaceholder(NEONFormat format) {
VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
(format == NF_D) || (format == NF_UNDEF));
// clang-format off
static const char* formats[] = {
"undefined",
"undefined", "undefined", "undefined", "undefined",
"undefined", "undefined", "undefined", "undefined",
"'B", "'H", "'S", "'D"
};
// clang-format on
return formats[format];
}
// Select bits from instrbits_ defined by the bits array, concatenate them,
// and return the value.
uint8_t PickBits(const uint8_t bits[]) {
uint8_t result = 0;
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
if (bits[b] == 0) break;
result <<= 1;
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
}
return result;
}
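// Worked example (illustrative): with the IntegerFormatMap() bits {23, 22, 30}
// and an instruction where bit 23 = 1, bit 22 = 0 and bit 30 (Q) = 1,
// PickBits() concatenates the bits in that order to give 0b101 = 5, which
// indexes NF_4S in that map's table.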
Instr instrbits_;
const NEONFormatMap* formats_[3];
char form_buffer_[64];
char mne_buffer_[16];
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "instrument-aarch64.h"
namespace vixl {
namespace aarch64 {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
VIXL_ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
// Make sure `name_` is always NULL-terminated, even if the source string is
// longer.
name_[kCounterNameMaxLength - 1] = '\0';
}
void Counter::Enable() { enabled_ = true; }
void Counter::Disable() { enabled_ = false; }
bool Counter::IsEnabled() { return enabled_; }
void Counter::Increment() {
if (enabled_) {
count_++;
}
}
uint64_t Counter::GetCount() {
uint64_t result = count_;
if (type_ == Gauge) {
// If the counter is a Gauge, reset the count after reading.
count_ = 0;
}
return result;
}
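// Illustrative sketch (not part of the library): the Gauge/Cumulative
// distinction only affects reads. The counter names below are hypothetical.
//
//   Counter gauge("Load Integer", Gauge);
//   gauge.Enable();
//   gauge.Increment();
//   gauge.GetCount();  // Returns 1 and resets the count to 0.
//   gauge.GetCount();  // Returns 0.
//
//   Counter total("Instruction", Cumulative);
//   total.Enable();
//   total.Increment();
//   total.GetCount();  // Returns 1; subsequent reads still return 1.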
const char* Counter::GetName() { return name_; }
CounterType Counter::GetType() { return type_; }
struct CounterDescriptor {
const char* name;
CounterType type;
};
static const CounterDescriptor kCounterList[] =
{{"Instruction", Cumulative},
{"Move Immediate", Gauge},
{"Add/Sub DP", Gauge},
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge},
{"Test and Branch", Gauge},
{"Conditional Branch", Gauge},
{"Load Integer", Gauge},
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"NEON", Gauge},
{"Crypto", Gauge}};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stdout), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stdout.
if (datafile != NULL) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
printf("Can't open output file %s. Using stdout.\n", datafile);
output_stream_ = stdout;
}
}
static const int num_counters =
sizeof(kCounterList) / sizeof(CounterDescriptor);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
// Construct Counter objects from counter description array.
for (int i = 0; i < num_counters; i++) {
Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
counters_.push_back(counter);
}
DumpCounterNames();
}
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
// Free all the counter objects.
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
delete *it;
}
if (output_stream_ != stdout) {
fclose(output_stream_);
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
VIXL_ASSERT(counter->GetType() == Cumulative);
counter->Increment();
if ((sample_period_ != 0) && counter->IsEnabled() &&
(counter->GetCount() % sample_period_) == 0) {
DumpCounters();
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%" PRIu64 ",", (*it)->GetCount());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%s,", (*it)->GetName());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable:
Enable();
break;
case InstrumentStateDisable:
Disable();
break;
default:
DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dump an event marker to the output stream as a specially formatted comment
// line.
static Counter* counter = GetCounter("Instruction");
fprintf(output_stream_,
"# %c%c @ %" PRId64 "\n",
marker & 0xff,
(marker >> 8) & 0xff,
counter->GetCount());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
if (strcmp((*it)->GetName(), name) == 0) {
return *it;
}
}
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Enable();
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Disable();
}
}
void Instrument::VisitPCRelAddressing(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitLogicalImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
if (instr->IsMovn() && (instr->GetRd() == kZeroRegCode)) {
unsigned imm = instr->GetImmMoveWide();
HandleInstrumentationEvent(imm);
} else {
counter->Increment();
}
}
void Instrument::VisitBitfield(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
if (instr->Mask(LoadStorePairLBit) != 0) {
load_pair_counter->Increment();
} else {
store_pair_counter->Increment();
}
}
void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitAtomicMemory(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitLoadLiteral(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::VisitLoadStorePAC(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Load Integer");
counter->Increment();
}
void Instrument::InstrumentLoadStore(const Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreMask)) {
case STRB_w:
case STRH_w:
case STR_w:
VIXL_FALLTHROUGH();
case STR_x:
store_int_counter->Increment();
break;
case STR_s:
VIXL_FALLTHROUGH();
case STR_d:
store_fp_counter->Increment();
break;
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_x:
case LDRSH_x:
case LDRSW_x:
case LDRSB_w:
VIXL_FALLTHROUGH();
case LDRSH_w:
load_int_counter->Increment();
break;
case LDR_s:
VIXL_FALLTHROUGH();
case LDR_d:
load_fp_counter->Increment();
break;
}
}
void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
USE(instr);
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRCpcUnscaledOffset(const Instruction* instr) {
Update();
switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) {
case STLURB:
case STLURH:
case STLUR_w:
case STLUR_x: {
static Counter* counter = GetCounter("Store Integer");
counter->Increment();
break;
}
case LDAPURB:
case LDAPURSB_w:
case LDAPURSB_x:
case LDAPURH:
case LDAPURSH_w:
case LDAPURSH_x:
case LDAPUR_w:
case LDAPURSW:
case LDAPUR_x: {
static Counter* counter = GetCounter("Load Integer");
counter->Increment();
break;
}
}
}
void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLogicalShifted(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitAddSubShifted(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitRotateRightIntoFlags(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitEvaluateIntoFlags(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitCryptoAES(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON2RegMiscFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Same(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3SameFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3SameExtra(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Different(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONCopy(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONExtract(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3SameFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3SameExtra(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONTable(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONPerm(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitUnallocated(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitUnimplemented(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} // namespace aarch64
} // namespace vixl
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
#include "decoder-aarch64.h"
#include "instrument-aarch64.h"
namespace vixl {
namespace aarch64 {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t GetCount();
VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); }
const char* GetName();
VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); }
CounterType GetType();
VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); }
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument : public DecoderVisitor {
public:
explicit Instrument(
const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
void Enable();
void Disable();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(const Instruction* instr);
void InstrumentLoadStorePair(const Instruction* instr);
std::list<Counter*> counters_;
FILE* output_stream_;
// Counter information is dumped every sample_period_ instructions decoded.
// For a sample_period_ = 0 a final counter value is only produced when the
// Instrumentation class is destroyed.
uint64_t sample_period_;
};
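// Illustrative sketch (not part of the library): an Instrument is typically
// registered as a visitor on a Decoder (for example the one driving the
// Simulator) so that every decoded instruction updates the counters. The file
// name and sample period below are hypothetical.
//
//   Decoder decoder;
//   Instrument instrument("counters.csv", 1 << 16);
//   instrument.Enable();
//   decoder.AppendVisitor(&instrument);
//   // Counter values are appended to "counters.csv" every 2^16 instructions
//   // decoded, and once more when `instrument` is destroyed.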
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountTrailingZeros(list_);
VIXL_ASSERT((static_cast<RegList>(1) << index) & list_);
Remove(index);
return CPURegister(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
VIXL_ASSERT(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountLeadingZeros(list_);
index = kRegListSizeInBits - 1 - index;
VIXL_ASSERT((static_cast<RegList>(1) << index) & list_);
Remove(index);
return CPURegister(index, size_, type_);
}
bool CPURegList::IsValid() const {
if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) {
bool is_valid = true;
// Try to create a CPURegister for each element in the list.
for (int i = 0; i < kRegListSizeInBits; i++) {
if (((list_ >> i) & 1) != 0) {
is_valid &= CPURegister(i, size_, type_).IsValid();
}
}
return is_valid;
} else if (type_ == CPURegister::kNoRegister) {
// We can't use IsEmpty here because that asserts IsValid().
return list_ == 0;
} else {
return false;
}
}
void CPURegList::RemoveCalleeSaved() {
if (GetType() == CPURegister::kRegister) {
Remove(GetCalleeSaved(GetRegisterSizeInBits()));
} else if (GetType() == CPURegister::kVRegister) {
Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
} else {
VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
VIXL_ASSERT(IsEmpty());
// The list must already be empty, so do nothing.
}
}
CPURegList CPURegList::Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3) {
return Union(list_1, Union(list_2, list_3));
}
CPURegList CPURegList::Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4) {
return Union(Union(list_1, list_2), Union(list_3, list_4));
}
CPURegList CPURegList::Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3) {
return Intersection(list_1, Intersection(list_2, list_3));
}
CPURegList CPURegList::Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4) {
return Intersection(Intersection(list_1, list_2),
Intersection(list_3, list_4));
}
CPURegList CPURegList::GetCalleeSaved(unsigned size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
return CPURegList(CPURegister::kVRegister, size, 8, 15);
}
CPURegList CPURegList::GetCallerSaved(unsigned size) {
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
// Do not use lr directly to avoid initialisation order fiasco bugs for users.
list.Combine(Register(30, kXRegSize));
return list;
}
CPURegList CPURegList::GetCallerSavedV(unsigned size) {
// Registers d0-d7 and d16-d31 are caller-saved.
CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
return list;
}
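// Illustrative sketch (not part of the library): the predefined lists can be
// combined and walked with the CPURegList API above. Variable names are
// hypothetical.
//
//   CPURegList saved = CPURegList::GetCalleeSaved();  // x19-x29.
//   saved.Combine(lr);                                // Also preserve x30.
//   while (!saved.IsEmpty()) {
//     CPURegister reg = saved.PopLowestIndex();
//     // ... push `reg`, or record it for a stack map, etc.
//   }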
const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
// Registers.
#define WREG(n) w##n,
const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)};
#undef WREG
#define XREG(n) x##n,
const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)};
#undef XREG
#define BREG(n) b##n,
const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)};
#undef BREG
#define HREG(n) h##n,
const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)};
#undef HREG
#define SREG(n) s##n,
const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
#undef SREG
#define DREG(n) d##n,
const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)};
#undef DREG
#define QREG(n) q##n,
const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)};
#undef QREG
#define VREG(n) v##n,
const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)};
#undef VREG
const Register& Register::GetWRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wsp;
} else {
VIXL_ASSERT(code < kNumberOfRegisters);
return wregisters[code];
}
}
const Register& Register::GetXRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return sp;
} else {
VIXL_ASSERT(code < kNumberOfRegisters);
return xregisters[code];
}
}
const VRegister& VRegister::GetBRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return bregisters[code];
}
const VRegister& VRegister::GetHRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return hregisters[code];
}
const VRegister& VRegister::GetSRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return sregisters[code];
}
const VRegister& VRegister::GetDRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return dregisters[code];
}
const VRegister& VRegister::GetQRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return qregisters[code];
}
const VRegister& VRegister::GetVRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return vregisters[code];
}
const Register& CPURegister::W() const {
VIXL_ASSERT(IsValidRegister());
return Register::GetWRegFromCode(code_);
}
const Register& CPURegister::X() const {
VIXL_ASSERT(IsValidRegister());
return Register::GetXRegFromCode(code_);
}
const VRegister& CPURegister::B() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetBRegFromCode(code_);
}
const VRegister& CPURegister::H() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetHRegFromCode(code_);
}
const VRegister& CPURegister::S() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetSRegFromCode(code_);
}
const VRegister& CPURegister::D() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetDRegFromCode(code_);
}
const VRegister& CPURegister::Q() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetQRegFromCode(code_);
}
const VRegister& CPURegister::V() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetVRegFromCode(code_);
}
// Operand.
Operand::Operand(int64_t immediate)
: immediate_(immediate),
reg_(NoReg),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {}
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: reg_(reg),
shift_(shift),
extend_(NO_EXTEND),
shift_amount_(shift_amount) {
VIXL_ASSERT(shift != MSL);
VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
VIXL_ASSERT(!reg.IsSP());
}
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
: reg_(reg),
shift_(NO_SHIFT),
extend_(extend),
shift_amount_(shift_amount) {
VIXL_ASSERT(reg.IsValid());
VIXL_ASSERT(shift_amount <= 4);
VIXL_ASSERT(!reg.IsSP());
// Extend modes SXTX and UXTX require a 64-bit register.
VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
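// Illustrative sketch (not part of the library): the two register constructors
// cover the shifted- and extended-register operand forms, for example:
//
//   Operand(x1, LSL, 4)   // x1 shifted left by 4 (shifted register).
//   Operand(w2, UXTW)     // w2 zero-extended to 64 bits (extended register).
//   Operand(42)           // Plain immediate.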
bool Operand::IsImmediate() const { return reg_.Is(NoReg); }
bool Operand::IsPlainRegister() const {
return reg_.IsValid() &&
(((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
// No-op shifts.
((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
// No-op extend operations.
// We can't include [US]XTW here without knowing more about the
// context; they are only no-ops for 32-bit operations.
//
// For example, this operand could be replaced with w1:
// __ Add(w0, w0, Operand(w1, UXTW));
// However, no plain register can replace it in this context:
// __ Add(x0, x0, Operand(w1, UXTW));
(((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}
bool Operand::IsShiftedRegister() const {
return reg_.IsValid() && (shift_ != NO_SHIFT);
}
bool Operand::IsExtendedRegister() const {
return reg_.IsValid() && (extend_ != NO_EXTEND);
}
bool Operand::IsZero() const {
if (IsImmediate()) {
return GetImmediate() == 0;
} else {
return GetRegister().IsZero();
}
}
Operand Operand::ToExtendedRegister() const {
VIXL_ASSERT(IsShiftedRegister());
VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
// MemOperand
MemOperand::MemOperand()
: base_(NoReg),
regoffset_(NoReg),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(NO_EXTEND) {}
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base),
regoffset_(NoReg),
offset_(offset),
addrmode_(addrmode),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}
MemOperand::MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount)
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(extend),
shift_amount_(shift_amount) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
VIXL_ASSERT(!regoffset.IsSP());
VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
// SXTX extend mode requires a 64-bit offset register.
VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}
MemOperand::MemOperand(Register base,
Register regoffset,
Shift shift,
unsigned shift_amount)
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(shift),
extend_(NO_EXTEND),
shift_amount_(shift_amount) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
VIXL_ASSERT(shift == LSL);
}
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
: base_(base),
regoffset_(NoReg),
addrmode_(addrmode),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
offset_ = offset.GetImmediate();
} else if (offset.IsShiftedRegister()) {
VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
regoffset_ = offset.GetRegister();
shift_ = offset.GetShift();
shift_amount_ = offset.GetShiftAmount();
extend_ = NO_EXTEND;
offset_ = 0;
// These assertions match those in the shifted-register constructor.
VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
VIXL_ASSERT(shift_ == LSL);
} else {
VIXL_ASSERT(offset.IsExtendedRegister());
VIXL_ASSERT(addrmode == Offset);
regoffset_ = offset.GetRegister();
extend_ = offset.GetExtend();
shift_amount_ = offset.GetShiftAmount();
shift_ = NO_SHIFT;
offset_ = 0;
// These assertions match those in the extended-register constructor.
VIXL_ASSERT(!regoffset_.IsSP());
VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
}
}
bool MemOperand::IsImmediateOffset() const {
return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}
bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
void MemOperand::AddOffset(int64_t offset) {
VIXL_ASSERT(IsImmediateOffset());
offset_ += offset;
}
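// Illustrative sketch (not part of the library): the addressing-mode
// predicates above classify the usual MemOperand forms, for example:
//
//   MemOperand(x0, 8)              // IsImmediateOffset(): [x0, #8]
//   MemOperand(x0, x1, LSL, 3)     // IsRegisterOffset():  [x0, x1, lsl #3]
//   MemOperand(x0, 16, PreIndex)   // IsPreIndex():        [x0, #16]!
//   MemOperand(x0, 16, PostIndex)  // IsPostIndex():       [x0], #16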
GenericOperand::GenericOperand(const CPURegister& reg)
: cpu_register_(reg), mem_op_size_(0) {
if (reg.IsQ()) {
VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
// Support for Q registers is not implemented yet.
VIXL_UNIMPLEMENTED();
}
}
GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
: cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
if (mem_op_size_ > kXRegSizeInBytes) {
// We only support generic operands up to the size of X registers.
VIXL_UNIMPLEMENTED();
}
}
bool GenericOperand::Equals(const GenericOperand& other) const {
if (!IsValid() || !other.IsValid()) {
// Two invalid generic operands are considered equal.
return !IsValid() && !other.IsValid();
}
if (IsCPURegister() && other.IsCPURegister()) {
return GetCPURegister().Is(other.GetCPURegister());
} else if (IsMemOperand() && other.IsMemOperand()) {
return GetMemOperand().Equals(other.GetMemOperand()) &&
(GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
}
return false;
}
}  // namespace aarch64
}  // namespace vixl
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
typedef uint64_t RegList;
static const int kRegListSizeInBits = sizeof(RegList) * 8;
// Registers.
// Some CPURegister methods can return Register or VRegister types, so we need
// to declare them in advance.
class Register;
class VRegister;
class CPURegister {
public:
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
kVRegister,
kFPRegister = kVRegister,
kNoRegister
};
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
VIXL_ASSERT(!IsValid());
VIXL_ASSERT(IsNone());
}
CPURegister(unsigned code, unsigned size, RegisterType type)
: code_(code), size_(size), type_(type) {
VIXL_ASSERT(IsValidOrNone());
}
unsigned GetCode() const {
VIXL_ASSERT(IsValid());
return code_;
}
VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); }
RegisterType GetType() const {
VIXL_ASSERT(IsValidOrNone());
return type_;
}
VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); }
RegList GetBit() const {
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
}
VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); }
int GetSizeInBytes() const {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(size_ % 8 == 0);
return size_ / 8;
}
VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) {
return GetSizeInBytes();
}
int GetSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) {
return GetSizeInBits();
}
VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) {
return GetSizeInBits();
}
bool Is8Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 8;
}
bool Is16Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 16;
}
bool Is32Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 32;
}
bool Is64Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 64;
}
bool Is128Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 128;
}
bool IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
VIXL_ASSERT(!IsNone());
return true;
} else {
// This assert is hit when the register has not been properly initialized.
// One cause for this can be an initialisation order fiasco. See
// https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
VIXL_ASSERT(IsNone());
return false;
}
}
bool IsValidRegister() const {
return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
}
bool IsValidVRegister() const {
return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) ||
(size_ == kSRegSize) || (size_ == kDRegSize) ||
(size_ == kQRegSize)) &&
(code_ < kNumberOfVRegisters);
}
bool IsValidFPRegister() const {
return IsFPRegister() && (code_ < kNumberOfVRegisters);
}
bool IsNone() const {
// kNoRegister types should always have size 0 and code 0.
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
return type_ == kNoRegister;
}
bool Aliases(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return (code_ == other.code_) && (type_ == other.type_);
}
bool Is(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return Aliases(other) && (size_ == other.size_);
}
bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
bool IsRegister() const { return type_ == kRegister; }
bool IsVRegister() const { return type_ == kVRegister; }
bool IsFPRegister() const { return IsS() || IsD(); }
bool IsW() const { return IsValidRegister() && Is32Bits(); }
bool IsX() const { return IsValidRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
// So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
// does not imply Is1D() or Is8B().
// Check the number of lanes, ie. the format of the vector, using methods such
// as Is8B(), Is1D(), etc. in the VRegister class.
bool IsV() const { return IsVRegister(); }
bool IsB() const { return IsV() && Is8Bits(); }
bool IsH() const { return IsV() && Is16Bits(); }
bool IsS() const { return IsV() && Is32Bits(); }
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
// Semantic type for sdot and udot instructions.
bool IsS4B() const { return IsS(); }
const VRegister& S4B() const { return S(); }
const Register& W() const;
const Register& X() const;
const VRegister& V() const;
const VRegister& B() const;
const VRegister& H() const;
const VRegister& S() const;
const VRegister& D() const;
const VRegister& Q() const;
bool IsSameType(const CPURegister& other) const {
return type_ == other.type_;
}
bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && IsSameType(other);
}
protected:
unsigned code_;
int size_;
RegisterType type_;
private:
bool IsValidOrNone() const { return IsValid() || IsNone(); }
};
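// Illustrative sketch (not part of the library): w and x registers with the
// same code alias each other but are not the same register, for example:
//
//   x0.IsX();        // true
//   w0.Is32Bits();   // true
//   x0.Aliases(w0);  // true  (same code and type)
//   x0.Is(w0);       // false (different sizes)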
class Register : public CPURegister {
public:
Register() : CPURegister() {}
explicit Register(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) {
VIXL_ASSERT(IsValidRegister());
}
Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {}
bool IsValid() const {
VIXL_ASSERT(IsRegister() || IsNone());
return IsValidRegister();
}
static const Register& GetWRegFromCode(unsigned code);
VIXL_DEPRECATED("GetWRegFromCode",
static const Register& WRegFromCode(unsigned code)) {
return GetWRegFromCode(code);
}
static const Register& GetXRegFromCode(unsigned code);
VIXL_DEPRECATED("GetXRegFromCode",
static const Register& XRegFromCode(unsigned code)) {
return GetXRegFromCode(code);
}
private:
static const Register wregisters[];
static const Register xregisters[];
};
namespace internal {
template <int size_in_bits>
class FixedSizeRegister : public Register {
public:
FixedSizeRegister() : Register() {}
explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const Register& other)
: Register(other.GetCode(), size_in_bits) {
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const CPURegister& other)
: Register(other.GetCode(), other.GetSizeInBits()) {
VIXL_ASSERT(other.GetType() == kRegister);
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
bool IsValid() const {
return Register::IsValid() && (GetSizeInBits() == size_in_bits);
}
};
} // namespace internal
typedef internal::FixedSizeRegister<kXRegSize> XRegister;
typedef internal::FixedSizeRegister<kWRegSize> WRegister;
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(), lanes_(1) {}
explicit VRegister(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()),
lanes_(1) {
VIXL_ASSERT(IsValidVRegister());
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, unsigned size, unsigned lanes = 1)
: CPURegister(code, size, kVRegister), lanes_(lanes) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, VectorFormat format)
: CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
bool IsValid() const {
VIXL_ASSERT(IsVRegister() || IsNone());
return IsValidVRegister();
}
static const VRegister& GetBRegFromCode(unsigned code);
VIXL_DEPRECATED("GetBRegFromCode",
static const VRegister& BRegFromCode(unsigned code)) {
return GetBRegFromCode(code);
}
static const VRegister& GetHRegFromCode(unsigned code);
VIXL_DEPRECATED("GetHRegFromCode",
static const VRegister& HRegFromCode(unsigned code)) {
return GetHRegFromCode(code);
}
static const VRegister& GetSRegFromCode(unsigned code);
VIXL_DEPRECATED("GetSRegFromCode",
static const VRegister& SRegFromCode(unsigned code)) {
return GetSRegFromCode(code);
}
static const VRegister& GetDRegFromCode(unsigned code);
VIXL_DEPRECATED("GetDRegFromCode",
static const VRegister& DRegFromCode(unsigned code)) {
return GetDRegFromCode(code);
}
static const VRegister& GetQRegFromCode(unsigned code);
VIXL_DEPRECATED("GetQRegFromCode",
static const VRegister& QRegFromCode(unsigned code)) {
return GetQRegFromCode(code);
}
static const VRegister& GetVRegFromCode(unsigned code);
VIXL_DEPRECATED("GetVRegFromCode",
static const VRegister& VRegFromCode(unsigned code)) {
return GetVRegFromCode(code);
}
VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
VRegister V2H() const { return VRegister(code_, kSRegSize, 2); }
VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); }
bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
// could alias.
bool Is1B() const {
VIXL_ASSERT(!(Is8Bits() && IsVector()));
return Is8Bits();
}
bool Is1H() const {
VIXL_ASSERT(!(Is16Bits() && IsVector()));
return Is16Bits();
}
bool Is1S() const {
VIXL_ASSERT(!(Is32Bits() && IsVector()));
return Is32Bits();
}
// Semantic type for sdot and udot instructions.
bool Is1S4B() const { return Is1S(); }
bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; }
bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; }
bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; }
bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; }
int GetLanes() const { return lanes_; }
VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); }
bool IsScalar() const { return lanes_ == 1; }
bool IsVector() const { return lanes_ > 1; }
bool IsSameFormat(const VRegister& other) const {
return (size_ == other.size_) && (lanes_ == other.lanes_);
}
unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; }
VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) {
return GetLaneSizeInBytes();
}
unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; }
VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) {
return GetLaneSizeInBits();
}
private:
static const VRegister bregisters[];
static const VRegister hregisters[];
static const VRegister sregisters[];
static const VRegister dregisters[];
static const VRegister qregisters[];
static const VRegister vregisters[];
int lanes_;
};
// Backward compatibility for FPRegisters.
typedef VRegister FPRegister;
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
const Register NoReg;
const VRegister NoVReg;
const FPRegister NoFPReg; // For backward compatibility.
const CPURegister NoCPUReg;
#define DEFINE_REGISTERS(N) \
const WRegister w##N(N); \
const XRegister x##N(N);
AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
const WRegister wsp(kSPRegInternalCode);
const XRegister sp(kSPRegInternalCode);
#define DEFINE_VREGISTERS(N) \
const VRegister b##N(N, kBRegSize); \
const VRegister h##N(N, kHRegSize); \
const VRegister s##N(N, kSRegSize); \
const VRegister d##N(N, kDRegSize); \
const VRegister q##N(N, kQRegSize); \
const VRegister v##N(N, kQRegSize);
AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
// Register aliases.
const XRegister ip0 = x16;
const XRegister ip1 = x17;
const XRegister lr = x30;
const XRegister xzr = x31;
const WRegister wzr = w31;
// AreAliased returns true if any of the named registers overlap. Arguments
// set to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg,
const CPURegister& reg5 = NoCPUReg,
const CPURegister& reg6 = NoCPUReg,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
// AreEven returns true if all of the specified registers have even register
// indices. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreEven(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreConsecutive returns true if all of the specified registers are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoCPUReg).
bool AreConsecutive(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg);
// AreSameFormat returns true if all of the specified VRegisters have the same
// vector format. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoVReg).
bool AreSameFormat(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
// AreConsecutive returns true if all of the specified VRegisters are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoVReg).
bool AreConsecutive(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
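// Illustrative sketch (not part of the library): typical uses of the
// predicates above, assuming the register aliases defined earlier in this
// header.
//
//   AreAliased(x0, x1, w0);             // true: w0 and x0 overlap.
//   AreSameSizeAndType(x0, w1);         // false: different sizes.
//   AreConsecutive(x4, x5, x6);         // true: consecutive register codes.
//   AreSameFormat(v0.V4S(), v1.V4S());  // true: same lane size and count.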
// Lists of registers.
class CPURegList {
public:
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
size_(reg1.GetSizeInBits()),
type_(reg1.GetType()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type,
unsigned size,
unsigned first_reg,
unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(
((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kVRegister) &&
(last_reg < kNumberOfVRegisters)));
VIXL_ASSERT(last_reg >= first_reg);
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
list_ &= ~((UINT64_C(1) << first_reg) - 1);
VIXL_ASSERT(IsValid());
}
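// Worked example (illustrative): CPURegList(CPURegister::kRegister, kXRegSize,
// 19, 22) builds the mask for x19-x22:
//   (UINT64_C(1) << 23) - 1        -> 0x00000000007fffff
//   &= ~((UINT64_C(1) << 19) - 1)  -> 0x0000000000780000  (bits 19 to 22 set)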
CPURegister::RegisterType GetType() const {
VIXL_ASSERT(IsValid());
return type_;
}
VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
return GetType();
}
// Combine another CPURegList into this one. Registers that already exist in
// this list are left unchanged. The type and size of the registers in the
// 'other' list must match those in this list.
void Combine(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ |= other.GetList();
}
// Remove every register in the other CPURegList from this one. Registers that
// do not exist in this list are ignored. The type and size of the registers
// in the 'other' list must match those in this list.
void Remove(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ &= ~other.GetList();
}
// Variants of Combine and Remove which take a single register.
void Combine(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Combine(other.GetCode());
}
void Remove(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Remove(other.GetCode());
}
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
}
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
}
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
bool Overlaps(const CPURegList& other) const {
return (type_ == other.type_) && ((list_ & other.list_) != 0);
}
RegList GetList() const {
VIXL_ASSERT(IsValid());
return list_;
}
VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }
void SetList(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
return SetList(new_list);
}
// Remove all callee-saved registers from the list. This can be useful when
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
CPURegister PopLowestIndex();
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
// AAPCS64 caller-saved registers. Note that this includes lr.
// TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
// 64-bits being caller-saved.
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
}
bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return ((code & list_) != 0);
}
int GetCount() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_);
}
VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); }
int GetRegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
return GetRegisterSizeInBits();
}
int GetRegisterSizeInBytes() const {
int size_in_bits = GetRegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
return GetRegisterSizeInBytes();
}
unsigned GetTotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return GetRegisterSizeInBytes() * GetCount();
}
VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
return GetTotalSizeInBytes();
}
private:
RegList list_;
int size_;
CPURegister::RegisterType type_;
bool IsValid() const;
};
// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;
// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;
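// Illustrative sketch (not part of the library): building and consuming a
// CPURegList, using the register aliases defined above.
//
//   CPURegList saved = CPURegList::GetCalleeSaved();  // Typically x19-x29.
//   saved.Combine(lr);        // Also preserve the link register.
//   saved.Remove(x29);        // Manage the frame pointer separately.
//   while (!saved.IsEmpty()) {
//     CPURegister reg = saved.PopLowestIndex();
//     // ... save or restore `reg` here ...
//   }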
// Operand.
class Operand {
public:
// #<immediate>
// where <immediate> is int64_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
// rm, {<shift> #<shift_amount>}
// where <shift> is one of {LSL, LSR, ASR, ROR}.
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register reg,
Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, {<extend> {#<shift_amount>}}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
bool IsImmediate() const;
bool IsPlainRegister() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;
bool IsZero() const;
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
// which helps in the encoding of instructions that use the stack pointer.
Operand ToExtendedRegister() const;
int64_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return immediate_;
}
VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
return GetImmediate();
}
int64_t GetEquivalentImmediate() const {
return IsZero() ? 0 : GetImmediate();
}
Register GetRegister() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
Register GetBaseRegister() const { return GetRegister(); }
Shift GetShift() const {
VIXL_ASSERT(IsShiftedRegister());
return shift_;
}
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const {
VIXL_ASSERT(IsExtendedRegister());
return extend_;
}
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
private:
int64_t immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
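// Illustrative sketch (not part of the library): the three forms an Operand
// can take, using the register aliases defined above.
//
//   Operand imm(0x1234);            // #0x1234
//   Operand shifted(x1, LSL, 4);    // x1, LSL #4
//   Operand extended(w2, UXTW, 2);  // w2, UXTW #2
//
//   imm.IsImmediate();              // true
//   shifted.IsShiftedRegister();    // true
//   extended.IsExtendedRegister();  // true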
// MemOperand represents the addressing mode of a load or store instruction.
class MemOperand {
public:
// Creates an invalid `MemOperand`.
MemOperand();
explicit MemOperand(Register base,
int64_t offset = 0,
AddrMode addrmode = Offset);
MemOperand(Register base,
Register regoffset,
Shift shift = LSL,
unsigned shift_amount = 0);
MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount = 0);
MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
const Register& GetBaseRegister() const { return base_; }
VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) {
return GetBaseRegister();
}
const Register& GetRegisterOffset() const { return regoffset_; }
VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) {
return GetRegisterOffset();
}
int64_t GetOffset() const { return offset_; }
VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); }
AddrMode GetAddrMode() const { return addrmode_; }
VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) {
return GetAddrMode();
}
Shift GetShift() const { return shift_; }
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const { return extend_; }
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const { return shift_amount_; }
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
bool IsImmediateOffset() const;
bool IsRegisterOffset() const;
bool IsPreIndex() const;
bool IsPostIndex() const;
void AddOffset(int64_t offset);
bool IsValid() const {
return base_.IsValid() &&
((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
(addrmode_ == PostIndex)) &&
((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
((offset_ == 0) || !regoffset_.IsValid());
}
bool Equals(const MemOperand& other) const {
return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
(offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
(shift_ == other.shift_) && (extend_ == other.extend_) &&
(shift_amount_ == other.shift_amount_);
}
private:
Register base_;
Register regoffset_;
int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
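// Illustrative sketch (not part of the library): common addressing modes
// expressed as MemOperands, using the register aliases defined above.
//
//   MemOperand(x0);                 // [x0]
//   MemOperand(x0, 16);             // [x0, #16]
//   MemOperand(x0, 16, PreIndex);   // [x0, #16]!
//   MemOperand(x0, 16, PostIndex);  // [x0], #16
//   MemOperand(x0, x1, LSL, 3);     // [x0, x1, LSL #3]
//   MemOperand(x0, w1, SXTW, 2);    // [x0, w1, SXTW #2]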
// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
public:
GenericOperand() { VIXL_ASSERT(!IsValid()); }
GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit)
GenericOperand(const MemOperand& mem_op,
size_t mem_op_size = 0); // NOLINT(runtime/explicit)
bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
bool Equals(const GenericOperand& other) const;
bool IsCPURegister() const {
VIXL_ASSERT(IsValid());
return cpu_register_.IsValid();
}
bool IsRegister() const {
return IsCPURegister() && cpu_register_.IsRegister();
}
bool IsVRegister() const {
return IsCPURegister() && cpu_register_.IsVRegister();
}
bool IsSameCPURegisterType(const GenericOperand& other) {
return IsCPURegister() && other.IsCPURegister() &&
GetCPURegister().IsSameType(other.GetCPURegister());
}
bool IsMemOperand() const {
VIXL_ASSERT(IsValid());
return mem_op_.IsValid();
}
CPURegister GetCPURegister() const {
VIXL_ASSERT(IsCPURegister());
return cpu_register_;
}
MemOperand GetMemOperand() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_;
}
size_t GetMemOperandSizeInBytes() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_size_;
}
size_t GetSizeInBytes() const {
return IsCPURegister() ? cpu_register_.GetSizeInBytes()
: GetMemOperandSizeInBytes();
}
size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
private:
CPURegister cpu_register_;
MemOperand mem_op_;
// The size of the memory region pointed to, in bytes.
// We only support sizes up to X/D register sizes.
size_t mem_op_size_;
};
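// Illustrative sketch (not part of the library): a GenericOperand names either
// a register or a memory location, so helpers can move data without caring
// which one they were given.
//
//   GenericOperand in_reg(x0);
//   GenericOperand in_mem(MemOperand(sp, 16), 8);  // An X-sized stack slot.
//   in_reg.IsCPURegister();   // true
//   in_mem.IsMemOperand();    // true
//   in_mem.GetSizeInBytes();  // 8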
}
} // namespace vixl::aarch64
#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#include "simulator-aarch64.h"
#include "utils-vixl.h"
namespace vixl {
namespace aarch64 {
// Randomly generated example keys, for simulation only.
const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
0xab9fd4e14b2fec51,
0};
const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8,
0x5267ac6fc280fb7c,
1};
const Simulator::PACKey Simulator::kPACKeyDA = {0x5caef808deb8b1e2,
0xd347cbc06b7b0f77,
0};
const Simulator::PACKey Simulator::kPACKeyDB = {0xe06aa1a949ba8cc7,
0xcfde69e3db6d0432,
1};
// The general PAC key isn't intended to be used with AuthPAC, so we give it an
// invalid key number; using it with AuthPAC will trigger an assertion.
const Simulator::PACKey Simulator::kPACKeyGA = {0xfcd98a44d564b3d5,
0x6c56df1904bf0ddc,
-1};
static uint64_t GetNibble(uint64_t in_data, int position) {
return (in_data >> position) & 0xf;
}
static uint64_t ShuffleNibbles(uint64_t in_data) {
static int in_positions[16] =
{4, 36, 52, 40, 44, 0, 24, 12, 56, 60, 8, 32, 16, 28, 20, 48};
uint64_t out_data = 0;
for (int i = 0; i < 16; i++) {
out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
}
return out_data;
}
static uint64_t SubstituteNibbles(uint64_t in_data) {
// Randomly chosen substitutes.
static uint64_t subs[16] =
{4, 7, 3, 9, 10, 14, 0, 1, 15, 2, 8, 6, 12, 5, 11, 13};
uint64_t out_data = 0;
for (int i = 0; i < 16; i++) {
int index = (in_data >> (4 * i)) & 0xf;
out_data |= subs[index] << (4 * i);
}
return out_data;
}
// Rotate nibble to the left by the amount specified.
static uint64_t RotNibble(uint64_t in_cell, int amount) {
VIXL_ASSERT((amount >= 0) && (amount <= 3));
in_cell &= 0xf;
uint64_t temp = (in_cell << 4) | in_cell;
return (temp >> (4 - amount)) & 0xf;
}
static uint64_t BigShuffle(uint64_t in_data) {
uint64_t out_data = 0;
for (int i = 0; i < 4; i++) {
uint64_t n12 = GetNibble(in_data, 4 * (i + 12));
uint64_t n8 = GetNibble(in_data, 4 * (i + 8));
uint64_t n4 = GetNibble(in_data, 4 * (i + 4));
uint64_t n0 = GetNibble(in_data, 4 * (i + 0));
uint64_t t0 = RotNibble(n8, 2) ^ RotNibble(n4, 1) ^ RotNibble(n0, 1);
uint64_t t1 = RotNibble(n12, 1) ^ RotNibble(n4, 2) ^ RotNibble(n0, 1);
uint64_t t2 = RotNibble(n12, 2) ^ RotNibble(n8, 1) ^ RotNibble(n0, 1);
uint64_t t3 = RotNibble(n12, 1) ^ RotNibble(n8, 1) ^ RotNibble(n4, 2);
out_data |= t3 << (4 * (i + 0));
out_data |= t2 << (4 * (i + 4));
out_data |= t1 << (4 * (i + 8));
out_data |= t0 << (4 * (i + 12));
}
return out_data;
}
// A simple, non-standard hash function invented for the simulator. It mixes
// reasonably well; however, it is unlikely to be cryptographically secure and
// may have a higher collision chance than other hashing algorithms.
uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) {
uint64_t working_value = data ^ key.high;
working_value = BigShuffle(working_value);
working_value = ShuffleNibbles(working_value);
working_value ^= key.low;
working_value = ShuffleNibbles(working_value);
working_value = BigShuffle(working_value);
working_value ^= context;
working_value = SubstituteNibbles(working_value);
working_value = BigShuffle(working_value);
working_value = SubstituteNibbles(working_value);
return working_value;
}
// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without
// codes, but is always 55 once a PAC code is added to a pointer. For this
// reason, it must be calculated at the call site.
uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) {
int bottom_pac_bit = GetBottomPACBit(ptr, ttbr);
int top_pac_bit = GetTopPACBit(ptr, type);
return ExtractUnsignedBitfield64(top_pac_bit,
bottom_pac_bit,
0xffffffffffffffff & ~kTTBRMask)
<< bottom_pac_bit;
}
uint64_t Simulator::AuthPAC(uint64_t ptr,
uint64_t context,
PACKey key,
PointerType type) {
VIXL_ASSERT((key.number == 0) || (key.number == 1));
uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
uint64_t original_ptr =
((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
uint64_t pac = ComputePAC(original_ptr, context, key);
uint64_t error_code = 1 << key.number;
if ((pac & pac_mask) == (ptr & pac_mask)) {
return original_ptr;
} else {
int error_lsb = GetTopPACBit(ptr, type) - 2;
uint64_t error_mask = UINT64_C(0x3) << error_lsb;
return (original_ptr & ~error_mask) | (error_code << error_lsb);
}
}
uint64_t Simulator::AddPAC(uint64_t ptr,
uint64_t context,
PACKey key,
PointerType type) {
int top_pac_bit = GetTopPACBit(ptr, type);
// TODO: Properly handle the case where extension bits are bad and TBI is
// turned off, and also test me.
VIXL_ASSERT(HasTBI(ptr, type));
int ttbr = (ptr >> 55) & 1;
uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
uint64_t pac = ComputePAC(ext_ptr, context, key);
// If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
// the resulting code.
if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
pac ^= UINT64_C(1) << (top_pac_bit - 1);
}
uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
}
uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
}
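// Illustrative sketch (not part of the library): the expected round trip
// through the Simulator helpers above, called from simulator code. The
// PointerType value kInstructionPointer is assumed here for code pointers.
//
//   uint64_t signed_ptr = AddPAC(ptr, ctx, kPACKeyIA, kInstructionPointer);
//   AuthPAC(signed_ptr, ctx, kPACKeyIA, kInstructionPointer);  // == ptr
//   StripPAC(signed_ptr, kInstructionPointer);                 // == ptr
//   // Authenticating with the wrong key or context does not restore `ptr`;
//   // an error code is planted just below the top PAC bit instead.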
} // namespace aarch64
} // namespace vixl
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
// Debug instructions.
//
// VIXL's macro-assembler and simulator support a few pseudo instructions to
// make debugging easier. These pseudo instructions do not exist on real
// hardware.
//
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
// simulator, so that users can check that the input is valid native code.
// (This isn't possible in all cases. Printf won't work, for example.)
//
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
// immediate field is used to identify the type of debug pseudo instruction.
enum DebugHltOpcode {
kUnreachableOpcode = 0xdeb0,
kPrintfOpcode,
kTraceOpcode,
kLogOpcode,
kRuntimeCallOpcode,
kSetCPUFeaturesOpcode,
kEnableCPUFeaturesOpcode,
kDisableCPUFeaturesOpcode,
kSaveCPUFeaturesOpcode,
kRestoreCPUFeaturesOpcode,
// Aliases.
kDebugHltFirstOpcode = kUnreachableOpcode,
kDebugHltLastOpcode = kLogOpcode
};
VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes);
// Each pseudo instruction uses a custom encoding for additional arguments, as
// described below.
// Unreachable - kUnreachableOpcode
//
// Instruction which should never be executed. This is used as a guard in parts
// of the code that should not be reachable, such as in data encoded inline in
// the instructions.
// Printf - kPrintfOpcode
// - arg_count: The number of arguments.
// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Simulate a call to printf.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
//
// Also, the following registers are populated (as if for a native AArch64
// call):
// x0: The format string
// x1-x7: Optional arguments, if type == CPURegister::kRegister
// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;
const unsigned kPrintfMaxArgCount = 4;
// The argument pattern is a set of two-bit fields, each with one of the
// following values:
enum PrintfArgPattern {
kPrintfArgW = 1,
kPrintfArgX = 2,
// There is no kPrintfArgS because floats are always converted to doubles in C
// varargs calls.
kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;
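// Illustrative sketch (not part of the library): the packed pattern for a call
// like printf("%s %d %f", str, i, d), where `str` is passed as an X register,
// `i` as a W register and `d` as a D register.
//
//   uint32_t arg_count = 3;
//   uint32_t arg_pattern = kPrintfArgX
//                        | (kPrintfArgW << (1 * kPrintfArgPatternBits))
//                        | (kPrintfArgD << (2 * kPrintfArgPatternBits));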
// Trace - kTraceOpcode
// - parameter: TraceParameter stored as a uint32_t
// - command: TraceCommand stored as a uint32_t
//
// Allow for trace management in the generated code. This enables or disables
// automatic tracing of the specified information for every simulated
// instruction.
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
const unsigned kTraceLength = 3 * kInstructionSize;
// Trace parameters.
enum TraceParameters {
LOG_DISASM = 1 << 0, // Log disassembly.
LOG_REGS = 1 << 1, // Log general purpose registers.
LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
LOG_WRITE = 1 << 4, // Log writes to memory.
LOG_BRANCH = 1 << 5, // Log taken branches.
LOG_NONE = 0,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH
};
// Trace commands.
enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 };
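// Illustrative sketch (not part of the library), assuming the MacroAssembler
// exposes a Trace() helper that emits this pseudo instruction:
//
//   masm.Trace(LOG_ALL, TRACE_ENABLE);   // Start tracing everything.
//   // ... the code of interest ...
//   masm.Trace(LOG_ALL, TRACE_DISABLE);  // Stop tracing.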
// Log - kLogOpcode
// - parameter: TraceParameter stored as a uint32_t
//
// Print the specified information once. This mechanism is separate from Trace.
// In particular, _all_ of the specified registers are printed, rather than just
// the registers that the instruction writes.
//
// Any combination of the TraceParameters values can be used, except that
// LOG_DISASM is not supported for Log.
const unsigned kLogParamsOffset = 1 * kInstructionSize;
const unsigned kLogLength = 2 * kInstructionSize;
// Runtime call simulation - kRuntimeCallOpcode
enum RuntimeCallType { kCallRuntime, kTailCallRuntime };
const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize;
// The size of a pointer on host.
const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t);
const unsigned kRuntimeCallFunctionOffset =
kRuntimeCallWrapperOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallTypeOffset =
kRuntimeCallFunctionOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t);
// Enable or disable CPU features - kSetCPUFeaturesOpcode
// - kEnableCPUFeaturesOpcode
// - kDisableCPUFeaturesOpcode
// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as
// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone.
// - [Padding to align to kInstructionSize.]
//
// 'Set' completely overwrites the existing CPU features.
// 'Enable' and 'Disable' update the existing CPU features.
//
// These mechanisms allow users to strictly check the use of CPU features in
// different regions of code.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
typedef uint8_t ConfigureCPUFeaturesElementType;
const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize;
// Save or restore CPU features - kSaveCPUFeaturesOpcode
// - kRestoreCPUFeaturesOpcode
//
// These provide a stack-like mechanism for preserving the CPU features, or
// restoring the last-preserved features. These pseudo-instructions take no
// arguments.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_ASSEMBLER_BASE_H
#define VIXL_ASSEMBLER_BASE_H
#include "code-buffer-vixl.h"
namespace vixl {
class CodeBufferCheckScope;
namespace internal {
class AssemblerBase {
public:
AssemblerBase() : allow_assembler_(false) {}
explicit AssemblerBase(size_t capacity)
: buffer_(capacity), allow_assembler_(false) {}
AssemblerBase(byte* buffer, size_t capacity)
: buffer_(buffer, capacity), allow_assembler_(false) {}
virtual ~AssemblerBase() {}
// Finalize a code buffer of generated instructions. This function must be
// called before executing or copying code from the buffer.
void FinalizeCode() { GetBuffer()->SetClean(); }
ptrdiff_t GetCursorOffset() const { return GetBuffer().GetCursorOffset(); }
// Return the address of the cursor.
template <typename T>
T GetCursorAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetBuffer().GetOffsetAddress<T>(GetCursorOffset());
}
size_t GetSizeOfCodeGenerated() const { return GetCursorOffset(); }
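// Illustrative sketch (not part of the library): the usual life cycle of an
// assembler derived from this class, where `Assembler` is an assumed concrete
// subclass.
//
//   Assembler assm;
//   // ... emit instructions ...
//   assm.FinalizeCode();  // Mark the buffer clean before using the code.
//   byte* code = assm.GetBuffer()->GetStartAddress<byte*>();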
// Accessors.
CodeBuffer* GetBuffer() { return &buffer_; }
const CodeBuffer& GetBuffer() const { return buffer_; }
bool AllowAssembler() const { return allow_assembler_; }
protected:
void SetAllowAssembler(bool allow) { allow_assembler_ = allow; }
// CodeBufferCheckScope must be able to temporarily allow the assembler.
friend class vixl::CodeBufferCheckScope;
// Buffer where the code is emitted.
CodeBuffer buffer_;
private:
bool allow_assembler_;
public:
// Deprecated public interface.
// Return the address of an offset in the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetOffsetAddress<T>(offset)",
T GetOffsetAddress(ptrdiff_t offset) const) {
return GetBuffer().GetOffsetAddress<T>(offset);
}
// Return the address of the start of the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetStartAddress<T>()",
T GetStartAddress() const) {
return GetBuffer().GetOffsetAddress<T>(0);
}
};
} // namespace internal
} // namespace vixl
#endif // VIXL_ASSEMBLER_BASE_H
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <sys/mman.h>
}
#include "code-buffer-vixl.h"
#include "utils-vixl.h"
namespace vixl {
CodeBuffer::CodeBuffer(size_t capacity)
: buffer_(NULL),
managed_(true),
cursor_(NULL),
dirty_(false),
capacity_(capacity) {
if (capacity_ == 0) {
return;
}
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = reinterpret_cast<byte*>(malloc(capacity_));
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = reinterpret_cast<byte*>(mmap(NULL,
capacity,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1,
0));
#else
#error Unknown code buffer allocator.
#endif
VIXL_CHECK(buffer_ != NULL);
// AArch64 instructions must be word-aligned; we assert that the default
// allocator always returns word-aligned memory.
VIXL_ASSERT(IsWordAligned(buffer_));
cursor_ = buffer_;
}
CodeBuffer::CodeBuffer(byte *buffer, size_t capacity)
: buffer_(reinterpret_cast<byte *>(buffer)),
managed_(false),
cursor_(reinterpret_cast<byte *>(buffer)),
dirty_(false),
capacity_(capacity) {
VIXL_ASSERT(buffer_ != NULL);
}
CodeBuffer::~CodeBuffer() {
VIXL_ASSERT(!IsDirty());
if (managed_) {
#ifdef VIXL_CODE_BUFFER_MALLOC
free(buffer_);
#elif defined(VIXL_CODE_BUFFER_MMAP)
munmap(buffer_, capacity_);
#else
#error Unknown code buffer allocator.
#endif
}
}
#ifdef VIXL_CODE_BUFFER_MMAP
void CodeBuffer::SetExecutable() {
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC);
VIXL_CHECK(ret == 0);
}
#endif
#ifdef VIXL_CODE_BUFFER_MMAP
void CodeBuffer::SetWritable() {
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE);
VIXL_CHECK(ret == 0);
}
#endif
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
char *
stpcpy(char *to, const char *from) {
for (; (*to = *from) != '\0'; ++from, ++to);
return (to);
}
#endif
void CodeBuffer::EmitString(const char *string) {
VIXL_ASSERT(HasSpaceFor(strlen(string) + 1));
char *dst = reinterpret_cast<char *>(cursor_);
dirty_ = true;
char *null_char = stpcpy(dst, string);
cursor_ = reinterpret_cast<byte *>(null_char) + 1;
}
void CodeBuffer::EmitData(const void *data, size_t size) {
VIXL_ASSERT(HasSpaceFor(size));
dirty_ = true;
memcpy(cursor_, data, size);
cursor_ = cursor_ + size;
}
void CodeBuffer::UpdateData(size_t offset, const void *data, size_t size) {
dirty_ = true;
byte *dst = buffer_ + offset;
VIXL_ASSERT(dst + size <= cursor_);
memcpy(dst, data, size);
}
void CodeBuffer::Align() {
byte *end = AlignUp(cursor_, 4);
const size_t padding_size = end - cursor_;
VIXL_ASSERT(padding_size <= 4);
EmitZeroedBytes(static_cast<int>(padding_size));
}
void CodeBuffer::EmitZeroedBytes(int n) {
EnsureSpaceFor(n);
dirty_ = true;
memset(cursor_, 0, n);
cursor_ += n;
}
void CodeBuffer::Reset() {
#ifdef VIXL_DEBUG
if (managed_) {
// Fill with zeros (there is no useful value common to A32 and T32).
memset(buffer_, 0, capacity_);
}
#endif
cursor_ = buffer_;
SetClean();
}
void CodeBuffer::Grow(size_t new_capacity) {
VIXL_ASSERT(managed_);
VIXL_ASSERT(new_capacity > capacity_);
ptrdiff_t cursor_offset = GetCursorOffset();
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = static_cast<byte*>(realloc(buffer_, new_capacity));
VIXL_CHECK(buffer_ != NULL);
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = static_cast<byte*>(
mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE));
VIXL_CHECK(buffer_ != MAP_FAILED);
#else
#error Unknown code buffer allocator.
#endif
cursor_ = buffer_ + cursor_offset;
capacity_ = new_capacity;
}
} // namespace vixl
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_BUFFER_H
#define VIXL_CODE_BUFFER_H
#include <cstring>
#include "globals-vixl.h"
#include "utils-vixl.h"
namespace vixl {
class CodeBuffer {
public:
static const size_t kDefaultCapacity = 4 * KBytes;
explicit CodeBuffer(size_t capacity = kDefaultCapacity);
CodeBuffer(byte* buffer, size_t capacity);
~CodeBuffer();
void Reset();
#ifdef VIXL_CODE_BUFFER_MMAP
void SetExecutable();
void SetWritable();
#else
// These require page-aligned memory blocks, which we can only guarantee with
// mmap.
VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); }
VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); }
#endif
ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const {
ptrdiff_t cursor_offset = cursor_ - buffer_;
VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset));
return cursor_offset - offset;
}
VIXL_DEPRECATED("GetOffsetFrom",
ptrdiff_t OffsetFrom(ptrdiff_t offset) const) {
return GetOffsetFrom(offset);
}
ptrdiff_t GetCursorOffset() const { return GetOffsetFrom(0); }
VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) {
return GetCursorOffset();
}
void Rewind(ptrdiff_t offset) {
byte* rewound_cursor = buffer_ + offset;
VIXL_ASSERT((buffer_ <= rewound_cursor) && (rewound_cursor <= cursor_));
cursor_ = rewound_cursor;
}
template <typename T>
T GetOffsetAddress(ptrdiff_t offset) const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_)));
return reinterpret_cast<T>(buffer_ + offset);
}
// Return the address of the start or end of the emitted code.
template <typename T>
T GetStartAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(0);
}
template <typename T>
T GetEndAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(GetSizeInBytes());
}
size_t GetRemainingBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return (buffer_ + capacity_) - cursor_;
}
VIXL_DEPRECATED("GetRemainingBytes", size_t RemainingBytes() const) {
return GetRemainingBytes();
}
size_t GetSizeInBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return cursor_ - buffer_;
}
// A code buffer can emit:
// * 8, 16, 32 or 64-bit data: constant.
// * 16 or 32-bit data: instruction.
// * string: debug info.
void Emit8(uint8_t data) { Emit(data); }
void Emit16(uint16_t data) { Emit(data); }
void Emit32(uint32_t data) { Emit(data); }
void Emit64(uint64_t data) { Emit(data); }
void EmitString(const char* string);
void EmitData(const void* data, size_t size);
template <typename T>
void Emit(T value) {
VIXL_ASSERT(HasSpaceFor(sizeof(value)));
dirty_ = true;
memcpy(cursor_, &value, sizeof(value));
cursor_ += sizeof(value);
}
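// Illustrative sketch (not part of the library): emitting a constant, an
// instruction word and a debug string.
//
//   CodeBuffer buffer(4096);
//   buffer.Emit32(0xd503201f);    // The A64 NOP encoding, used as an example.
//   buffer.Emit64(UINT64_C(0x0123456789abcdef));
//   buffer.EmitString("marker");  // Also emits the trailing '\0'.
//   buffer.SetClean();            // The destructor asserts the buffer is clean.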
void UpdateData(size_t offset, const void* data, size_t size);
// Align to 32 bits.
void Align();
// Ensure there is enough space for and emit 'n' zero bytes.
void EmitZeroedBytes(int n);
bool Is16bitAligned() const { return IsAligned<2>(cursor_); }
bool Is32bitAligned() const { return IsAligned<4>(cursor_); }
size_t GetCapacity() const { return capacity_; }
VIXL_DEPRECATED("GetCapacity", size_t capacity() const) {
return GetCapacity();
}
bool IsManaged() const { return managed_; }
void Grow(size_t new_capacity);
bool IsDirty() const { return dirty_; }
void SetClean() { dirty_ = false; }
bool HasSpaceFor(size_t amount) const {
return GetRemainingBytes() >= amount;
}
void EnsureSpaceFor(size_t amount, bool* has_grown) {
bool is_full = !HasSpaceFor(amount);
if (is_full) Grow(capacity_ * 2 + amount);
VIXL_ASSERT(has_grown != NULL);
*has_grown = is_full;
}
void EnsureSpaceFor(size_t amount) {
bool dummy;
EnsureSpaceFor(amount, &dummy);
}
private:
// Backing store of the buffer.
byte* buffer_;
// If true the backing store is allocated and deallocated by the buffer. The
// backing store can then grow on demand. If false the backing store is
// provided by the user and cannot be resized internally.
bool managed_;
// Pointer to the next location to be written.
byte* cursor_;
// True if there has been any write since the buffer was created or cleaned.
bool dirty_;
// Capacity in bytes of the backing store.
size_t capacity_;
};
} // namespace vixl
#endif // VIXL_CODE_BUFFER_H
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_GENERATION_SCOPES_H_
#define VIXL_CODE_GENERATION_SCOPES_H_
#include "assembler-base-vixl.h"
#include "macro-assembler-interface.h"
namespace vixl {
// This scope will:
// - Allow code emission from the specified `Assembler`.
// - Optionally reserve space in the `CodeBuffer` (if it is managed by VIXL).
// - Optionally, on destruction, check the size of the generated code.
// (The size can be either exact or a maximum size.)
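// Illustrative sketch (not part of the library): reserving space for a known
// amount of code and asserting it was not exceeded, assuming `assm` derives
// from internal::AssemblerBase.
//
//   {
//     CodeBufferCheckScope scope(&assm, 16,  // Space for four A64 instructions.
//                                CodeBufferCheckScope::kReserveBufferSpace,
//                                CodeBufferCheckScope::kMaximumSize);
//     // ... emit at most 16 bytes of code ...
//   }  // In debug builds, the size check runs here, on destruction.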
class CodeBufferCheckScope {
public:
// Tell whether or not the scope needs to ensure the associated CodeBuffer
// has enough space for the requested size.
enum BufferSpacePolicy {
kReserveBufferSpace,
kDontReserveBufferSpace,
// Deprecated, but kept for backward compatibility.
kCheck = kReserveBufferSpace,
kNoCheck = kDontReserveBufferSpace
};
// Tell whether or not the scope should assert the amount of code emitted
// within the scope is consistent with the requested amount.
enum SizePolicy {
kNoAssert, // Do not check the size of the code emitted.
kExactSize, // The code emitted must be exactly size bytes.
kMaximumSize // The code emitted must be at most size bytes.
};
// This constructor implicitly calls `Open` to initialise the scope
// (`assembler` must not be `NULL`), so it is ready to use immediately after
// it has been constructed.
CodeBufferCheckScope(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize)
: assembler_(NULL), initialised_(false) {
Open(assembler, size, check_policy, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
CodeBufferCheckScope() : assembler_(NULL), initialised_(false) {
// Nothing to do.
}
virtual ~CodeBufferCheckScope() { Close(); }
// This function performs the actual initialisation work.
void Open(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize) {
VIXL_ASSERT(!initialised_);
VIXL_ASSERT(assembler != NULL);
assembler_ = assembler;
if (check_policy == kReserveBufferSpace) {
assembler->GetBuffer()->EnsureSpaceFor(size);
}
#ifdef VIXL_DEBUG
limit_ = assembler_->GetSizeOfCodeGenerated() + size;
assert_policy_ = size_policy;
previous_allow_assembler_ = assembler_->AllowAssembler();
assembler_->SetAllowAssembler(true);
#else
USE(size_policy);
#endif
initialised_ = true;
}
// This function performs the cleaning-up work. It must succeed even if the
// scope has not been opened. It is safe to call multiple times.
void Close() {
#ifdef VIXL_DEBUG
if (!initialised_) {
return;
}
assembler_->SetAllowAssembler(previous_allow_assembler_);
switch (assert_policy_) {
case kNoAssert:
break;
case kExactSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() == limit_);
break;
case kMaximumSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() <= limit_);
break;
default:
VIXL_UNREACHABLE();
}
#endif
initialised_ = false;
}
protected:
internal::AssemblerBase* assembler_;
SizePolicy assert_policy_;
size_t limit_;
bool previous_allow_assembler_;
bool initialised_;
};
// This scope will:
// - Do the same as `CodeBufferCheckScope`, but:
// - If managed by VIXL, always reserve space in the `CodeBuffer`.
// - Always check the size (exact or maximum) of the generated code on
// destruction.
// - Emit pools if the specified size would push them out of range.
// - Block pools emission for the duration of the scope.
// This scope allows the `Assembler` and `MacroAssembler` to be freely and
// safely mixed for its duration.
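// Illustrative sketch (not part of the library): a region where pools are
// blocked, assuming `masm` implements MacroAssemblerInterface.
//
//   {
//     EmissionCheckScope scope(&masm, 16);  // At most 16 bytes of code.
//     // Assembler and MacroAssembler calls may be mixed freely here, and no
//     // literal or veneer pool will be emitted in the middle of them.
//   }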
class EmissionCheckScope : public CodeBufferCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
EmissionCheckScope() {}
virtual ~EmissionCheckScope() { Close(); }
enum PoolPolicy {
// Do not forbid pool emission inside the scope. Pools will not be emitted
// on `Open` either.
kIgnorePools,
// Force pools to be generated on `Open` if necessary and block their
// emission inside the scope.
kBlockPools,
// Deprecated, but kept for backward compatibility.
kCheckPools = kBlockPools
};
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
// Perform the opposite of `Open`, which is:
// - Check the code generation limit was not exceeded.
// - Release the pools.
CodeBufferCheckScope::Close();
if (pool_policy_ == kBlockPools) {
masm_->ReleasePools();
}
VIXL_ASSERT(!initialised_);
}
protected:
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
if (masm == NULL) {
// Nothing to do.
// We may reach this point in a context of conditional code generation.
// See `aarch64::MacroAssembler::MoveImmediateHelper()` for an example.
return;
}
masm_ = masm;
pool_policy_ = pool_policy;
if (pool_policy_ == kBlockPools) {
// To avoid duplicating the work to check that enough space is available
// in the buffer, do not use the more generic `EnsureEmitFor()`. It is
// done below when opening `CodeBufferCheckScope`.
masm->EnsureEmitPoolsFor(size);
masm->BlockPools();
}
// The buffer should be checked *after* we emit the pools.
CodeBufferCheckScope::Open(masm->AsAssemblerBase(),
size,
kReserveBufferSpace,
size_policy);
VIXL_ASSERT(initialised_);
}
// This constructor should only be used from code that is *currently
// generating* the pools, to avoid an infinite loop.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
Open(masm, size, size_policy, pool_policy);
}
MacroAssemblerInterface* masm_;
PoolPolicy pool_policy_;
};
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope will:
// - Do the same as `EmissionCheckScope`.
// - Block access to the MacroAssemblerInterface (using run-time assertions).
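// Illustrative sketch (not part of the library): emitting exactly one raw
// instruction, assuming an AArch64 `masm` where kInstructionSize and the
// lower-case (raw assembler) mnemonics are available.
//
//   {
//     ExactAssemblyScope scope(&masm, kInstructionSize,
//                              ExactAssemblyScope::kExactSize);
//     masm.nop();  // Only raw assembler instructions are allowed in here.
//   }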
class ExactAssemblyScope : public EmissionCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
ExactAssemblyScope() {}
virtual ~ExactAssemblyScope() { Close(); }
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
#ifdef VIXL_DEBUG
masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_);
#else
USE(previous_allow_macro_assembler_);
#endif
EmissionCheckScope::Close();
}
protected:
// This protected constructor allows overriding the pool policy. It is
// available to allow this scope to be used in code that handles generation
// of pools.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy assert_policy,
PoolPolicy pool_policy) {
Open(masm, size, assert_policy, pool_policy);
}
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
VIXL_ASSERT(size_policy != kNoAssert);
if (masm == NULL) {
// Nothing to do.
return;
}
// Rely on EmissionCheckScope::Open to initialise `masm_` and
// `pool_policy_`.
EmissionCheckScope::Open(masm, size, size_policy, pool_policy);
#ifdef VIXL_DEBUG
previous_allow_macro_assembler_ = masm->AllowMacroInstructions();
masm->SetAllowMacroInstructions(false);
#endif
}
private:
bool previous_allow_macro_assembler_;
};
} // namespace vixl
#endif // VIXL_CODE_GENERATION_SCOPES_H_
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "compiler-intrinsics-vixl.h"
namespace vixl {
int CountLeadingSignBitsFallBack(int64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
return CountLeadingZeros(~value, width) - 1;
}
}
int CountLeadingZerosFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
if (value == 0) {
return width;
}
int count = 0;
value = value << (64 - width);
if ((value & UINT64_C(0xffffffff00000000)) == 0) {
count += 32;
value = value << 32;
}
if ((value & UINT64_C(0xffff000000000000)) == 0) {
count += 16;
value = value << 16;
}
if ((value & UINT64_C(0xff00000000000000)) == 0) {
count += 8;
value = value << 8;
}
if ((value & UINT64_C(0xf000000000000000)) == 0) {
count += 4;
value = value << 4;
}
if ((value & UINT64_C(0xc000000000000000)) == 0) {
count += 2;
value = value << 2;
}
if ((value & UINT64_C(0x8000000000000000)) == 0) {
count += 1;
}
count += (value == 0);
return count;
}
int CountSetBitsFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
// Mask out unused bits to ensure that they are not counted.
value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
const uint64_t kMasks[] = {
UINT64_C(0x5555555555555555),
UINT64_C(0x3333333333333333),
UINT64_C(0x0f0f0f0f0f0f0f0f),
UINT64_C(0x00ff00ff00ff00ff),
UINT64_C(0x0000ffff0000ffff),
UINT64_C(0x00000000ffffffff),
};
for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
int shift = 1 << i;
value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
}
return static_cast<int>(value);
}
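// Illustrative sketch (not part of the upstream sources): the pairwise
// reduction described in CountSetBitsFallBack above, unrolled for an 8-bit
// value. The helper name is hypothetical and exists only for this example.
inline int CountSetBits8Example(uint8_t value) {
  // 0x55 adds neighbouring bits, 0x33 adds the resulting 2-bit fields, and
  // 0x0f adds the resulting nibbles. These are the 8-bit prefixes of kMasks.
  value = static_cast<uint8_t>(((value >> 1) & 0x55) + (value & 0x55));
  value = static_cast<uint8_t>(((value >> 2) & 0x33) + (value & 0x33));
  value = static_cast<uint8_t>(((value >> 4) & 0x0f) + (value & 0x0f));
  // For example, CountSetBits8Example(0xb4) returns 4.
  return value;
}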
int CountTrailingZerosFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
int count = 0;
value = value << (64 - width);
if ((value & UINT64_C(0xffffffff)) == 0) {
count += 32;
value = value >> 32;
}
if ((value & 0xffff) == 0) {
count += 16;
value = value >> 16;
}
if ((value & 0xff) == 0) {
count += 8;
value = value >> 8;
}
if ((value & 0xf) == 0) {
count += 4;
value = value >> 4;
}
if ((value & 0x3) == 0) {
count += 2;
value = value >> 2;
}
if ((value & 0x1) == 0) {
count += 1;
}
count += (value == 0);
return count - (64 - width);
}
} // namespace vixl
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_COMPILER_INTRINSICS_H
#define VIXL_COMPILER_INTRINSICS_H
#include "globals-vixl.h"
namespace vixl {
// Helper to check whether the version of GCC used is greater than or equal to
// the specified requirement.
#define MAJOR 1000000
#define MINOR 1000
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR) + __GNUC_PATCHLEVEL__) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR)) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#else
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
#endif
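// Illustrative sketch (not part of the upstream header): the version check can
// be used to gate GCC-specific attributes. The macro name VIXL_EXAMPLE_COLD is
// hypothetical and exists only for this example.
#if GCC_VERSION_OR_NEWER(4, 3, 0)
#define VIXL_EXAMPLE_COLD __attribute__((cold))
#else
#define VIXL_EXAMPLE_COLD
#endif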
#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// clang-format off
#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
// clang-format on
#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// The documentation for these builtins is available at:
// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
// clang-format off
# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
// clang-format on
#else
// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
// implemented C++ methods.
// clang-format off
#define COMPILER_HAS_BUILTIN_BSWAP false
#define COMPILER_HAS_BUILTIN_CLRSB false
#define COMPILER_HAS_BUILTIN_CLZ false
#define COMPILER_HAS_BUILTIN_CTZ false
#define COMPILER_HAS_BUILTIN_FFS false
#define COMPILER_HAS_BUILTIN_POPCOUNT false
// clang-format on
#endif
template <typename V>
inline bool IsPowerOf2(V value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
// Declaration of fallback functions.
int CountLeadingSignBitsFallBack(int64_t value, int width);
int CountLeadingZerosFallBack(uint64_t value, int width);
int CountSetBitsFallBack(uint64_t value, int width);
int CountTrailingZerosFallBack(uint64_t value, int width);
// Implementation of intrinsics functions.
// TODO: The implementations could be improved for sizes different from 32-bit
// and 64-bit: we could mask the values and call the appropriate builtin.
template <typename V>
inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLRSB
if (width == 32) {
return __builtin_clrsb(value);
} else if (width == 64) {
return __builtin_clrsbll(value);
}
#endif
return CountLeadingSignBitsFallBack(value, width);
}
template <typename V>
inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_clzll(value);
}
#endif
return CountLeadingZerosFallBack(value, width);
}
template <typename V>
inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_POPCOUNT
if (width == 32) {
return __builtin_popcount(static_cast<unsigned>(value));
} else if (width == 64) {
return __builtin_popcountll(value);
}
#endif
return CountSetBitsFallBack(value, width);
}
template <typename V>
inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CTZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_ctzll(value);
}
#endif
return CountTrailingZerosFallBack(value, width);
}
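// Illustrative sketch (not part of the upstream header): how the dispatch
// above behaves. For 32-bit and 64-bit widths the compiler builtins are used
// when available; any other width always takes the fallback path. The function
// name is hypothetical and exists only for this example.
inline void CompilerIntrinsicsUsageExample() {
  // 32-bit query: eight leading zero bits in 0x00ff0000.
  VIXL_ASSERT(CountLeadingZeros(UINT32_C(0x00ff0000)) == 8);
  // Explicit non-standard width: only the low 16 bits are considered, so the
  // fallback implementation is used.
  VIXL_ASSERT(CountTrailingZeros(UINT64_C(0x10), 16) == 4);
  VIXL_ASSERT(CountSetBits(UINT64_C(0xf0f0)) == 8);
  // For -1, every bit matches the sign bit, giving 31 leading sign bits.
  VIXL_ASSERT(CountLeadingSignBits(INT32_C(-1)) == 31);
}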
} // namespace vixl
#endif // VIXL_COMPILER_INTRINSICS_H
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <ostream>
#include "cpu-features.h"
#include "globals-vixl.h"
#include "utils-vixl.h"
namespace vixl {
static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
if (feature == CPUFeatures::kNone) {
return 0;
} else {
// Check that the shift is well-defined, and that the feature is valid.
VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
(sizeof(uint64_t) * 8));
VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
return UINT64_C(1) << feature;
}
}
CPUFeatures::CPUFeatures(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3)
: features_(0) {
Combine(feature0, feature1, feature2, feature3);
}
CPUFeatures CPUFeatures::All() {
CPUFeatures all;
// Check that the shift is well-defined.
VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
return all;
}
CPUFeatures CPUFeatures::InferFromOS() {
// TODO: Actually infer features from the OS.
return CPUFeatures();
}
void CPUFeatures::Combine(const CPUFeatures& other) {
features_ |= other.features_;
}
void CPUFeatures::Combine(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) {
features_ |= MakeFeatureMask(feature0);
features_ |= MakeFeatureMask(feature1);
features_ |= MakeFeatureMask(feature2);
features_ |= MakeFeatureMask(feature3);
}
void CPUFeatures::Remove(const CPUFeatures& other) {
features_ &= ~other.features_;
}
void CPUFeatures::Remove(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) {
features_ &= ~MakeFeatureMask(feature0);
features_ &= ~MakeFeatureMask(feature1);
features_ &= ~MakeFeatureMask(feature2);
features_ &= ~MakeFeatureMask(feature3);
}
CPUFeatures CPUFeatures::With(const CPUFeatures& other) const {
CPUFeatures f(*this);
f.Combine(other);
return f;
}
CPUFeatures CPUFeatures::With(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
CPUFeatures f(*this);
f.Combine(feature0, feature1, feature2, feature3);
return f;
}
CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const {
CPUFeatures f(*this);
f.Remove(other);
return f;
}
CPUFeatures CPUFeatures::Without(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
CPUFeatures f(*this);
f.Remove(feature0, feature1, feature2, feature3);
return f;
}
bool CPUFeatures::Has(const CPUFeatures& other) const {
return (features_ & other.features_) == other.features_;
}
bool CPUFeatures::Has(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) |
MakeFeatureMask(feature2) | MakeFeatureMask(feature3);
return (features_ & mask) == mask;
}
size_t CPUFeatures::Count() const { return CountSetBits(features_); }
std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature feature) {
// clang-format off
switch (feature) {
#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \
case CPUFeatures::SYMBOL: \
return os << NAME;
VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE)
#undef VIXL_FORMAT_FEATURE
case CPUFeatures::kNone:
return os << "none";
case CPUFeatures::kNumberOfFeatures:
VIXL_UNREACHABLE();
}
// clang-format on
VIXL_UNREACHABLE();
return os;
}
CPUFeatures::const_iterator CPUFeatures::begin() const {
if (features_ == 0) return const_iterator(this, kNone);
int feature_number = CountTrailingZeros(features_);
vixl::CPUFeatures::Feature feature =
static_cast<CPUFeatures::Feature>(feature_number);
return const_iterator(this, feature);
}
CPUFeatures::const_iterator CPUFeatures::end() const {
return const_iterator(this, kNone);
}
std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) {
CPUFeatures::const_iterator it = features.begin();
while (it != features.end()) {
os << *it;
++it;
if (it != features.end()) os << ", ";
}
return os;
}
bool CPUFeaturesConstIterator::operator==(
const CPUFeaturesConstIterator& other) const {
VIXL_ASSERT(IsValid());
return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_);
}
CPUFeatures::Feature CPUFeaturesConstIterator::operator++() { // Prefix
VIXL_ASSERT(IsValid());
do {
// Find the next feature. The order is unspecified.
feature_ = static_cast<CPUFeatures::Feature>(feature_ + 1);
if (feature_ == CPUFeatures::kNumberOfFeatures) {
feature_ = CPUFeatures::kNone;
VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1);
}
VIXL_ASSERT(CPUFeatures::kNone <= feature_);
VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures);
// cpu_features_->Has(kNone) is always true, so this will terminate even if
// the features list is empty.
} while (!cpu_features_->Has(feature_));
return feature_;
}
CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) { // Postfix
CPUFeatures::Feature result = feature_;
++(*this);
return result;
}
} // namespace vixl
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_FEATURES_H
#define VIXL_CPU_FEATURES_H
#include <ostream>
#include "globals-vixl.h"
namespace vixl {
// clang-format off
#define VIXL_CPU_FEATURE_LIST(V) \
/* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \
/* registers, so that the detailed feature registers can be read */ \
/* directly. */ \
V(kIDRegisterEmulation, "ID register emulation", "cpuid") \
\
V(kFP, "FP", "fp") \
V(kNEON, "NEON", "asimd") \
V(kCRC32, "CRC32", "crc32") \
/* Cryptographic support instructions. */ \
V(kAES, "AES", "aes") \
V(kSHA1, "SHA1", "sha1") \
V(kSHA2, "SHA2", "sha2") \
/* A form of PMULL{2} with a 128-bit (1Q) result. */ \
V(kPmull1Q, "Pmull1Q", "pmull") \
/* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \
V(kAtomics, "Atomics", "atomics") \
/* Limited ordering regions: LDLAR, STLLR and their variants. */ \
V(kLORegions, "LORegions", NULL) \
/* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \
V(kRDM, "RDM", "asimdrdm") \
/* Scalable Vector Extension. */ \
V(kSVE, "SVE", "sve") \
/* SDOT and UDOT support (in NEON). */ \
V(kDotProduct, "DotProduct", "asimddp") \
/* Half-precision (FP16) support for FP and NEON, respectively. */ \
V(kFPHalf, "FPHalf", "fphp") \
V(kNEONHalf, "NEONHalf", "asimdhp") \
/* The RAS extension, including the ESB instruction. */ \
V(kRAS, "RAS", NULL) \
/* Data cache clean to the point of persistence: DC CVAP. */ \
V(kDCPoP, "DCPoP", "dcpop") \
/* Cryptographic support instructions. */ \
V(kSHA3, "SHA3", "sha3") \
V(kSHA512, "SHA512", "sha512") \
V(kSM3, "SM3", "sm3") \
V(kSM4, "SM4", "sm4") \
/* Pointer authentication for addresses. */ \
V(kPAuth, "PAuth", NULL) \
/* Pointer authentication for addresses uses QARMA. */ \
V(kPAuthQARMA, "PAuthQARMA", NULL) \
/* Generic authentication (using the PACGA instruction). */ \
V(kPAuthGeneric, "PAuthGeneric", NULL) \
/* Generic authentication uses QARMA. */ \
V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \
/* JavaScript-style FP -> integer conversion instruction: FJCVTZS. */ \
V(kJSCVT, "JSCVT", "jscvt") \
/* Complex number support for NEON: FCMLA and FCADD. */ \
V(kFcma, "Fcma", "fcma") \
/* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \
V(kRCpc, "RCpc", "lrcpc") \
V(kRCpcImm, "RCpc (imm)", "ilrcpc") \
/* Flag manipulation instructions: SETF{8,16}, CFINV, RMIF. */ \
V(kFlagM, "FlagM", "flagm") \
/* Unaligned single-copy atomicity. */ \
V(kUSCAT, "USCAT", "uscat") \
/* FP16 fused multiply-add or -subtract long: FMLAL{2}, FMLSL{2}. */ \
V(kFHM, "FHM", "asimdfhm") \
/* Data-independent timing (for selected instructions). */ \
V(kDIT, "DIT", "dit") \
/* Branch target identification. */ \
V(kBTI, "BTI", NULL) \
/* Flag manipulation instructions: {AX,XA}FLAG */ \
V(kAXFlag, "AXFlag", NULL)
// clang-format on
class CPUFeaturesConstIterator;
// A representation of the set of features known to be supported by the target
// device. Each feature is represented by a simple boolean flag.
//
// - When the Assembler is asked to assemble an instruction, it asserts (in
// debug mode) that the necessary features are available.
//
// - TODO: The MacroAssembler relies on the Assembler's assertions, but in
// some cases it may be useful for macros to generate a fall-back sequence
// in case features are not available.
//
// - The Simulator assumes by default that all features are available, but it
// is possible to configure it to fail if the simulated code uses features
// that are not enabled.
//
// The Simulator also offers pseudo-instructions to allow features to be
// enabled and disabled dynamically. This is useful when you want to ensure
// that some features are constrained to certain areas of code.
//
// - The base Disassembler knows nothing about CPU features, but the
// PrintDisassembler can be configured to annotate its output with warnings
// about unavailable features. The Simulator uses this feature when
// instruction trace is enabled.
//
// - The Decoder-based components -- the Simulator and PrintDisassembler --
// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of
// features actually encountered so that a large block of code can be
// examined (either directly or through simulation), and the required
// features analysed later.
//
// Expected usage:
//
// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for
//   // compatibility with older versions of VIXL.
// MacroAssembler masm;
//
// // Generate code only for the current CPU.
// masm.SetCPUFeatures(CPUFeatures::InferFromOS());
//
// // Turn off feature checking entirely.
// masm.SetCPUFeatures(CPUFeatures::All());
//
// Feature set manipulation:
//
// CPUFeatures f; // The default constructor gives an empty set.
// // Individual features can be added (or removed).
//   f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
// f.Remove(CPUFeatures::kNEON);
//
// // Some helpers exist for extensions that provide several features.
// f.Remove(CPUFeatures::All());
// f.Combine(CPUFeatures::AArch64LegacyBaseline());
//
// // Chained construction is also possible.
// CPUFeatures g =
// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
//
// // Features can be queried. Where multiple features are given, they are
// // combined with logical AND.
//   if (f.Has(CPUFeatures::kNEON)) { ... }
//   if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
//   if (f.Has(g)) { ... }
//   // If the empty set is requested, the result is always 'true'.
//   VIXL_ASSERT(f.Has(CPUFeatures()));
//
// // For debug and reporting purposes, features can be enumerated (or
// // printed directly):
// std::cout << CPUFeatures::kNEON; // Prints something like "NEON".
// std::cout << f; // Prints something like "FP, NEON, CRC32".
class CPUFeatures {
public:
// clang-format off
// Individual features.
// These should be treated as opaque tokens. User code should not rely on
// specific numeric values or ordering.
enum Feature {
// Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
// this class supports.
kNone = -1,
#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
#undef VIXL_DECLARE_FEATURE
kNumberOfFeatures
};
// clang-format on
// By default, construct with no features enabled.
CPUFeatures() : features_(0) {}
// Construct with some features already enabled.
CPUFeatures(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Construct with all features enabled. This can be used to disable feature
// checking: `Has(...)` returns true regardless of the argument.
static CPUFeatures All();
// Construct an empty CPUFeatures. This is equivalent to the default
// constructor, but is provided for symmetry and convenience.
static CPUFeatures None() { return CPUFeatures(); }
// The presence of these features was assumed by versions of VIXL before this
// API was added, so using this set by default ensures API compatibility.
static CPUFeatures AArch64LegacyBaseline() {
return CPUFeatures(kFP, kNEON, kCRC32);
}
// Construct a new CPUFeatures object based on what the OS reports.
static CPUFeatures InferFromOS();
// Combine another CPUFeatures object into this one. Features that already
// exist in this set are left unchanged.
void Combine(const CPUFeatures& other);
// Combine specific features into this set. Features that already exist in
// this set are left unchanged.
void Combine(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Remove features in another CPUFeatures object from this one.
void Remove(const CPUFeatures& other);
// Remove specific features from this set.
void Remove(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Chaining helpers for convenient construction.
CPUFeatures With(const CPUFeatures& other) const;
CPUFeatures With(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
CPUFeatures Without(const CPUFeatures& other) const;
CPUFeatures Without(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Query features.
// Note that an empty query (like `Has(kNone)`) always returns true.
bool Has(const CPUFeatures& other) const;
bool Has(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Return the number of enabled features.
size_t Count() const;
// Check for equivalence.
bool operator==(const CPUFeatures& other) const {
return Has(other) && other.Has(*this);
}
bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
typedef CPUFeaturesConstIterator const_iterator;
const_iterator begin() const;
const_iterator end() const;
private:
// Each bit represents a feature. This field will be replaced as needed if
// features are added.
uint64_t features_;
friend std::ostream& operator<<(std::ostream& os,
const vixl::CPUFeatures& features);
};
std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
// This is not a proper C++ iterator type, but it simulates enough of
// ForwardIterator that simple loops can be written.
class CPUFeaturesConstIterator {
public:
CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
CPUFeatures::Feature start = CPUFeatures::kNone)
: cpu_features_(cpu_features), feature_(start) {
VIXL_ASSERT(IsValid());
}
bool operator==(const CPUFeaturesConstIterator& other) const;
bool operator!=(const CPUFeaturesConstIterator& other) const {
return !(*this == other);
}
CPUFeatures::Feature operator++();
CPUFeatures::Feature operator++(int);
CPUFeatures::Feature operator*() const {
VIXL_ASSERT(IsValid());
return feature_;
}
// For proper support of C++'s simplest "Iterator" concept, this class would
// have to define member types (such as CPUFeaturesIterator::pointer) to make
// it appear as if it iterates over Feature objects in memory. That is, we'd
// need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
// This is at least partially possible -- the std::vector<bool> specialisation
// does something similar -- but it doesn't seem worthwhile for a
// special-purpose debug helper, so they are omitted here.
private:
const CPUFeatures* cpu_features_;
CPUFeatures::Feature feature_;
bool IsValid() const {
return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
cpu_features_->Has(feature_);
}
};
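// Illustrative sketch (not part of the upstream header): the kind of simple
// loop the iterator above is intended to support. The function name is
// hypothetical and exists only for this example.
inline size_t CountFeaturesByIterationExample(const CPUFeatures& features) {
  size_t count = 0;
  // Dereferencing an iterator yields a CPUFeatures::Feature; the iteration
  // order is unspecified.
  for (CPUFeatures::const_iterator it = features.begin(); it != features.end();
       ++it) {
    USE(*it);
    count++;
  }
  return count;  // Matches features.Count().
}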
// A convenience scope for temporarily modifying a CPU features object. This
// allows features to be enabled for short sequences.
//
// Expected usage:
//
// {
// CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
// // This scope can now use CRC32, as well as anything else that was enabled
// // before the scope.
//
// ...
//
// // At the end of the scope, the original CPU features are restored.
// }
class CPUFeaturesScope {
public:
// Start a CPUFeaturesScope on any object that implements
// `CPUFeatures* GetCPUFeatures()`.
template <typename T>
explicit CPUFeaturesScope(T* cpu_features_wrapper,
CPUFeatures::Feature feature0 = CPUFeatures::kNone,
CPUFeatures::Feature feature1 = CPUFeatures::kNone,
CPUFeatures::Feature feature2 = CPUFeatures::kNone,
CPUFeatures::Feature feature3 = CPUFeatures::kNone)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(feature0, feature1, feature2, feature3);
}
template <typename T>
CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(other);
}
~CPUFeaturesScope() { *cpu_features_ = old_features_; }
// For advanced usage, the CPUFeatures object can be accessed directly.
// The scope will restore the original state when it ends.
CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
void SetCPUFeatures(const CPUFeatures& cpu_features) {
*cpu_features_ = cpu_features;
}
private:
CPUFeatures* const cpu_features_;
const CPUFeatures old_features_;
};
} // namespace vixl
#endif // VIXL_CPU_FEATURES_H
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H
// Get standard C99 macros for integer types.
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
extern "C" {
#include <inttypes.h>
#include <stdint.h>
}
#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include "platform-vixl.h"
#ifdef VIXL_NEGATIVE_TESTING
#include <sstream>
#include <stdexcept>
#include <string>
#endif
namespace vixl {
typedef uint8_t byte;
const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
const int kBitsPerByte = 8;
template <int SizeInBits>
struct Unsigned;
template <>
struct Unsigned<32> {
typedef uint32_t type;
};
template <>
struct Unsigned<64> {
typedef uint64_t type;
};
} // namespace vixl
// Detect the host's pointer size.
#if (UINTPTR_MAX == UINT32_MAX)
#define VIXL_HOST_POINTER_32
#elif (UINTPTR_MAX == UINT64_MAX)
#define VIXL_HOST_POINTER_64
#else
#error "Unsupported host pointer size."
#endif
#ifdef VIXL_NEGATIVE_TESTING
#define VIXL_ABORT() \
do { \
std::ostringstream oss; \
oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
std::ostringstream oss; \
oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
std::ostringstream oss; \
oss << "Assertion failed (" #condition ")\nin "; \
oss << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} \
} while (false)
#else
#define VIXL_ABORT() \
do { \
printf("Aborting in %s, line %i\n", __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
printf("%sin %s, line %i\n", (msg), __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
printf("Assertion failed (%s)\nin %s, line %i\n", \
#condition, \
__FILE__, \
__LINE__); \
abort(); \
} \
} while (false)
#endif
#ifdef VIXL_DEBUG
#define VIXL_ASSERT(condition) VIXL_CHECK(condition)
#define VIXL_UNIMPLEMENTED() \
do { \
VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
} while (false)
#define VIXL_UNREACHABLE() \
do { \
VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
} while (false)
#else
#define VIXL_ASSERT(condition) ((void)0)
#define VIXL_UNIMPLEMENTED() ((void)0)
#define VIXL_UNREACHABLE() ((void)0)
#endif
// This is not as powerful as template-based assertions, but it is simple.
// It assumes that the descriptions are unique. If this starts being a problem,
// we can switch to a different implementation.
#define VIXL_CONCAT(a, b) a##b
#if __cplusplus >= 201103L
#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
static_assert(condition, message)
#else
#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
__attribute__((unused))
#endif
#define VIXL_STATIC_ASSERT(condition) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
#define VIXL_WARNING(message) \
do { \
printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
} while (false)
template <typename T1>
inline void USE(const T1&) {}
template <typename T1, typename T2>
inline void USE(const T1&, const T2&) {}
template <typename T1, typename T2, typename T3>
inline void USE(const T1&, const T2&, const T3&) {}
template <typename T1, typename T2, typename T3, typename T4>
inline void USE(const T1&, const T2&, const T3&, const T4&) {}
#define VIXL_ALIGNMENT_EXCEPTION() \
do { \
VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \
} while (0)
// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
// argument to annotate intentional fall-through between switch labels.
// For more information please refer to:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
#ifndef __has_warning
#define __has_warning(x) 0
#endif
// Fallthrough annotation for Clang and C++11(201103L).
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
// Fallthrough annotation for GCC >= 7.
#elif __GNUC__ >= 7
#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
#else
#define VIXL_FALLTHROUGH() \
do { \
} while (0)
#endif
#if __cplusplus >= 201103L
#define VIXL_NO_RETURN [[noreturn]]
#else
#define VIXL_NO_RETURN __attribute__((noreturn))
#endif
#ifdef VIXL_DEBUG
#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
#else
#define VIXL_NO_RETURN_IN_DEBUG_MODE
#endif
#if __cplusplus >= 201103L
#define VIXL_OVERRIDE override
#else
#define VIXL_OVERRIDE
#endif
// Some functions might only be marked as "noreturn" for the DEBUG build. This
// macro should be used for such cases (for more details see what
// VIXL_UNREACHABLE expands to).
#ifdef VIXL_DEBUG
#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
#else
#define VIXL_DEBUG_NO_RETURN
#endif
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
#endif
#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#warning "Generating Simulator instructions without Simulator support."
#endif
#endif
// We do not have a simulator for AArch32, although we can pretend we do so that
// tests that require running natively can be skipped.
#ifndef __arm__
#define VIXL_INCLUDE_SIMULATOR_AARCH32
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
#endif
#endif
#ifdef USE_SIMULATOR
#error "Please see the release notes for USE_SIMULATOR."
#endif
// Target Architecture/ISA
#ifdef VIXL_INCLUDE_TARGET_A64
#define VIXL_INCLUDE_TARGET_AARCH64
#endif
//#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
#define VIXL_INCLUDE_TARGET_AARCH32
//#elif defined(VIXL_INCLUDE_TARGET_A32)
//#define VIXL_INCLUDE_TARGET_A32_ONLY
//#else
//#define VIXL_INCLUDE_TARGET_T32_ONLY
//#endif
#endif // VIXL_GLOBALS_H
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_
#include <cstring>
#include <algorithm>
#include <vector>
#include "globals-vixl.h"
namespace vixl {
// We define a custom data structure template and its iterator as `std`
// containers do not fit the performance requirements for some of our use cases.
//
// The structure behaves like an iterable unordered set with special properties
// and restrictions. "InvalSet" stands for "Invalidatable Set".
//
// Restrictions and requirements:
// - Adding an element already present in the set is illegal. In debug mode,
// this is checked at insertion time.
// - The templated class `ElementType` must provide comparison operators so that
// `std::sort()` can be used.
// - A key must be available to represent invalid elements.
// - Elements with an invalid key must compare higher or equal to any other
// element.
//
// Use cases and performance considerations:
// Our use cases present two specificities that allow us to design this
// structure to provide fast insertion *and* fast search and deletion
// operations:
// - Elements are (generally) inserted in order (sorted according to their key).
// - A key is available to mark elements as invalid (deleted).
// The backing `std::vector` allows for fast insertions. When
// searching for an element we ensure the elements are sorted (this is generally
// the case) and perform a binary search. When deleting an element we do not
// free the associated memory immediately. Instead, an element to be deleted is
// marked with the 'invalid' key. Other methods of the container take care of
// ignoring entries marked as invalid.
// To avoid the overhead of the `std::vector` container when only a few entries
// are used, a number of elements are preallocated.
// 'ElementType' and 'KeyType' are respectively the types of the elements and
// their key. The structure only reclaims memory when safe to do so, if the
// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
// greater than `<total number of elements> / RECLAIM_FACTOR`.
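// Illustrative sketch (not part of the upstream header): an element type that
// satisfies the requirements above. The struct and its key convention are
// hypothetical; the real element types are supplied by the code that
// instantiates InvalSet.
struct ExampleInvalSetElement {
  // A key of -1 marks an element as invalid. Comparing keys as unsigned values
  // makes invalid elements compare higher than any valid element, as required.
  int32_t key;
  bool operator<(const ExampleInvalSetElement& other) const {
    return static_cast<uint32_t>(key) < static_cast<uint32_t>(other.key);
  }
  bool operator>(const ExampleInvalSetElement& other) const {
    return other < *this;
  }
  bool operator==(const ExampleInvalSetElement& other) const {
    return key == other.key;
  }
};
// A matching set type could then be declared as, for example:
//   typedef InvalSet<ExampleInvalSetElement, 8, int32_t, -1, 8, 4>
//       ExampleElementSet;
// together with definitions of its static GetKey/SetKey helpers for this
// element type.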
// clang-format off
#define TEMPLATE_INVALSET_P_DECL \
class ElementType, \
unsigned N_PREALLOCATED_ELEMENTS, \
class KeyType, \
KeyType INVALID_KEY, \
size_t RECLAIM_FROM, \
unsigned RECLAIM_FACTOR
// clang-format on
#define TEMPLATE_INVALSET_P_DEF \
ElementType, N_PREALLOCATED_ELEMENTS, KeyType, INVALID_KEY, RECLAIM_FROM, \
RECLAIM_FACTOR
template <class S>
class InvalSetIterator; // Forward declaration.
template <TEMPLATE_INVALSET_P_DECL>
class InvalSet {
public:
InvalSet();
~InvalSet();
static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
static const KeyType kInvalidKey = INVALID_KEY;
// C++ STL iterator interface.
typedef InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> > iterator;
iterator begin();
iterator end();
// It is illegal to insert an element already present in the set.
void insert(const ElementType& element);
// Looks for the specified element in the set and - if found - deletes it.
// The return value is the number of elements erased: either 0 or 1.
size_t erase(const ElementType& element);
// This indicates the number of (valid) elements stored in this set.
size_t size() const;
// Returns true if no elements are stored in the set.
// Note that this does not mean that the backing storage is empty: it can still
// contain invalid elements.
bool empty() const;
void clear();
const ElementType GetMinElement();
// This returns the key of the minimum element in the set.
KeyType GetMinElementKey();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
static void SetKey(ElementType* element, KeyType key);
typedef ElementType _ElementType;
typedef KeyType _KeyType;
protected:
// Returns a pointer to the element in vector_ if it was found, or NULL
// otherwise.
ElementType* Search(const ElementType& element);
// The argument *must* point to an element stored in *this* set.
// This function is not allowed to move elements in the backing vector
// storage.
void EraseInternal(ElementType* element);
// The elements in the range searched must be sorted.
ElementType* BinarySearch(const ElementType& element,
ElementType* start,
ElementType* end) const;
// Sort the elements.
enum SortType {
// The 'hard' version guarantees that invalid elements are moved to the end
// of the container.
kHardSort,
// The 'soft' version only guarantees that the elements will be sorted.
// Invalid elements may still be present anywhere in the set.
kSoftSort
};
void Sort(SortType sort_type);
// Delete the elements that have an invalid key. The complexity is linear
// with the size of the vector.
void Clean();
const ElementType Front() const;
const ElementType Back() const;
// Delete invalid trailing elements and return the last valid element in the
// set.
const ElementType CleanBack();
// Returns a pointer to the start or end of the backing storage.
const ElementType* StorageBegin() const;
const ElementType* StorageEnd() const;
ElementType* StorageBegin();
ElementType* StorageEnd();
// Returns the index of the element within the backing storage. The element
// must belong to the backing storage.
size_t GetElementIndex(const ElementType* element) const;
// Returns the element at the specified index in the backing storage.
const ElementType* GetElementAt(size_t index) const;
ElementType* GetElementAt(size_t index);
static const ElementType* GetFirstValidElement(const ElementType* from,
const ElementType* end);
void CacheMinElement();
const ElementType GetCachedMinElement() const;
bool ShouldReclaimMemory() const;
void ReclaimMemory();
bool IsUsingVector() const { return vector_ != NULL; }
void SetSorted(bool sorted) { sorted_ = sorted; }
// We cache some data commonly required by users to improve performance.
// We cannot cache pointers to elements as we do not control the backing
// storage.
bool valid_cached_min_;
size_t cached_min_index_; // Valid iff `valid_cached_min_` is true.
KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true.
// Indicates whether the elements are sorted.
bool sorted_;
// This represents the number of (valid) elements in this set.
size_t size_;
// The backing storage is either the array of preallocated elements or the
// vector. The structure starts by using the preallocated elements, and
// transitions (permanently) to using the vector once more than
// kNPreallocatedElements are used.
// Elements are only invalidated when using the vector. The preallocated
// storage always only contains valid elements.
ElementType preallocated_[kNPreallocatedElements];
std::vector<ElementType>* vector_;
// Iterators acquire and release this monitor. While a set is acquired,
// certain operations are illegal to ensure that the iterator will
// correctly iterate over the elements in the set.
int monitor_;
#ifdef VIXL_DEBUG
int monitor() const { return monitor_; }
void Acquire() { monitor_++; }
void Release() {
monitor_--;
VIXL_ASSERT(monitor_ >= 0);
}
#endif
private:
// The copy constructor and assignment operator are not used and the defaults
// are unsafe, so disable them (without an implementation).
#if __cplusplus >= 201103L
InvalSet(const InvalSet& other) = delete;
InvalSet operator=(const InvalSet& other) = delete;
#else
InvalSet(const InvalSet& other);
InvalSet operator=(const InvalSet& other);
#endif
friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
};
template <class S>
class InvalSetIterator : public std::iterator<std::forward_iterator_tag,
typename S::_ElementType> {
private:
// Redefine types to mirror the associated set types.
typedef typename S::_ElementType ElementType;
typedef typename S::_KeyType KeyType;
public:
explicit InvalSetIterator(S* inval_set = NULL);
// This class implements the standard copy-swap idiom.
~InvalSetIterator();
InvalSetIterator(const InvalSetIterator<S>& other);
InvalSetIterator<S>& operator=(InvalSetIterator<S> other);
#if __cplusplus >= 201103L
InvalSetIterator(InvalSetIterator<S>&& other) noexcept;
#endif
friend void swap(InvalSetIterator<S>& a, InvalSetIterator<S>& b) {
using std::swap;
swap(a.using_vector_, b.using_vector_);
swap(a.index_, b.index_);
swap(a.inval_set_, b.inval_set_);
}
// Return true if the iterator is at the end of the set.
bool Done() const;
// Move this iterator to the end of the set.
void Finish();
// Delete the current element and advance the iterator to point to the next
// element.
void DeleteCurrentAndAdvance();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
// Extra helpers to support the forward-iterator interface.
InvalSetIterator<S>& operator++(); // Pre-increment.
InvalSetIterator<S> operator++(int); // Post-increment.
bool operator==(const InvalSetIterator<S>& rhs) const;
bool operator!=(const InvalSetIterator<S>& rhs) const {
return !(*this == rhs);
}
ElementType& operator*() { return *Current(); }
const ElementType& operator*() const { return *Current(); }
ElementType* operator->() { return Current(); }
const ElementType* operator->() const { return Current(); }
protected:
void MoveToValidElement();
// Indicates if the iterator is looking at the vector or at the preallocated
// elements.
bool using_vector_;
// Used when looking at the preallocated elements, or in debug mode when using
// the vector to track how many times the iterator has advanced.
size_t index_;
typename std::vector<ElementType>::iterator iterator_;
S* inval_set_;
// TODO: These helpers are deprecated and will be removed in future versions
// of VIXL.
ElementType* Current() const;
void Advance();
};
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
: valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) {
#ifdef VIXL_DEBUG
monitor_ = 0;
#endif
}
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
VIXL_ASSERT(monitor_ == 0);
delete vector_;
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::begin() {
return iterator(this);
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::end() {
iterator end(this);
end.Finish();
return end;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
VIXL_ASSERT(Search(element) == NULL);
SetSorted(empty() || (sorted_ && (element > CleanBack())));
if (IsUsingVector()) {
vector_->push_back(element);
} else {
if (size_ < kNPreallocatedElements) {
preallocated_[size_] = element;
} else {
// Transition to using the vector.
vector_ =
new std::vector<ElementType>(preallocated_, preallocated_ + size_);
vector_->push_back(element);
}
}
size_++;
if (valid_cached_min_ && (element < GetMinElement())) {
cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
cached_min_key_ = GetKey(element);
valid_cached_min_ = true;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
ElementType* local_element = Search(element);
if (local_element != NULL) {
EraseInternal(local_element);
return 1;
}
return 0;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return NULL;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
if (!sorted_) {
Sort(kHardSort);
}
if (!valid_cached_min_) {
CacheMinElement();
}
return BinarySearch(element, GetElementAt(cached_min_index_), StorageEnd());
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
return size_;
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
return size_ == 0;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
VIXL_ASSERT(monitor() == 0);
size_ = 0;
if (IsUsingVector()) {
vector_->clear();
}
SetSorted(true);
valid_cached_min_ = false;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
CacheMinElement();
return *GetElementAt(cached_min_index_);
}
template <TEMPLATE_INVALSET_P_DECL>
KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElementKey() {
VIXL_ASSERT(monitor() == 0);
if (valid_cached_min_) {
return cached_min_key_;
} else {
return GetKey(GetMinElement());
}
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
return GetKey(element) != kInvalidKey;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
// Note that this function must be safe even while an iterator has acquired
// this set.
VIXL_ASSERT(element != NULL);
size_t deleted_index = GetElementIndex(element);
if (IsUsingVector()) {
VIXL_ASSERT((&(vector_->front()) <= element) &&
(element <= &(vector_->back())));
SetKey(element, kInvalidKey);
} else {
VIXL_ASSERT((preallocated_ <= element) &&
(element < (preallocated_ + kNPreallocatedElements)));
ElementType* end = preallocated_ + kNPreallocatedElements;
size_t copy_size = sizeof(*element) * (end - element - 1);
memmove(element, element + 1, copy_size);
}
size_--;
if (valid_cached_min_ && (deleted_index == cached_min_index_)) {
if (sorted_ && !empty()) {
const ElementType* min = GetFirstValidElement(element, StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
valid_cached_min_ = false;
}
}
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
const ElementType& element, ElementType* start, ElementType* end) const {
if (start == end) {
return NULL;
}
VIXL_ASSERT(sorted_);
VIXL_ASSERT(start < end);
VIXL_ASSERT(!empty());
// Perform a binary search through the elements while ignoring invalid
// elements.
ElementType* elements = start;
size_t low = 0;
size_t high = (end - start) - 1;
while (low < high) {
// Find valid bounds.
while (!IsValid(elements[low]) && (low < high)) ++low;
while (!IsValid(elements[high]) && (low < high)) --high;
VIXL_ASSERT(low <= high);
// Avoid overflow when computing the middle index.
size_t middle = low + (high - low) / 2;
if ((middle == low) || (middle == high)) {
break;
}
while ((middle < high - 1) && !IsValid(elements[middle])) ++middle;
while ((low + 1 < middle) && !IsValid(elements[middle])) --middle;
if (!IsValid(elements[middle])) {
break;
}
if (elements[middle] < element) {
low = middle;
} else {
high = middle;
}
}
if (elements[low] == element) return &elements[low];
if (elements[high] == element) return &elements[high];
return NULL;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
if (sort_type == kSoftSort) {
if (sorted_) {
return;
}
}
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return;
}
Clean();
std::sort(StorageBegin(), StorageEnd());
SetSorted(true);
cached_min_index_ = 0;
cached_min_key_ = GetKey(Front());
valid_cached_min_ = true;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
VIXL_ASSERT(monitor() == 0);
if (empty() || !IsUsingVector()) {
return;
}
// Manually iterate through the vector storage to discard invalid elements.
ElementType* start = &(vector_->front());
ElementType* end = start + vector_->size();
ElementType* c = start;
ElementType* first_invalid;
ElementType* first_valid;
ElementType* next_invalid;
while ((c < end) && IsValid(*c)) c++;
first_invalid = c;
while (c < end) {
while ((c < end) && !IsValid(*c)) c++;
first_valid = c;
while ((c < end) && IsValid(*c)) c++;
next_invalid = c;
ptrdiff_t n_moved_elements = (next_invalid - first_valid);
memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
first_invalid = first_invalid + n_moved_elements;
c = next_invalid;
}
// Delete the trailing invalid elements.
vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
VIXL_ASSERT(vector_->size() == size_);
if (sorted_) {
valid_cached_min_ = true;
cached_min_index_ = 0;
cached_min_key_ = GetKey(*GetElementAt(0));
} else {
valid_cached_min_ = false;
}
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->front() : preallocated_[0];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
VIXL_ASSERT(monitor() == 0);
if (IsUsingVector()) {
// Delete the invalid trailing elements.
typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
while (!IsValid(*it)) {
it++;
}
vector_->erase(it.base(), vector_->end());
}
return Back();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementIndex(
const ElementType* element) const {
VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
return element - StorageBegin();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(
size_t index) const {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(size_t index) {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetFirstValidElement(
const ElementType* from, const ElementType* end) {
while ((from < end) && !IsValid(*from)) {
from++;
}
return from;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
if (valid_cached_min_) {
return;
}
if (sorted_) {
const ElementType* min = GetFirstValidElement(StorageBegin(), StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
Sort(kHardSort);
}
VIXL_ASSERT(valid_cached_min_);
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
if (!IsUsingVector()) {
return false;
}
size_t n_invalid_elements = vector_->size() - size_;
return (n_invalid_elements > RECLAIM_FROM) &&
(n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
}
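// For example, with RECLAIM_FACTOR == 2 the test above only returns true once
// more than half of the vector's slots hold invalidated elements, and only if
// there are more than RECLAIM_FROM such elements in absolute terms.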
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
VIXL_ASSERT(monitor() == 0);
Clean();
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(S* inval_set)
: using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
index_(0),
inval_set_(inval_set) {
if (inval_set != NULL) {
inval_set->Sort(S::kSoftSort);
#ifdef VIXL_DEBUG
inval_set->Acquire();
#endif
if (using_vector_) {
iterator_ = typename std::vector<ElementType>::iterator(
inval_set_->vector_->begin());
}
MoveToValidElement();
}
}
template <class S>
InvalSetIterator<S>::~InvalSetIterator() {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Release();
#endif
}
template <class S>
typename S::_ElementType* InvalSetIterator<S>::Current() const {
VIXL_ASSERT(!Done());
if (using_vector_) {
return &(*iterator_);
} else {
return &(inval_set_->preallocated_[index_]);
}
}
template <class S>
void InvalSetIterator<S>::Advance() {
++(*this);
}
template <class S>
bool InvalSetIterator<S>::Done() const {
if (using_vector_) {
bool done = (iterator_ == inval_set_->vector_->end());
VIXL_ASSERT(done == (index_ == inval_set_->size()));
return done;
} else {
return index_ == inval_set_->size();
}
}
template <class S>
void InvalSetIterator<S>::Finish() {
VIXL_ASSERT(inval_set_->sorted_);
if (using_vector_) {
iterator_ = inval_set_->vector_->end();
}
index_ = inval_set_->size();
}
template <class S>
void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
if (using_vector_) {
inval_set_->EraseInternal(&(*iterator_));
MoveToValidElement();
} else {
inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
}
}
template <class S>
bool InvalSetIterator<S>::IsValid(const ElementType& element) {
return S::IsValid(element);
}
template <class S>
typename S::_KeyType InvalSetIterator<S>::GetKey(const ElementType& element) {
return S::GetKey(element);
}
template <class S>
void InvalSetIterator<S>::MoveToValidElement() {
if (using_vector_) {
while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
iterator_++;
}
} else {
VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
// Nothing to do.
}
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(const InvalSetIterator<S>& other)
: using_vector_(other.using_vector_),
index_(other.index_),
inval_set_(other.inval_set_) {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Acquire();
#endif
}
#if __cplusplus >= 201103L
template <class S>
InvalSetIterator<S>::InvalSetIterator(InvalSetIterator<S>&& other) noexcept
: using_vector_(false),
index_(0),
inval_set_(NULL) {
swap(*this, other);
}
#endif
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator=(InvalSetIterator<S> other) {
swap(*this, other);
return *this;
}
template <class S>
bool InvalSetIterator<S>::operator==(const InvalSetIterator<S>& rhs) const {
bool equal = (inval_set_ == rhs.inval_set_);
// If the inval_set_ matches, using_vector_ must also match.
VIXL_ASSERT(!equal || (using_vector_ == rhs.using_vector_));
if (using_vector_) {
equal = equal && (iterator_ == rhs.iterator_);
// In debug mode, index_ is maintained even with using_vector_.
VIXL_ASSERT(!equal || (index_ == rhs.index_));
} else {
equal = equal && (index_ == rhs.index_);
#ifdef VIXL_DEBUG
// If not using_vector_, iterator_ should be default-initialised.
typename std::vector<ElementType>::iterator default_iterator;
VIXL_ASSERT(iterator_ == default_iterator);
VIXL_ASSERT(rhs.iterator_ == default_iterator);
#endif
}
return equal;
}
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator++() {
// Pre-increment.
VIXL_ASSERT(!Done());
if (using_vector_) {
iterator_++;
#ifdef VIXL_DEBUG
index_++;
#endif
MoveToValidElement();
} else {
index_++;
}
return *this;
}
template <class S>
InvalSetIterator<S> InvalSetIterator<S>::operator++(int /* unused */) {
// Post-increment.
VIXL_ASSERT(!Done());
InvalSetIterator<S> old(*this);
++(*this);
return old;
}
#undef TEMPLATE_INVALSET_P_DECL
#undef TEMPLATE_INVALSET_P_DEF
} // namespace vixl
#endif // VIXL_INVALSET_H_
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_MACRO_ASSEMBLER_INTERFACE_H
#define VIXL_MACRO_ASSEMBLER_INTERFACE_H
#include "assembler-base-vixl.h"
namespace vixl {
class MacroAssemblerInterface {
public:
virtual internal::AssemblerBase* AsAssemblerBase() = 0;
virtual ~MacroAssemblerInterface() {}
virtual bool AllowMacroInstructions() const = 0;
virtual bool ArePoolsBlocked() const = 0;
protected:
virtual void SetAllowMacroInstructions(bool allow) = 0;
virtual void BlockPools() = 0;
virtual void ReleasePools() = 0;
virtual void EnsureEmitPoolsFor(size_t size) = 0;
// Emit the branch over a literal/veneer pool, and any necessary padding
// before it.
virtual void EmitPoolHeader() = 0;
// When this is called, the label used for branching over the pool is bound.
// This can also generate additional padding, which must correspond to the
// alignment_ value passed to the PoolManager (which needs to keep track of
// the exact size of the generated pool).
virtual void EmitPoolFooter() = 0;
// Emit n bytes of padding that does not have to be executable.
virtual void EmitPaddingBytes(int n) = 0;
// Emit n bytes of padding that has to be executable. Implementations must
// make sure this is a multiple of the instruction size.
virtual void EmitNopBytes(int n) = 0;
// The following scopes need access to the above method in order to implement
// pool blocking and temporarily disable the macro-assembler.
friend class ExactAssemblyScope;
friend class EmissionCheckScope;
template <typename T>
friend class PoolManager;
};
} // namespace vixl
#endif // VIXL_MACRO_ASSEMBLER_INTERFACE_H
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PLATFORM_H
#define PLATFORM_H
// Define platform specific functionalities.
extern "C" {
#include <signal.h>
}
namespace vixl {
inline void HostBreakpoint() { raise(SIGINT); }
} // namespace vixl
#endif
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_IMPL_H_
#define VIXL_POOL_MANAGER_IMPL_H_
#include "pool-manager.h"
#include <algorithm>
#include "assembler-base-vixl.h"
namespace vixl {
template <typename T>
T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
EmitOption option) {
// Make sure that the buffer still has the alignment we think it does.
VIXL_ASSERT(IsAligned(masm->AsAssemblerBase()
->GetBuffer()
->GetStartAddress<uintptr_t>(),
buffer_alignment_));
// We should not call this method when the pools are blocked.
VIXL_ASSERT(!IsBlocked());
if (objects_.empty()) return pc;
// Emit header.
if (option == kBranchRequired) {
masm->EmitPoolHeader();
// TODO: The pc at this point might not actually be aligned according to
// alignment_. This is to support the current AARCH32 MacroAssembler which
// does not have a fixed size instruction set. In practice, the pc will be
// aligned to the alignment that instructions need for the current instruction
// set, so we do not need to align it here. All other calculations do take
// the alignment into account, which only makes the checkpoint calculations
// more conservative when we use T32. Uncomment the following assertion if
// the AARCH32 MacroAssembler is modified to only support one ISA at a
// time.
// VIXL_ASSERT(pc == AlignUp(pc, alignment_));
pc += header_size_;
} else {
// If the header is optional, we might need to add some extra padding to
// meet the minimum location of the first object.
if (pc < objects_[0].min_location_) {
int32_t padding = objects_[0].min_location_ - pc;
masm->EmitNopBytes(padding);
pc += padding;
}
}
PoolObject<T>* existing_object = GetObjectIfTracked(new_object);
// Go through all objects and emit one by one.
for (objects_iter iter = objects_.begin(); iter != objects_.end();) {
PoolObject<T>& current = *iter;
if (ShouldSkipObject(&current,
pc,
num_bytes,
new_reference,
new_object,
existing_object)) {
++iter;
continue;
}
LocationBase<T>* label_base = current.label_base_;
T aligned_pc = AlignUp(pc, current.alignment_);
masm->EmitPaddingBytes(aligned_pc - pc);
pc = aligned_pc;
VIXL_ASSERT(pc >= current.min_location_);
VIXL_ASSERT(pc <= current.max_location_);
// First call SetLocation, which will also resolve the references, and then
// call EmitPoolObject, which might add a new reference.
label_base->SetLocation(masm->AsAssemblerBase(), pc);
label_base->EmitPoolObject(masm);
int object_size = label_base->GetPoolObjectSizeInBytes();
if (label_base->ShouldDeletePoolObjectOnPlacement()) {
label_base->MarkBound();
iter = RemoveAndDelete(iter);
} else {
VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement());
current.label_base_->UpdatePoolObject(&current);
VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment());
++iter;
}
pc += object_size;
}
// Recalculate the checkpoint before emitting the footer. The footer might
// call Bind() which will check if we need to emit.
RecalculateCheckpoint();
// Always emit footer - this might add some padding.
masm->EmitPoolFooter();
pc = AlignUp(pc, alignment_);
return pc;
}
template <typename T>
bool PoolManager<T>::ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const {
// We assume that all objects before this have been skipped and all objects
// after this will be emitted, therefore we will emit the whole pool. Add
// the header size and alignment, as well as the number of bytes we are
// planning to emit.
T max_actual_location = pc + num_bytes + max_pool_size_;
if (new_reference != NULL) {
// If we're adding a new object, also assume that it will have to be emitted
// before the object we are considering skipping.
VIXL_ASSERT(new_object != NULL);
T new_object_alignment = std::max(new_reference->object_alignment_,
new_object->GetPoolObjectAlignment());
if ((existing_object != NULL) &&
(existing_object->alignment_ > new_object_alignment)) {
new_object_alignment = existing_object->alignment_;
}
max_actual_location +=
(new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1);
}
// Hard limit.
if (max_actual_location >= pool_object->max_location_) return false;
// Use heuristic.
return (pc < pool_object->skip_until_location_hint_);
}
template <typename T>
T PoolManager<T>::UpdateCheckpointForObject(T checkpoint,
const PoolObject<T>* object) {
checkpoint -= object->label_base_->GetPoolObjectSizeInBytes();
if (checkpoint > object->max_location_) checkpoint = object->max_location_;
checkpoint = AlignDown(checkpoint, object->alignment_);
return checkpoint;
}
template <typename T>
static T MaxCheckpoint() {
return std::numeric_limits<T>::max();
}
template <typename T>
static inline bool CheckCurrentPC(T pc, T checkpoint) {
VIXL_ASSERT(pc <= checkpoint);
// We must emit the pools if we are at the checkpoint now.
return pc == checkpoint;
}
template <typename T>
static inline bool CheckFuturePC(T pc, T checkpoint) {
// We do not need to emit the pools now if the projected future PC will be
// equal to the checkpoint (we will need to emit the pools then).
return pc > checkpoint;
}
template <typename T>
bool PoolManager<T>::MustEmit(T pc,
int num_bytes,
ForwardReference<T>* reference,
LocationBase<T>* label_base) const {
// Check if we are at or past the checkpoint.
if (CheckCurrentPC(pc, checkpoint_)) return true;
// Check if the future PC will be past the checkpoint.
pc += num_bytes;
if (CheckFuturePC(pc, checkpoint_)) return true;
// No new reference - nothing to do.
if (reference == NULL) {
VIXL_ASSERT(label_base == NULL);
return false;
}
if (objects_.empty()) {
// Basic assertions that restrictions on the new (and only) reference are
// possible to satisfy.
VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >=
reference->min_object_location_);
VIXL_ASSERT(pc <= reference->max_object_location_);
return false;
}
// Check if the object is already being tracked.
const PoolObject<T>* existing_object = GetObjectIfTracked(label_base);
if (existing_object != NULL) {
// If the existing_object is already in objects_ and its new
// alignment and new location restrictions are not stricter, skip the more
// expensive check.
if ((reference->min_object_location_ <= existing_object->min_location_) &&
(reference->max_object_location_ >= existing_object->max_location_) &&
(reference->object_alignment_ <= existing_object->alignment_)) {
return false;
}
}
// Create a temporary object.
PoolObject<T> temp(label_base);
temp.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
temp.RestrictAlignment(reference->object_alignment_);
if (existing_object != NULL) {
temp.RestrictRange(existing_object->min_location_,
existing_object->max_location_);
temp.RestrictAlignment(existing_object->alignment_);
}
// Check if the new reference can be added after the end of the current pool.
// If yes, we don't need to emit.
T last_reachable = AlignDown(temp.max_location_, temp.alignment_);
const PoolObject<T>& last = objects_.back();
T after_pool = AlignDown(last.max_location_, last.alignment_) +
last.label_base_->GetPoolObjectSizeInBytes();
// The current object can be placed at the end of the pool, even if the last
// object is placed at the last possible location.
if (last_reachable >= after_pool) return false;
// The current object can be placed after the code we are about to emit and
// after the existing pool (with a pessimistic size estimate).
if (last_reachable >= pc + num_bytes + max_pool_size_) return false;
// We're not in a trivial case, so we need to recalculate the checkpoint.
// Check (conservatively) if we can fit it into the objects_ array, without
// breaking our assumptions. Here we want to recalculate the checkpoint as
// if the new reference was added to the PoolManager but without actually
// adding it (as removing it is non-trivial).
T checkpoint = MaxCheckpoint<T>();
// Will temp be the last object in objects_?
if (PoolObjectLessThan(last, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
bool tempNotPlacedYet = true;
for (int i = static_cast<int>(objects_.size()) - 1; i >= 0; --i) {
const PoolObject<T>& current = objects_[i];
if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
tempNotPlacedYet = false;
}
if (current.label_base_ == label_base) continue;
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
if (checkpoint < current.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
}
// temp is the object with the smallest max_location_.
if (tempNotPlacedYet) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
return CheckFuturePC(pc, checkpoint);
}
template <typename T>
void PoolManager<T>::RecalculateCheckpoint(SortOption sort_option) {
// TODO: Improve the max_pool_size_ estimate by starting from the
// min_location_ of the first object, calculating the end of the pool as if
// all objects were placed starting from there, and in the end adding the
// maximum object alignment found minus one (which is the maximum extra
// padding we would need if we were to relocate the pool to a different
// address).
max_pool_size_ = 0;
if (objects_.empty()) {
checkpoint_ = MaxCheckpoint<T>();
return;
}
// Sort objects by their max_location_.
if (sort_option == kSortRequired) {
std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan);
}
// Add the header size and header and footer max alignment to the maximum
// pool size.
max_pool_size_ += header_size_ + 2 * (alignment_ - 1);
T checkpoint = MaxCheckpoint<T>();
int last_object_index = static_cast<int>(objects_.size()) - 1;
for (int i = last_object_index; i >= 0; --i) {
// Bring back the checkpoint by the size of the current object, unless
// we need to bring it back more, then align.
PoolObject<T>& current = objects_[i];
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
VIXL_ASSERT(checkpoint >= current.min_location_);
max_pool_size_ += (current.alignment_ - 1 +
current.label_base_->GetPoolObjectSizeInBytes());
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
// Update the checkpoint of the pool manager.
checkpoint_ = checkpoint;
// NOTE: To handle min_location_ in the generic case, we could make a second
// pass of the objects_ vector, increasing the checkpoint as needed, while
// maintaining the alignment requirements.
// It should not be possible to have any issues with min_location_ with actual
// code, since there should always be some kind of branch over the pool,
// whether introduced by the pool emission or by the user, which will make
// sure the min_location_ requirement is satisfied. It's possible that the
// user could emit code in the literal pool and intentionally load the first
// value and then fall through into the pool, but that is not a supported use
// of VIXL and we will assert in that case.
}
template <typename T>
bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b) {
if (a.max_location_ != b.max_location_)
return (a.max_location_ < b.max_location_);
int a_size = a.label_base_->GetPoolObjectSizeInBytes();
int b_size = b.label_base_->GetPoolObjectSizeInBytes();
if (a_size != b_size) return (a_size < b_size);
if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_);
if (a.min_location_ != b.min_location_)
return (a.min_location_ < b.min_location_);
return false;
}
template <typename T>
void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* label_base) {
VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);
PoolObject<T>* object = GetObjectIfTracked(label_base);
if (object == NULL) {
PoolObject<T> new_object(label_base);
new_object.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
new_object.RestrictAlignment(reference->object_alignment_);
Insert(new_object);
} else {
object->RestrictRange(reference->min_object_location_,
reference->max_object_location_);
object->RestrictAlignment(reference->object_alignment_);
// Move the object, if needed.
if (objects_.size() != 1) {
PoolObject<T> new_object(*object);
ptrdiff_t distance = std::distance(objects_.data(), object);
objects_.erase(objects_.begin() + distance);
Insert(new_object);
}
}
// No need to sort, we inserted the object in an already sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
template <typename T>
void PoolManager<T>::Insert(const PoolObject<T>& new_object) {
bool inserted = false;
// Place the object in the right position.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (!PoolObjectLessThan(current, new_object)) {
objects_.insert(iter, new_object);
inserted = true;
break;
}
}
if (!inserted) {
objects_.push_back(new_object);
}
}
template <typename T>
void PoolManager<T>::RemoveAndDelete(PoolObject<T>* object) {
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (current.label_base_ == object->label_base_) {
(void)RemoveAndDelete(iter);
return;
}
}
VIXL_UNREACHABLE();
}
template <typename T>
typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
objects_iter iter) {
PoolObject<T>& object = *iter;
LocationBase<T>* label_base = object.label_base_;
// Check if we also need to delete the LocationBase object.
if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
delete_on_destruction_.push_back(label_base);
}
if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) {
VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction());
delete label_base;
}
return objects_.erase(iter);
}
template <typename T>
T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
LocationBase<T>* object,
T location) {
PoolObject<T>* existing_object = GetObjectIfTracked(object);
int alignment;
T min_location;
if (existing_object == NULL) {
alignment = object->GetMaxAlignment();
min_location = object->GetMinLocation();
} else {
alignment = existing_object->alignment_;
min_location = existing_object->min_location_;
}
// Align if needed, and add necessary padding to reach the min_location_.
T aligned_location = AlignUp(location, alignment);
masm->EmitNopBytes(aligned_location - location);
location = aligned_location;
while (location < min_location) {
masm->EmitNopBytes(alignment);
location += alignment;
}
object->SetLocation(masm->AsAssemblerBase(), location);
object->MarkBound();
if (existing_object != NULL) {
RemoveAndDelete(existing_object);
// No need to sort, we removed the object from a sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
// We assume that the maximum padding we can possibly add here is less
// than the header alignment - hence that we're not going to go past our
// checkpoint.
VIXL_ASSERT(!CheckFuturePC(location, checkpoint_));
return location;
}
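// For example (illustrative values only): binding an object with alignment 4
// and min_location 16 at location 6 first emits 2 nop bytes to reach the
// aligned location 8, then two more 4-byte blocks of nops to reach 16, where
// the object is finally bound.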
template <typename T>
void PoolManager<T>::Release(T pc) {
USE(pc);
if (--monitor_ == 0) {
// Ensure the pool has not been blocked for too long.
VIXL_ASSERT(pc <= checkpoint_);
}
}
template <typename T>
PoolManager<T>::~PoolManager<T>() {
#ifdef VIXL_DEBUG
// Check for unbound objects.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
// There should not be any bound objects left in the pool. For unbound
// objects, we will check in the destructor of the object itself.
VIXL_ASSERT(!(*iter).label_base_->IsBound());
}
#endif
// Delete objects the pool manager owns.
for (typename std::vector<LocationBase<T> *>::iterator
iter = delete_on_destruction_.begin(),
end = delete_on_destruction_.end();
iter != end;
++iter) {
delete *iter;
}
}
template <typename T>
int PoolManager<T>::GetPoolSizeForTest() const {
// Iterate over objects and return their cumulative size. This does not take
// any padding into account, just the size of the objects themselves.
int size = 0;
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
size += (*iter).label_base_->GetPoolObjectSizeInBytes();
}
return size;
}
} // namespace vixl
#endif // VIXL_POOL_MANAGER_IMPL_H_
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_H_
#define VIXL_POOL_MANAGER_H_
#include <stdint.h>
#include <cstddef>
#include <limits>
#include <map>
#include <vector>
#include "globals-vixl.h"
#include "macro-assembler-interface.h"
#include "utils-vixl.h"
namespace vixl {
class TestPoolManager;
// There are four classes declared in this header file:
// PoolManager, PoolObject, ForwardReference and LocationBase.
// The PoolManager manages both literal and veneer pools, and is designed to be
// shared between AArch32 and AArch64. A pool is represented as an abstract
// collection of references to objects. The manager does not need to know
// architecture-specific details about literals and veneers; the actual
// emission of the pool objects is delegated.
//
// Literal and Label will derive from LocationBase. The MacroAssembler will
// create these objects as instructions that reference pool objects are
// encountered, and ask the PoolManager to track them. The PoolManager will
// create an internal PoolObject object for each object derived from
// LocationBase. Some of these PoolObject objects will be deleted when placed
// (e.g. the ones corresponding to Literals), whereas others will be updated
// with a new range when placed (e.g. Veneers) and deleted when Bind() is
// called on the PoolManager with their corresponding object as a parameter.
//
// A ForwardReference represents a reference to a PoolObject that will be
// placed later in the instruction stream. Each ForwardReference may only refer
// to one PoolObject, but many ForwardReferences may refer to the same
// object.
//
// A PoolObject represents an object that has not yet been placed. The final
// location of a PoolObject (and hence the LocationBase object to which it
// corresponds) is constrained mostly by the instructions that refer to it, but
// PoolObjects can also have inherent constraints, such as alignment.
//
// LocationBase objects, unlike PoolObject objects, can be used outside of the
// pool manager (e.g. as manually placed literals, which may still have
// forward references that need to be resolved).
//
// At the moment, each LocationBase will have at most one PoolObject that keeps
// the relevant information for placing this object in the pool. When that
// object is placed, all forward references of the object are resolved. For
// that reason, we do not need to keep track of the ForwardReference objects in
// the PoolObject.
// T is an integral type used for representing locations. For a 32-bit
// architecture it will typically be int32_t, whereas for a 64-bit
// architecture it will be int64_t.
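//
// A minimal usage sketch (illustrative only; the concrete 'masm', 'object',
// 'pc', 'location' and the constructor/reference values below are
// assumptions, not part of this header). A client typically checks MustEmit()
// before generating an instruction that adds a reference, emits the pool if
// required, and then registers the reference:
//
//   PoolManager<int32_t> pools(/* header_size */ 4,
//                              /* alignment */ 4,
//                              /* buffer_alignment */ 4);
//   ForwardReference<int32_t> ref(pc, /* size */ 4, min_loc, max_loc);
//   if (pools.MustEmit(pc, /* num_bytes */ 4, &ref, object)) {
//     pc = pools.Emit(masm, pc, /* num_bytes */ 4, &ref, object);
//   }
//   pools.AddObjectReference(&ref, object);
//   // Later, when the user binds the corresponding label:
//   location = pools.Bind(masm, object, location);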
template <typename T>
class ForwardReference;
template <typename T>
class PoolObject;
template <typename T>
class PoolManager;
// Represents an object that has a size and alignment, and either has a known
// location or has not been placed yet. An object of a subclass of LocationBase
// will typically keep track of a number of ForwardReferences when it has not
// yet been placed, but LocationBase does not assume or implement that
// functionality. LocationBase provides virtual methods for emitting the
// object, updating all the forward references, and giving the PoolManager
// information on the lifetime of this object and the corresponding PoolObject.
template <typename T>
class LocationBase {
public:
// The size of a LocationBase object is restricted to 4KB, in order to avoid
// situations where the size of the pool becomes larger than the range of
// an unconditional branch. This cannot happen without having large objects,
// as typically the range of an unconditional branch is the largest range
// an instruction supports.
// TODO: This would ideally be an architecture-specific value, perhaps
// another template parameter.
static const int kMaxObjectSize = 4 * KBytes;
// By default, LocationBase objects are aligned naturally to their size.
LocationBase(uint32_t type, int size)
: pool_object_size_(size),
pool_object_alignment_(size),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(size));
}
// Allow alignment to be specified, as long as it is smaller than the size.
LocationBase(uint32_t type, int size, int alignment)
: pool_object_size_(size),
pool_object_alignment_(alignment),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(alignment <= size);
}
// Constructor for locations that are already bound.
explicit LocationBase(T location)
: pool_object_size_(-1),
pool_object_alignment_(-1),
pool_object_type_(0),
is_bound_(true),
location_(location) {}
virtual ~LocationBase() {}
// The PoolManager should assume ownership of some objects, and delete them
// after they have been placed. This can happen for example for literals that
// are created internally to the MacroAssembler and the user doesn't get a
// handle to. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const { return false; }
// The PoolManager should assume ownership of some objects, and delete them
// when it is destroyed. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const { return false; }
// Emit the PoolObject. Derived classes will implement this method to emit
// the necessary data and/or code (for example, to emit a literal or a
// veneer). This should not add padding, as it is added explicitly by the pool
// manager.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) = 0;
// Resolve the references to this object. Will encode the necessary offset
// in the instruction corresponding to each reference and then delete it.
// TODO: An alternative here would be to provide a ResolveReference()
// method that only asks the LocationBase to resolve a specific reference
// (thus allowing the pool manager to resolve some of the references only).
// This would mean we need to have some kind of API to get all the references
// to a LabelObject.
virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0;
// Returns true when the PoolObject corresponding to this LocationBase object
// needs to be removed from the pool once placed, and false if it needs to
// be updated instead (in which case UpdatePoolObject will be called).
virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; }
// Update the PoolObject after placing it, if necessary. This will happen for
// example in the case of a placed veneer, where we need to use a new updated
// range and a new reference (from the newly added branch instruction).
// By default, this does nothing, to avoid forcing objects that will not need
// this to have an empty implementation.
virtual void UpdatePoolObject(PoolObject<T>*) {}
// Implement heuristics for emitting this object. If a margin is to be used
// as a hint during pool emission, we will try not to emit the object if we
// are further away from the maximum reachable location by more than the
// margin.
virtual bool UsePoolObjectEmissionMargin() const { return false; }
virtual T GetPoolObjectEmissionMargin() const {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == false);
return 0;
}
int GetPoolObjectSizeInBytes() const { return pool_object_size_; }
int GetPoolObjectAlignment() const { return pool_object_alignment_; }
uint32_t GetPoolObjectType() const { return pool_object_type_; }
bool IsBound() const { return is_bound_; }
T GetLocation() const { return location_; }
// This function can be called multiple times before the object is marked as
// bound with MarkBound() below. This is because some objects (e.g. the ones
// used to represent labels) can have veneers; every time we place a veneer
// we need to keep track of the location in order to resolve the references
// to the object. Reusing the location_ field for this is convenient.
void SetLocation(internal::AssemblerBase* assembler, T location) {
VIXL_ASSERT(!is_bound_);
location_ = location;
ResolveReferences(assembler);
}
void MarkBound() {
VIXL_ASSERT(!is_bound_);
is_bound_ = true;
}
// The following two functions are used when an object is bound by a call to
// PoolManager<T>::Bind().
virtual int GetMaxAlignment() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 1;
}
virtual T GetMinLocation() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 0;
}
private:
// The size of the corresponding PoolObject, in bytes.
int pool_object_size_;
// The alignment of the corresponding PoolObject; this must be a power of two.
int pool_object_alignment_;
// Different derived classes should have different type values. This can be
// used internally by the PoolManager for grouping of objects.
uint32_t pool_object_type_;
// Has the object been bound to a location yet?
bool is_bound_;
protected:
// See comment on SetLocation() for the use of this field.
T location_;
};
template <typename T>
class PoolObject {
public:
// By default, PoolObjects have no inherent position constraints.
explicit PoolObject(LocationBase<T>* parent)
: label_base_(parent),
min_location_(0),
max_location_(std::numeric_limits<T>::max()),
alignment_(parent->GetPoolObjectAlignment()),
skip_until_location_hint_(0),
type_(parent->GetPoolObjectType()) {
VIXL_ASSERT(IsPowerOf2(alignment_));
UpdateLocationHint();
}
// Reset the minimum and maximum location and the alignment of the object.
// This function is public in order to allow the LocationBase corresponding to
// this PoolObject to update the PoolObject when placed, e.g. in the case of
// veneers. The size and type of the object cannot be modified.
void Update(T min, T max, int alignment) {
// We don't use RestrictRange here as the new range is independent of the
// old range (and the maximum location is typically larger).
min_location_ = min;
max_location_ = max;
RestrictAlignment(alignment);
UpdateLocationHint();
}
private:
void RestrictRange(T min, T max) {
VIXL_ASSERT(min <= max_location_);
VIXL_ASSERT(max >= min_location_);
min_location_ = std::max(min_location_, min);
max_location_ = std::min(max_location_, max);
UpdateLocationHint();
}
void RestrictAlignment(int alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(IsPowerOf2(alignment_));
alignment_ = std::max(alignment_, alignment);
}
void UpdateLocationHint() {
if (label_base_->UsePoolObjectEmissionMargin()) {
skip_until_location_hint_ =
max_location_ - label_base_->GetPoolObjectEmissionMargin();
}
}
// The LocationBase that this pool object represents.
LocationBase<T>* label_base_;
// Hard, precise location constraints for the start location of the object.
// They are both inclusive, that is the start location of the object can be
// at any location between min_location_ and max_location_, themselves
// included.
T min_location_;
T max_location_;
// The alignment must be a power of two.
int alignment_;
// Avoid generating this object until skip_until_location_hint_. This
// supports cases where placing the object in the pool has an inherent cost
// that could be avoided in some other way. Veneers are a typical example; we
// would prefer to branch directly (over a pool) rather than use veneers, so
// this value can be set using some heuristic to leave them in the pool.
// This value is only a hint, which will be ignored if it has to in order to
// meet the hard constraints we have.
T skip_until_location_hint_;
// Used only to group objects of similar type together. The PoolManager does
// not know what the types represent.
uint32_t type_;
friend class PoolManager<T>;
};
// Class that represents a forward reference. It is the responsibility of
// LocationBase objects to keep track of forward references and patch them when
// an object is placed - this class is only used by the PoolManager in order to
// restrict the requirements on PoolObjects it is tracking.
template <typename T>
class ForwardReference {
public:
ForwardReference(T location,
int size,
T min_object_location,
T max_object_location,
int object_alignment = 1)
: location_(location),
size_(size),
object_alignment_(object_alignment),
min_object_location_(min_object_location),
max_object_location_(max_object_location) {
VIXL_ASSERT(AlignDown(max_object_location, object_alignment) >=
min_object_location);
}
bool LocationIsEncodable(T location) const {
return location >= min_object_location_ &&
location <= max_object_location_ &&
IsAligned(location, object_alignment_);
}
T GetLocation() const { return location_; }
T GetMinLocation() const { return min_object_location_; }
T GetMaxLocation() const { return max_object_location_; }
int GetAlignment() const { return object_alignment_; }
// Needed for InvalSet.
void SetLocationToInvalidateOnly(T location) { location_ = location; }
private:
// The location of the thing that contains the reference. For example, this
// can be the location of the branch or load instruction.
T location_;
// The size of the instruction that makes the reference, in bytes.
int size_;
// The alignment that the object must satisfy for this reference - must be a
// power of two.
int object_alignment_;
// Specify the possible locations where the object could be stored. AArch32's
// PC offset, and T32's PC alignment calculations should be applied by the
// Assembler, not here. The PoolManager deals only with simple locations.
// Including min_object_location_ is necessary to handle some AArch32
// instructions which have a minimum offset of 0, but also have the implicit
// PC offset.
// Note that this structure cannot handle sparse ranges, such as A32's ADR,
// but doing so is costly and probably not useful in practice. The min and
// max object location both refer to the beginning of the object, are
// inclusive and are not affected by the object size. E.g. if
// max_object_location_ is equal to X, we can place the object at location X
// regardless of its size.
T min_object_location_;
T max_object_location_;
friend class PoolManager<T>;
};
template <typename T>
class PoolManager {
public:
PoolManager(int header_size, int alignment, int buffer_alignment)
: header_size_(header_size),
alignment_(alignment),
buffer_alignment_(buffer_alignment),
checkpoint_(std::numeric_limits<T>::max()),
max_pool_size_(0),
monitor_(0) {}
~PoolManager();
// Check if we will need to emit the pool at location 'pc', when planning to
// generate a certain number of bytes. This optionally takes a
// ForwardReference we are about to generate, in which case the size of the
// reference must be included in 'num_bytes'.
bool MustEmit(T pc,
int num_bytes = 0,
ForwardReference<T>* reference = NULL,
LocationBase<T>* object = NULL) const;
enum EmitOption { kBranchRequired, kNoBranchRequired };
// Emit the pool at location 'pc', using 'masm' as the macroassembler.
// The branch over the header can be optionally omitted using 'option'.
// Returns the new PC after pool emission.
// This expects a number of bytes that are about to be emitted, to be taken
// into account in heuristics for pool object emission.
// This also optionally takes a forward reference and an object as
// parameters, to be used in the case where emission of the pool is triggered
// by adding a new reference to the pool that does not fit. The pool manager
// will need this information in order to apply its heuristics correctly.
T Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes = 0,
ForwardReference<T>* new_reference = NULL,
LocationBase<T>* new_object = NULL,
EmitOption option = kBranchRequired);
// Add 'reference' to 'object'. Should not be preceded by a call to MustEmit()
// that returned true, unless Emit() has been successfully called afterwards.
void AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* object);
// This is to notify the pool that a LocationBase has been bound to a location
// and does not need to be tracked anymore.
// This will happen, for example, for Labels, which are manually bound by the
// user.
// This can potentially add some padding bytes in order to meet the object
// requirements, and will return the new location.
T Bind(MacroAssemblerInterface* masm, LocationBase<T>* object, T location);
// Functions for blocking and releasing the pools.
void Block() { monitor_++; }
void Release(T pc);
bool IsBlocked() const { return monitor_ != 0; }
private:
typedef typename std::vector<PoolObject<T> >::iterator objects_iter;
typedef
typename std::vector<PoolObject<T> >::const_iterator const_objects_iter;
PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) {
return const_cast<PoolObject<T>*>(
static_cast<const PoolManager<T>*>(this)->GetObjectIfTracked(label));
}
const PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) const {
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
const PoolObject<T>& current = *iter;
if (current.label_base_ == label) return &current;
}
return NULL;
}
// Helper function for calculating the checkpoint.
enum SortOption { kSortRequired, kNoSortRequired };
void RecalculateCheckpoint(SortOption sort_option = kSortRequired);
// Comparison function for using std::sort() on objects_. PoolObject A is
// ordered before PoolObject B when A should be emitted before B. The
// comparison depends on the max_location_, size_, alignment_ and
// min_location_.
static bool PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b);
// Helper function used in the checkpoint calculation. 'checkpoint' is the
// current checkpoint, which is modified to take 'object' into account. The
// new checkpoint is returned.
static T UpdateCheckpointForObject(T checkpoint, const PoolObject<T>* object);
// Helper function to add a new object into a sorted objects_ array.
void Insert(const PoolObject<T>& new_object);
// Helper functions to remove an object from objects_ and delete the
// corresponding LocationBase object, if necessary. This will be called
// either after placing the object, or when Bind() is called.
void RemoveAndDelete(PoolObject<T>* object);
objects_iter RemoveAndDelete(objects_iter iter);
// Helper function to check if we should skip emitting an object.
bool ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const;
// Used only for debugging.
void DumpCurrentState(T pc) const;
// Methods used for testing only, via the test friend classes.
bool PoolIsEmptyForTest() const { return objects_.empty(); }
T GetCheckpointForTest() const { return checkpoint_; }
int GetPoolSizeForTest() const;
// The objects we are tracking references to. The objects_ vector is sorted
// at all times between calls to the public members of the PoolManager. It
// is sorted every time we add, delete or update a PoolObject.
// TODO: Consider a more efficient data structure here, to allow us to delete
// elements as we emit them.
std::vector<PoolObject<T> > objects_;
// Objects to be deleted on pool destruction.
std::vector<LocationBase<T>*> delete_on_destruction_;
// The header_size_ and alignment_ values are hardcoded for each instance of
// PoolManager. The PoolManager does not know how to emit the header, and
// relies on the EmitPoolHeader and EmitPoolFooter methods of the
// MacroAssemblerInterface for that. It will also emit padding if necessary,
// both for the header and at the end of the pool, according to alignment_,
// and using the EmitNopBytes and EmitPaddingBytes method of the
// MacroAssemblerInterface.
// The size of the header, in bytes.
int header_size_;
// The alignment of the header - must be a power of two.
int alignment_;
// The alignment of the buffer - we cannot guarantee any object alignment
// larger than this alignment. When a buffer is grown, this alignment has
// to be guaranteed.
// TODO: Consider extending this to describe the guaranteed alignment as the
// modulo of a known number.
int buffer_alignment_;
// The current checkpoint. This is the latest location at which the pool
// *must* be emitted. This should not be visible outside the pool manager
// and should only be updated in RecalculateCheckpoint.
T checkpoint_;
// Maximum size of the pool, assuming we need the maximum possible padding
// for each object and for the header. It is only updated in
// RecalculateCheckpoint.
T max_pool_size_;
// Indicates whether the emission of this pool is blocked.
int monitor_;
friend class vixl::TestPoolManager;
};
} // namespace vixl
#endif // VIXL_POOL_MANAGER_H_
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdio>
#include "utils-vixl.h"
namespace vixl {
// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
// Floating-point zero values.
const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
// Floating-point infinity values.
const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
const double kFP64PositiveInfinity =
RawbitsToDouble(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
RawbitsToDouble(UINT64_C(0xfff0000000000000));
bool IsZero(Float16 value) {
uint16_t bits = Float16ToRawbits(value);
return (bits == Float16ToRawbits(kFP16PositiveZero) ||
bits == Float16ToRawbits(kFP16NegativeZero));
}
uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
uint32_t FloatToRawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
uint64_t DoubleToRawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
Float16 RawbitsToFloat16(uint16_t bits) {
Float16 f;
f.rawbits_ = bits;
return f;
}
float RawbitsToFloat(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
double RawbitsToDouble(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
uint32_t Float16Sign(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(15, 15, rawbits);
}
uint32_t Float16Exp(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(14, 10, rawbits);
}
uint32_t Float16Mantissa(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(9, 0, rawbits);
}
uint32_t FloatSign(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(31, 31, rawbits);
}
uint32_t FloatExp(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(30, 23, rawbits);
}
uint32_t FloatMantissa(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(22, 0, rawbits);
}
uint32_t DoubleSign(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
}
uint32_t DoubleExp(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
}
uint64_t DoubleMantissa(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return ExtractUnsignedBitfield64(51, 0, rawbits);
}
internal::SimFloat16 Float16Pack(uint16_t sign,
uint16_t exp,
uint16_t mantissa) {
uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
return RawbitsToFloat16(bits);
}
float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
return RawbitsToFloat(bits);
}
double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
return RawbitsToDouble(bits);
}
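// For example (illustrative only), FloatPack(0, 127, 0) reassembles 1.0f
// (sign 0, biased exponent 127, all-zero mantissa), and Float16Pack(0, 15, 0)
// similarly reassembles the half-precision encoding of 1.0 (0x3c00).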
int Float16Classify(Float16 value) {
uint16_t bits = Float16ToRawbits(value);
uint16_t exponent_max = (1 << 5) - 1;
uint16_t exponent_mask = exponent_max << 10;
uint16_t mantissa_mask = (1 << 10) - 1;
uint16_t exponent = (bits & exponent_mask) >> 10;
uint16_t mantissa = bits & mantissa_mask;
if (exponent == 0) {
if (mantissa == 0) {
return FP_ZERO;
}
return FP_SUBNORMAL;
} else if (exponent == exponent_max) {
if (mantissa == 0) {
return FP_INFINITE;
}
return FP_NAN;
}
return FP_NORMAL;
}
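// Example classifications, using the constants defined above: the raw
// encodings of kFP16PositiveZero (0x0000) and kFP16NegativeZero (0x8000) both
// yield FP_ZERO, kFP16PositiveInfinity (0x7c00) yields FP_INFINITE, and
// kFP16DefaultNaN (0x7e00) yields FP_NAN.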
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
VIXL_ASSERT((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
count++;
}
imm >>= 16;
}
return count;
}
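// For example (illustrative only), CountClearHalfWords(0x0000ffff00000000, 64)
// returns 3: of the four 16-bit halfwords, only the one holding 0xffff is
// non-zero.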
int BitCount(uint64_t value) { return CountSetBits(value); }
// Float16 definitions.
Float16::Float16(double dvalue) {
rawbits_ =
Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
}
namespace internal {
SimFloat16 SimFloat16::operator-() const {
return RawbitsToFloat16(rawbits_ ^ 0x8000);
}
// SimFloat16 definitions.
SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
return static_cast<double>(*this) + static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
return static_cast<double>(*this) - static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
return static_cast<double>(*this) * static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
return static_cast<double>(*this) / static_cast<double>(rhs);
}
bool SimFloat16::operator<(SimFloat16 rhs) const {
return static_cast<double>(*this) < static_cast<double>(rhs);
}
bool SimFloat16::operator>(SimFloat16 rhs) const {
return static_cast<double>(*this) > static_cast<double>(rhs);
}
bool SimFloat16::operator==(SimFloat16 rhs) const {
if (IsNaN(*this) || IsNaN(rhs)) {
return false;
} else if (IsZero(rhs) && IsZero(*this)) {
// +0 and -0 should be treated as equal.
return true;
}
return this->rawbits_ == rhs.rawbits_;
}
bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
bool SimFloat16::operator==(double rhs) const {
return static_cast<double>(*this) == static_cast<double>(rhs);
}
SimFloat16::operator double() const {
return FPToDouble(*this, kIgnoreDefaultNaN);
}
Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
} // namespace internal
float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
uint16_t bits = Float16ToRawbits(value);
uint32_t sign = bits >> 15;
uint32_t exponent =
ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
kFloat16MantissaBits,
bits);
uint32_t mantissa =
ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
switch (Float16Classify(value)) {
case FP_ZERO:
return (sign == 0) ? 0.0f : -0.0f;
case FP_INFINITE:
return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
case FP_SUBNORMAL: {
// Calculate shift required to put mantissa into the most-significant bits
// of the destination mantissa.
int shift = CountLeadingZeros(mantissa << (32 - 10));
// Shift mantissa and discard implicit '1'.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
mantissa &= (1 << kFloatMantissaBits) - 1;
// Adjust the exponent for the shift applied, and rebias.
exponent = exponent - shift + (-15 + 127);
break;
}
case FP_NAN:
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred entirely, except that the top
// bit is forced to '1', making the result a quiet NaN. The unused
// (low-order) payload bits are set to 0.
exponent = (1 << kFloatExponentBits) - 1;
// Increase bits in mantissa, making low-order bits 0.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
mantissa |= 1 << 22; // Force a quiet NaN.
break;
case FP_NORMAL:
// Increase bits in mantissa, making low-order bits 0.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
// Change exponent bias.
exponent += (-15 + 127);
break;
default:
VIXL_UNREACHABLE();
}
return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
mantissa);
}
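// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). 0x3c00 is the binary16 encoding of 1.0 and
// 0x7d01 is a signalling NaN: the conversion widens the former exactly and
// quietens the latter while reporting that an exception would be raised.
inline bool ExampleFloat16ToFloat() {
  bool exception = false;
  float one = FPToFloat(RawbitsToFloat16(0x3c00), kIgnoreDefaultNaN);
  float quietened =
      FPToFloat(RawbitsToFloat16(0x7d01), kIgnoreDefaultNaN, &exception);
  return (one == 1.0f) && exception && IsQuietNaN(quietened);
}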
float FPToFloat(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven and FPRoundOdd rounding modes are implemented.
VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
USE(round_mode);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint64_t raw = DoubleToRawbits(value);
uint32_t sign = raw >> 63;
uint32_t exponent = (1 << 8) - 1;
uint32_t payload =
static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
payload |= (1 << 22); // Force a quiet NaN.
return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
}
case FP_ZERO:
case FP_INFINITE: {
// In a C++ cast, any value representable in the target type will be
// unchanged. This is always the case for +/-0.0 and infinities.
return static_cast<float>(value);
}
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert double-to-float as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
uint64_t raw = DoubleToRawbits(value);
// Extract the IEEE-754 double components.
uint32_t sign = raw >> 63;
// Extract the exponent and remove the IEEE-754 encoding bias.
int32_t exponent =
static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
// Extract the mantissa and add the implicit '1' bit.
uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
if (std::fpclassify(value) == FP_NORMAL) {
mantissa |= (UINT64_C(1) << 52);
}
return FPRoundToFloat(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return value;
}
// TODO: We should consider implementing a full FPToDouble(Float16)
// conversion function (for performance reasons).
double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
// We can rely on implicit float to double conversion here.
return FPToFloat(value, DN, exception);
}
double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred entirely, except that the top
// bit is forced to '1', making the result a quiet NaN. The unused
// (low-order) payload bits are set to 0.
uint32_t raw = FloatToRawbits(value);
uint64_t sign = raw >> 31;
uint64_t exponent = (1 << 11) - 1;
uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
payload <<= (52 - 23); // The unused low-order bits should be 0.
payload |= (UINT64_C(1) << 51); // Force a quiet NaN.
return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
}
case FP_ZERO:
case FP_NORMAL:
case FP_SUBNORMAL:
case FP_INFINITE: {
// All other inputs are preserved in a standard cast, because every value
// representable using an IEEE-754 float is also representable using an
// IEEE-754 double.
return static_cast<double>(value);
}
}
VIXL_UNREACHABLE();
return static_cast<double>(value);
}
Float16 FPToFloat16(float value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven rounding mode is implemented.
VIXL_ASSERT(round_mode == FPTieEven);
USE(round_mode);
uint32_t raw = FloatToRawbits(value);
int32_t sign = raw >> 31;
int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
: Float16ToRawbits(kFP16NegativeInfinity);
result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
result |= (1 << 9);  // Force a quiet NaN.
return RawbitsToFloat16(result);
}
case FP_ZERO:
return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
case FP_INFINITE:
return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert float-to-half as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
// Add the implicit '1' bit to the mantissa.
mantissa += (1 << 23);
return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return kFP16PositiveZero;
}
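// A minimal rounding sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). 1 + 2^-11 lies exactly half way between two
// representable half-precision values and rounds to the even encoding 0x3c00
// (1.0), while 1 + 3*2^-11 ties upwards to the even encoding 0x3c02.
inline bool ExampleFloatToFloat16Rounding() {
  Float16 down = FPToFloat16(1.00048828125f, FPTieEven, kIgnoreDefaultNaN);
  Float16 up = FPToFloat16(1.00146484375f, FPTieEven, kIgnoreDefaultNaN);
  return (Float16ToRawbits(down) == 0x3c00) &&
         (Float16ToRawbits(up) == 0x3c02);
}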
Float16 FPToFloat16(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven rounding mode is implemented.
VIXL_ASSERT(round_mode == FPTieEven);
USE(round_mode);
uint64_t raw = DoubleToRawbits(value);
int32_t sign = raw >> 63;
int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
: Float16ToRawbits(kFP16NegativeInfinity);
result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
result |= (1 << 9);  // Force a quiet NaN.
return RawbitsToFloat16(result);
}
case FP_ZERO:
return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
case FP_INFINITE:
return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert double-to-half as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
// Add the implicit '1' bit to the mantissa.
mantissa += (UINT64_C(1) << 52);
return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return kFP16PositiveZero;
}
} // namespace vixl
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H
#include <cmath>
#include <cstring>
#include <limits>
#include <vector>
#include "compiler-intrinsics-vixl.h"
#include "globals-vixl.h"
namespace vixl {
// Macros for compile-time format checking.
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
__attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif
#ifdef __GNUC__
#define VIXL_HAS_DEPRECATED_WITH_MSG
#elif defined(__clang__)
#if __has_extension(attribute_deprecated_with_message)
#define VIXL_HAS_DEPRECATED_WITH_MSG
#endif
#endif
#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
#define VIXL_DEPRECATED(replaced_by, declarator) \
__attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
#else
#define VIXL_DEPRECATED(replaced_by, declarator) declarator
#endif
#ifdef VIXL_DEBUG
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
#else
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
#endif
template <typename T, size_t n>
size_t ArrayLength(const T (&)[n]) {
return n;
}
// Check number width.
// TODO: Refactor these using templates.
inline bool IsIntN(unsigned n, uint32_t x) {
VIXL_ASSERT((0 < n) && (n < 32));
uint32_t limit = UINT32_C(1) << (n - 1);
return x < limit;
}
inline bool IsIntN(unsigned n, int32_t x) {
VIXL_ASSERT((0 < n) && (n < 32));
int32_t limit = INT32_C(1) << (n - 1);
return (-limit <= x) && (x < limit);
}
inline bool IsIntN(unsigned n, uint64_t x) {
VIXL_ASSERT((0 < n) && (n < 64));
uint64_t limit = UINT64_C(1) << (n - 1);
return x < limit;
}
inline bool IsIntN(unsigned n, int64_t x) {
VIXL_ASSERT((0 < n) && (n < 64));
int64_t limit = INT64_C(1) << (n - 1);
return (-limit <= x) && (x < limit);
}
VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
return IsIntN(n, x);
}
inline bool IsUintN(unsigned n, uint32_t x) {
VIXL_ASSERT((0 < n) && (n < 32));
return !(x >> n);
}
inline bool IsUintN(unsigned n, int32_t x) {
VIXL_ASSERT((0 < n) && (n < 32));
// Convert to an unsigned integer to avoid implementation-defined behavior.
return !(static_cast<uint32_t>(x) >> n);
}
inline bool IsUintN(unsigned n, uint64_t x) {
VIXL_ASSERT((0 < n) && (n < 64));
return !(x >> n);
}
inline bool IsUintN(unsigned n, int64_t x) {
VIXL_ASSERT((0 < n) && (n < 64));
// Convert to an unsigned integer to avoid implementation-defined behavior.
return !(static_cast<uint64_t>(x) >> n);
}
VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
return IsUintN(n, x);
}
inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
VIXL_ASSERT((0 < n) && (n < 64));
return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
}
VIXL_DEPRECATED("TruncateToUintN",
inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
return TruncateToUintN(n, x);
}
// clang-format off
#define INT_1_TO_32_LIST(V) \
V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)
#define INT_33_TO_63_LIST(V) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)
#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)
// clang-format on
#define DECLARE_IS_INT_N(N) \
inline bool IsInt##N(int64_t x) { return IsIntN(N, x); } \
VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
return IsIntN(N, x); \
}
#define DECLARE_IS_UINT_N(N) \
inline bool IsUint##N(int64_t x) { return IsUintN(N, x); } \
VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
return IsUintN(N, x); \
}
#define DECLARE_TRUNCATE_TO_UINT_32(N) \
inline uint32_t TruncateToUint##N(uint64_t x) { \
return static_cast<uint32_t>(TruncateToUintN(N, x)); \
} \
VIXL_DEPRECATED("TruncateToUint" #N, \
inline uint32_t truncate_to_int##N(int64_t x)) { \
return TruncateToUint##N(x); \
}
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_UINT_32
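// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). The macros above expand to helpers such as
// IsInt8, IsUint12 and TruncateToUint16, which the assemblers use for operand
// range checks.
inline bool ExampleWidthHelpers() {
  return IsInt8(-128) && !IsInt8(128) && IsUint12(0xfff) &&
         (TruncateToUint16(UINT64_C(0x12345678)) == 0x5678);
}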
// Bit field extraction.
inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
if ((msb == 63) && (lsb == 0)) return x;
return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}
inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
}
inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
// If the highest extracted bit is set, sign extend.
if ((temp >> (msb - lsb)) == 1) {
temp |= ~UINT64_C(0) << (msb - lsb);
}
int64_t result;
memcpy(&result, &temp, sizeof(result));
return result;
}
inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
int32_t result;
memcpy(&result, &temp, sizeof(result));
return result;
}
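// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). Bits [7:4] of 0xab read back as 0xa, and the
// signed variant sign-extends from the top bit of the requested field, so
// bits [3:0] of 0xf read back as -1.
inline bool ExampleBitfieldExtraction() {
  return (ExtractUnsignedBitfield32(7, 4, 0xab) == 0xa) &&
         (ExtractSignedBitfield32(3, 0, 0xf) == -1);
}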
inline uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
VIXL_ASSERT((width > 0) && (width <= 64));
uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
rotate &= 63;
if (rotate > 0) {
value &= width_mask;
value = (value << (width - rotate)) | (value >> rotate);
}
return value & width_mask;
}
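// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). The rotation wraps within the requested width,
// so rotating 0b0001 right by one in a 4-bit field gives 0b1000.
inline bool ExampleRotateRight() {
  return (RotateRight(UINT64_C(1), 1, 4) == UINT64_C(8)) &&
         (RotateRight(UINT64_C(0x12345678), 8, 32) == UINT64_C(0x78123456));
}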
// Wrapper class for passing FP16 values through the assembler.
// This is purely to aid with type checking/casting.
class Float16 {
public:
explicit Float16(double dvalue);
Float16() : rawbits_(0x0) {}
friend uint16_t Float16ToRawbits(Float16 value);
friend Float16 RawbitsToFloat16(uint16_t bits);
protected:
uint16_t rawbits_;
};
// Floating point representation.
uint16_t Float16ToRawbits(Float16 value);
uint32_t FloatToRawbits(float value);
VIXL_DEPRECATED("FloatToRawbits",
inline uint32_t float_to_rawbits(float value)) {
return FloatToRawbits(value);
}
uint64_t DoubleToRawbits(double value);
VIXL_DEPRECATED("DoubleToRawbits",
inline uint64_t double_to_rawbits(double value)) {
return DoubleToRawbits(value);
}
Float16 RawbitsToFloat16(uint16_t bits);
float RawbitsToFloat(uint32_t bits);
VIXL_DEPRECATED("RawbitsToFloat",
inline float rawbits_to_float(uint32_t bits)) {
return RawbitsToFloat(bits);
}
double RawbitsToDouble(uint64_t bits);
VIXL_DEPRECATED("RawbitsToDouble",
inline double rawbits_to_double(uint64_t bits)) {
return RawbitsToDouble(bits);
}
namespace internal {
// Internal simulation class used solely by the simulator to
// provide an abstraction layer for any half-precision arithmetic.
class SimFloat16 : public Float16 {
public:
// TODO: We should investigate making this constructor explicit.
// This is currently difficult to do due to a number of templated
// functions in the simulator which rely on returning double values.
SimFloat16(double dvalue) : Float16(dvalue) {} // NOLINT(runtime/explicit)
SimFloat16(Float16 f) { // NOLINT(runtime/explicit)
this->rawbits_ = Float16ToRawbits(f);
}
SimFloat16() : Float16() {}
SimFloat16 operator-() const;
SimFloat16 operator+(SimFloat16 rhs) const;
SimFloat16 operator-(SimFloat16 rhs) const;
SimFloat16 operator*(SimFloat16 rhs) const;
SimFloat16 operator/(SimFloat16 rhs) const;
bool operator<(SimFloat16 rhs) const;
bool operator>(SimFloat16 rhs) const;
bool operator==(SimFloat16 rhs) const;
bool operator!=(SimFloat16 rhs) const;
// This is necessary for conversions performed in (macro asm) Fmov.
bool operator==(double rhs) const;
operator double() const;
};
} // namespace internal
uint32_t Float16Sign(internal::SimFloat16 value);
uint32_t Float16Exp(internal::SimFloat16 value);
uint32_t Float16Mantissa(internal::SimFloat16 value);
uint32_t FloatSign(float value);
VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
return FloatSign(value);
}
uint32_t FloatExp(float value);
VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
return FloatExp(value);
}
uint32_t FloatMantissa(float value);
VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
return FloatMantissa(value);
}
uint32_t DoubleSign(double value);
VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
return DoubleSign(value);
}
uint32_t DoubleExp(double value);
VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
return DoubleExp(value);
}
uint64_t DoubleMantissa(double value);
VIXL_DEPRECATED("DoubleMantissa",
inline uint64_t double_mantissa(double value)) {
return DoubleMantissa(value);
}
internal::SimFloat16 Float16Pack(uint16_t sign,
uint16_t exp,
uint16_t mantissa);
float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
VIXL_DEPRECATED("FloatPack",
inline float float_pack(uint32_t sign,
uint32_t exp,
uint32_t mantissa)) {
return FloatPack(sign, exp, mantissa);
}
double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
VIXL_DEPRECATED("DoublePack",
inline double double_pack(uint32_t sign,
uint32_t exp,
uint64_t mantissa)) {
return DoublePack(sign, exp, mantissa);
}
// An fpclassify() function for 16-bit half-precision floats.
int Float16Classify(Float16 value);
VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
return Float16Classify(RawbitsToFloat16(value));
}
bool IsZero(Float16 value);
inline bool IsNaN(float value) { return std::isnan(value); }
inline bool IsNaN(double value) { return std::isnan(value); }
inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }
inline bool IsInf(float value) { return std::isinf(value); }
inline bool IsInf(double value) { return std::isinf(value); }
inline bool IsInf(Float16 value) {
return Float16Classify(value) == FP_INFINITE;
}
// NaN tests.
inline bool IsSignallingNaN(double num) {
const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
uint64_t raw = DoubleToRawbits(num);
if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float num) {
const uint32_t kFP32QuietNaNMask = 0x00400000;
uint32_t raw = FloatToRawbits(num);
if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(Float16 num) {
const uint16_t kFP16QuietNaNMask = 0x0200;
return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
}
template <typename T>
inline bool IsQuietNaN(T num) {
return IsNaN(num) && !IsSignallingNaN(num);
}
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
VIXL_ASSERT(IsNaN(num));
return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
}
inline float ToQuietNaN(float num) {
const uint32_t kFP32QuietNaNMask = 0x00400000;
VIXL_ASSERT(IsNaN(num));
return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
}
inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
const uint16_t kFP16QuietNaNMask = 0x0200;
VIXL_ASSERT(IsNaN(num));
return internal::SimFloat16(
RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
}
// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
return fma(op1, op2, a);
}
inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
template <typename T>
inline int HighestSetBitPosition(T value) {
VIXL_ASSERT(value != 0);
return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}
template <typename V>
inline int WhichPowerOf2(V value) {
VIXL_ASSERT(IsPowerOf2(value));
return CountTrailingZeros(value);
}
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
int BitCount(uint64_t value);
template <typename T>
T ReverseBits(T value) {
VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8));
T result = 0;
for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
result = (result << 1) | (value & 1);
value >>= 1;
}
return result;
}
template <typename T>
inline T SignExtend(T val, int bitSize) {
VIXL_ASSERT(bitSize > 0);
T mask = (T(2) << (bitSize - 1)) - T(1);
val &= mask;
T sign_bits = -((val >> (bitSize - 1)) << bitSize);
val |= sign_bits;
return val;
}
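// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). Sign-extending the 8-bit pattern 0xff inside a
// wider signed type reproduces -1, while 0x7f stays positive.
inline bool ExampleSignExtend() {
  return (SignExtend<int32_t>(0xff, 8) == -1) &&
         (SignExtend<int32_t>(0x7f, 8) == 127);
}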
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = UINT64_C(0xff00000000000000);
for (int i = 7; i >= 0; i--) {
bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
mask >>= 8;
}
// Permutation tables for REV instructions.
// permute_table[0] is used by REV16_x, REV16_w
// permute_table[1] is used by REV32_x, REV_w
// permute_table[2] is used by REV_x
VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
uint64_t temp = 0;
for (int i = 0; i < 8; i++) {
temp <<= 8;
temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
}
T result;
VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
memcpy(&result, &temp, sizeof(result));
return result;
}
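// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). With two-byte blocks (block_bytes_log2 == 1)
// the bytes swap within each half-word, as REV16 does; with four-byte blocks
// (== 2) the whole word is reversed, as REV does for W registers.
inline bool ExampleReverseBytes() {
  uint32_t word = 0x12345678;
  return (ReverseBytes(word, 1) == 0x34127856) &&
         (ReverseBytes(word, 2) == 0x78563412);
}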
template <unsigned MULTIPLE, typename T>
inline bool IsMultiple(T value) {
VIXL_ASSERT(IsPowerOf2(MULTIPLE));
return (value & (MULTIPLE - 1)) == 0;
}
template <typename T>
inline bool IsMultiple(T value, unsigned multiple) {
VIXL_ASSERT(IsPowerOf2(multiple));
return (value & (multiple - 1)) == 0;
}
template <typename T>
inline bool IsAligned(T pointer, int alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
return (pointer & (alignment - 1)) == 0;
}
// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template <unsigned ALIGN, typename T>
inline bool IsAligned(T pointer) {
VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
return IsAligned((intptr_t)(pointer), ALIGN);
}
template <typename T>
bool IsWordAligned(T pointer) {
return IsAligned<4>(pointer);
}
// Increment a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignUp(T pointer,
typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
(typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t mask = alignment - 1;
T result = (T)((pointer_raw + mask) & ~mask);
VIXL_ASSERT(result >= pointer);
return result;
}
// Decrement a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignDown(T pointer,
typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
(typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t mask = alignment - 1;
return (T)(pointer_raw & ~mask);
}
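// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). Aligning an address-sized value to a 16-byte
// boundary in both directions.
inline bool ExampleAlignment() {
  uint64_t address = 0x1009;
  return (AlignUp(address, 16) == 0x1010) &&
         (AlignDown(address, 16) == 0x1000);
}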
template <typename T>
inline T ExtractBit(T value, unsigned bit) {
return (value >> bit) & T(1);
}
template <typename Ts, typename Td>
inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
return Td((value >> least_significant_bit) & Ts(mask));
}
template <typename Ts, typename Td>
inline void AssignBit(Td& dst, // NOLINT(runtime/references)
int bit,
Ts value) {
VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
VIXL_ASSERT(bit >= 0);
VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
Td mask(1);
dst &= ~(mask << bit);
dst |= Td(value) << bit;
}
template <typename Td, typename Ts>
inline void AssignBits(Td& dst, // NOLINT(runtime/references)
int least_significant_bit,
Ts mask,
Ts value) {
VIXL_ASSERT(least_significant_bit >= 0);
VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
Td(mask));
VIXL_ASSERT((value & mask) == value);
dst &= ~(Td(mask) << least_significant_bit);
dst |= Td(value) << least_significant_bit;
}
class VFP {
public:
static uint32_t FP32ToImm8(float imm) {
// bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
uint32_t bits = FloatToRawbits(imm);
// bit7: a000.0000
uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
// bit6: 0b00.0000
uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}
static uint32_t FP64ToImm8(double imm) {
// bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = DoubleToRawbits(imm);
// bit7: a000.0000
uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}
static float Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return RawbitsToFloat(result);
}
static double Imm8ToFP64(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return RawbitsToDouble(result);
}
static bool IsImmFP32(float imm) {
// Valid values will have the form:
// aBbb.bbbc.defg.h000.0000.0000.0000.0000
uint32_t bits = FloatToRawbits(imm);
// bits[19..0] are cleared.
if ((bits & 0x7ffff) != 0) {
return false;
}
// bits[29..25] are all set or all cleared.
uint32_t b_pattern = (bits >> 16) & 0x3e00;
if (b_pattern != 0 && b_pattern != 0x3e00) {
return false;
}
// bit[30] and bit[29] are opposite.
if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
return false;
}
return true;
}
static bool IsImmFP64(double imm) {
// Valid values will have the form:
// aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = DoubleToRawbits(imm);
// bits[47..0] are cleared.
if ((bits & 0x0000ffffffffffff) != 0) {
return false;
}
// bits[61..54] are all set or all cleared.
uint32_t b_pattern = (bits >> 48) & 0x3fc0;
if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
return false;
}
// bit[62] and bit[61] are opposite.
if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
return false;
}
return true;
}
};
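// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). The modified-immediate encoding above maps the
// eight bits abcdefgh onto a small set of FP values; for example 1.0 encodes
// as 0x70 and decodes back exactly in both single and double precision.
inline bool ExampleVFPImm8RoundTrip() {
  return VFP::IsImmFP32(1.0f) && (VFP::FP32ToImm8(1.0f) == 0x70) &&
         (VFP::Imm8ToFP32(0x70) == 1.0f) && (VFP::Imm8ToFP64(0x70) == 1.0);
}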
class BitField {
// ForEachBitHelper is a functor that will call
// bool ForEachBitHelper::execute(ElementType id) const
// and expects a boolean in return whether to continue (if true)
// or stop (if false)
// check_set will check if the bits are on (true) or off (false)
template <typename ForEachBitHelper, bool check_set>
bool ForEachBit(const ForEachBitHelper& helper) {
for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
if (bitfield_[i] == check_set)
if (!helper.execute(i)) return false;
}
return true;
}
public:
explicit BitField(unsigned size) : bitfield_(size, 0) {}
void Set(int i) {
VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
bitfield_[i] = true;
}
void Unset(int i) {
VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
bitfield_[i] = false;
}
bool IsSet(int i) const { return bitfield_[i]; }
// For each bit not set in the bitfield call the execute functor
// execute.
// ForEachBitSetHelper::execute returns true if the iteration through
// the bits can continue, otherwise it will stop.
// struct ForEachBitSetHelper {
// bool execute(int /*id*/) { return false; }
// };
template <typename ForEachBitNotSetHelper>
bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
return ForEachBit<ForEachBitNotSetHelper, false>(helper);
}
// For each bit set in the bitfield call the execute functor
// execute.
template <typename ForEachBitSetHelper>
bool ForEachBitSet(const ForEachBitSetHelper& helper) {
return ForEachBit<ForEachBitSetHelper, true>(helper);
}
private:
std::vector<bool> bitfield_;
};
namespace internal {
typedef int64_t Int64;
class Uint64;
class Uint128;
class Uint32 {
uint32_t data_;
public:
// Unlike uint32_t, Uint32 has a default constructor.
Uint32() { data_ = 0; }
explicit Uint32(uint32_t data) : data_(data) {}
inline explicit Uint32(Uint64 data);
uint32_t Get() const { return data_; }
template <int N>
int32_t GetSigned() const {
return ExtractSignedBitfield32(N - 1, 0, data_);
}
int32_t GetSigned() const { return data_; }
Uint32 operator~() const { return Uint32(~data_); }
Uint32 operator-() const { return Uint32(-data_); }
bool operator==(Uint32 value) const { return data_ == value.data_; }
bool operator!=(Uint32 value) const { return data_ != value.data_; }
bool operator>(Uint32 value) const { return data_ > value.data_; }
Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
Uint32 operator&=(Uint32 value) {
data_ &= value.data_;
return *this;
}
Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
Uint32 operator^=(Uint32 value) {
data_ ^= value.data_;
return *this;
}
Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
Uint32 operator|=(Uint32 value) {
data_ |= value.data_;
return *this;
}
// Unlike uint32_t, the shift functions can accept negative shift and
// return 0 when the shift is too big.
Uint32 operator>>(int shift) const {
if (shift == 0) return *this;
if (shift < 0) {
int tmp = -shift;
if (tmp >= 32) return Uint32(0);
return Uint32(data_ << tmp);
}
int tmp = shift;
if (tmp >= 32) return Uint32(0);
return Uint32(data_ >> tmp);
}
Uint32 operator<<(int shift) const {
if (shift == 0) return *this;
if (shift < 0) {
int tmp = -shift;
if (tmp >= 32) return Uint32(0);
return Uint32(data_ >> tmp);
}
int tmp = shift;
if (tmp >= 32) return Uint32(0);
return Uint32(data_ << tmp);
}
};
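// A minimal usage sketch (not part of the original VIXL sources; the helper
// name is purely illustrative). Unlike the raw integer types, a negative count
// shifts in the opposite direction and an oversized count yields zero.
inline bool ExampleUint32ShiftSemantics() {
  Uint32 x(0x80000000);
  return ((x >> 31) == Uint32(1)) &&  // Ordinary right shift.
         ((x >> -4) == Uint32(0)) &&  // Shifts left instead; overflows to zero here.
         ((x << 40) == Uint32(0));    // Counts of 32 or more give zero.
}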
class Uint64 {
uint64_t data_;
public:
// Unlike uint64_t, Uint64 has a default constructor.
Uint64() { data_ = 0; }
explicit Uint64(uint64_t data) : data_(data) {}
explicit Uint64(Uint32 data) : data_(data.Get()) {}
inline explicit Uint64(Uint128 data);
uint64_t Get() const { return data_; }
int64_t GetSigned(int N) const {
return ExtractSignedBitfield64(N - 1, 0, data_);
}
int64_t GetSigned() const { return data_; }
Uint32 ToUint32() const {
VIXL_ASSERT((data_ >> 32) == 0);
return Uint32(static_cast<uint32_t>(data_));
}
Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
Uint64 operator~() const { return Uint64(~data_); }
Uint64 operator-() const { return Uint64(-data_); }
bool operator==(Uint64 value) const { return data_ == value.data_; }
bool operator!=(Uint64 value) const { return data_ != value.data_; }
Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
Uint64 operator&=(Uint64 value) {
data_ &= value.data_;
return *this;
}
Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
Uint64 operator^=(Uint64 value) {
data_ ^= value.data_;
return *this;
}
Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
Uint64 operator|=(Uint64 value) {
data_ |= value.data_;
return *this;
}
// Unlike uint64_t, the shift functions can accept negative shift and
// return 0 when the shift is too big.
Uint64 operator>>(int shift) const {
if (shift == 0) return *this;
if (shift < 0) {
int tmp = -shift;
if (tmp >= 64) return Uint64(0);
return Uint64(data_ << tmp);
}
int tmp = shift;
if (tmp >= 64) return Uint64(0);
return Uint64(data_ >> tmp);
}
Uint64 operator<<(int shift) const {
if (shift == 0) return *this;
if (shift < 0) {
int tmp = -shift;
if (tmp >= 64) return Uint64(0);
return Uint64(data_ >> tmp);
}
int tmp = shift;
if (tmp >= 64) return Uint64(0);
return Uint64(data_ << tmp);
}
};
class Uint128 {
uint64_t data_high_;
uint64_t data_low_;
public:
Uint128() : data_high_(0), data_low_(0) {}
explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
explicit Uint128(Uint64 data_low)
: data_high_(0), data_low_(data_low.Get()) {}
Uint128(uint64_t data_high, uint64_t data_low)
: data_high_(data_high), data_low_(data_low) {}
Uint64 ToUint64() const {
VIXL_ASSERT(data_high_ == 0);
return Uint64(data_low_);
}
Uint64 GetHigh64() const { return Uint64(data_high_); }
Uint64 GetLow64() const { return Uint64(data_low_); }
Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
bool operator==(Uint128 value) const {
return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
}
Uint128 operator&(Uint128 value) const {
return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
}
Uint128 operator&=(Uint128 value) {
data_high_ &= value.data_high_;
data_low_ &= value.data_low_;
return *this;
}
Uint128 operator|=(Uint128 value) {
data_high_ |= value.data_high_;
data_low_ |= value.data_low_;
return *this;
}
Uint128 operator>>(int shift) const {
VIXL_ASSERT((shift >= 0) && (shift < 128));
if (shift == 0) return *this;
if (shift >= 64) {
return Uint128(0, data_high_ >> (shift - 64));
}
uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
return Uint128(data_high_ >> shift, tmp);
}
Uint128 operator<<(int shift) const {
VIXL_ASSERT((shift >= 0) && (shift < 128));
if (shift == 0) return *this;
if (shift >= 64) {
return Uint128(data_low_ << (shift - 64), 0);
}
uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
return Uint128(tmp, data_low_ << shift);
}
};
Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}
Int64 BitCount(Uint32 value);
} // namespace internal
// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const Float16 kFP16DefaultNaN;
// Floating-point infinity values.
extern const Float16 kFP16PositiveInfinity;
extern const Float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;
// Floating-point zero values.
extern const Float16 kFP16PositiveZero;
extern const Float16 kFP16NegativeZero;
// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
FPTieEven = 0x0,
FPPositiveInfinity = 0x1,
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding modes are only available when explicitly specified by
// the instruction (such as with fcvta). They cannot be set in FPCR.
FPTieAway,
FPRoundOdd
};
enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };
// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
// sign: 0 = positive, 1 = negative
// exponent: Unbiased IEEE-754 exponent.
// mantissa: The mantissa of the input. The top bit (which is not encoded for
// normal IEEE-754 values) must not be omitted. This bit has the
// value 'pow(2, exponent)'.
//
// The input value is assumed to be a normalized value. That is, the input may
// not be infinity or NaN. If the source value is subnormal, it must be
// normalized before calling this function such that the highest set bit in the
// mantissa has the value 'pow(2, exponent)'.
//
// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
// calling a templated FPRound.
template <class T, int ebits, int mbits>
T FPRound(int64_t sign,
int64_t exponent,
uint64_t mantissa,
FPRounding round_mode) {
VIXL_ASSERT((sign == 0) || (sign == 1));
// Only FPTieEven and FPRoundOdd rounding modes are implemented.
VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
// Rounding can promote subnormals to normals, and normals to infinities. For
// example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
// encodable as a float, but rounding based on the low-order mantissa bits
// could make it overflow. With ties-to-even rounding, this value would become
// an infinity.
// ---- Rounding Method ----
//
// The exponent is irrelevant in the rounding operation, so we treat the
// lowest-order bit that will fit into the result ('onebit') as having
// the value '1'. Similarly, the highest-order bit that won't fit into
// the result ('halfbit') has the value '0.5'. The 'point' sits between
// 'onebit' and 'halfbit':
//
// These bits fit into the result.
// |---------------------|
// mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// ||
// / |
// / halfbit
// onebit
//
// For subnormal outputs, the range of representable bits is smaller and
// the position of onebit and halfbit depends on the exponent of the
// input, but the method is otherwise similar.
//
// onebit(frac)
// |
// | halfbit(frac) halfbit(adjusted)
// | / /
// | | |
// 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
// 0b00.0... -> 0b00.0... -> 0b00
// 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
// 0b00.1... -> 0b00.1... -> 0b01
// 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
// 0b01.0... -> 0b01.0... -> 0b01
// 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
// 0b01.1... -> 0b01.1... -> 0b10
// 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
// 0b10.0... -> 0b10.0... -> 0b10
// 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
// 0b10.1... -> 0b10.1... -> 0b11
// 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
// ... / | / |
// / | / |
// / |
// adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
//
// mantissa = (mantissa >> shift) + halfbit(adjusted);
static const int mantissa_offset = 0;
static const int exponent_offset = mantissa_offset + mbits;
static const int sign_offset = exponent_offset + ebits;
VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
// Bail out early for zero inputs.
if (mantissa == 0) {
return static_cast<T>(sign << sign_offset);
}
// If all bits in the exponent are set, the value is infinite or NaN.
// This is true for all binary IEEE-754 formats.
static const int infinite_exponent = (1 << ebits) - 1;
static const int max_normal_exponent = infinite_exponent - 1;
// Apply the exponent bias to encode it for the result. Doing this early makes
// it easy to detect values that will be infinite or subnormal.
exponent += max_normal_exponent >> 1;
if (exponent > max_normal_exponent) {
// Overflow: the input is too large for the result type to represent.
if (round_mode == FPTieEven) {
// FPTieEven rounding mode handles overflows using infinities.
exponent = infinite_exponent;
mantissa = 0;
} else {
VIXL_ASSERT(round_mode == FPRoundOdd);
// FPRoundOdd rounding mode handles overflows using the largest magnitude
// normal number.
exponent = max_normal_exponent;
mantissa = (UINT64_C(1) << exponent_offset) - 1;
}
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
(mantissa << mantissa_offset));
}
// Calculate the shift required to move the top mantissa bit to the proper
// place in the destination type.
const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
int shift = highest_significant_bit - mbits;
if (exponent <= 0) {
// The output will be subnormal (before rounding).
// For subnormal outputs, the shift must be adjusted by the exponent. The +1
// is necessary because the exponent of a subnormal value (encoded as 0) is
// the same as the exponent of the smallest normal value (encoded as 1).
shift += -exponent + 1;
// Handle inputs that would produce a zero output.
//
// Shifts higher than highest_significant_bit+1 will always produce a zero
// result. A shift of exactly highest_significant_bit+1 might produce a
// non-zero result after rounding.
if (shift > (highest_significant_bit + 1)) {
if (round_mode == FPTieEven) {
// The result will always be +/-0.0.
return static_cast<T>(sign << sign_offset);
} else {
VIXL_ASSERT(round_mode == FPRoundOdd);
VIXL_ASSERT(mantissa != 0);
// For FPRoundOdd, if the mantissa is too small to represent and
// non-zero return the next "odd" value.
return static_cast<T>((sign << sign_offset) | 1);
}
}
// Properly encode the exponent for a subnormal output.
exponent = 0;
} else {
// Clear the topmost mantissa bit, since this is not encoded in IEEE-754
// normal values.
mantissa &= ~(UINT64_C(1) << highest_significant_bit);
}
// The casts below are only well-defined for unsigned integers.
VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);
if (shift > 0) {
if (round_mode == FPTieEven) {
// We have to shift the mantissa to the right. Some precision is lost, so
// we need to apply rounding.
uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
uint64_t adjusted = mantissa - adjustment;
T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
T result =
static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset));
// A very large mantissa can overflow during rounding. If this happens,
// the exponent should be incremented and the mantissa set to 1.0
// (encoded as 0). Applying halfbit_adjusted after assembling the float
// has the nice side-effect that this case is handled for free.
//
// This also handles cases where a very large finite value overflows to
// infinity, or where a very large subnormal value overflows to become
// normal.
return result + halfbit_adjusted;
} else {
VIXL_ASSERT(round_mode == FPRoundOdd);
// If any bits at position halfbit or below are set, onebit (ie. the
// bottom bit of the resulting mantissa) must be set.
uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
if (fractional_bits != 0) {
mantissa |= UINT64_C(1) << shift;
}
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset));
}
} else {
// We have to shift the mantissa to the left (or not at all). The input
// mantissa is exactly representable in the output mantissa, so apply no
// rounding correction.
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa << -shift) << mantissa_offset));
}
}
// See FPRound for a description of this function.
inline double FPRoundToDouble(int64_t sign,
int64_t exponent,
uint64_t mantissa,
FPRounding round_mode) {
uint64_t bits =
FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
exponent,
mantissa,
round_mode);
return RawbitsToDouble(bits);
}
// See FPRound for a description of this function.
inline Float16 FPRoundToFloat16(int64_t sign,
int64_t exponent,
uint64_t mantissa,
FPRounding round_mode) {
return RawbitsToFloat16(
FPRound<uint16_t,
kFloat16ExponentBits,
kFloat16MantissaBits>(sign, exponent, mantissa, round_mode));
}
// See FPRound for a description of this function.
static inline float FPRoundToFloat(int64_t sign,
int64_t exponent,
uint64_t mantissa,
FPRounding round_mode) {
uint32_t bits =
FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
exponent,
mantissa,
round_mode);
return RawbitsToFloat(bits);
}
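// A minimal worked example of the rounding scheme above (not part of the
// original VIXL sources; the helper name is purely illustrative). The top
// mantissa bit carries the value 2^exponent, so bit 40 with an unbiased
// exponent of 0 encodes exactly 1.0. After the 17-bit shift down to a 23-bit
// float mantissa, bit 16 is the halfbit: setting only it creates an exact tie
// that rounds to the even neighbour, while anything beyond it rounds up.
inline bool ExampleFPRoundToFloat() {
  uint64_t one = UINT64_C(1) << 40;
  float exact = FPRoundToFloat(0, 0, one, FPTieEven);
  float tie = FPRoundToFloat(0, 0, one | (UINT64_C(1) << 16), FPTieEven);
  float above = FPRoundToFloat(0, 0, one | (UINT64_C(1) << 16) | 1, FPTieEven);
  return (exact == 1.0f) && (tie == 1.0f) &&
         (FloatToRawbits(above) == 0x3f800001);
}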
float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
float FPToFloat(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception = NULL);
double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);
Float16 FPToFloat16(float value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception = NULL);
Float16 FPToFloat16(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception = NULL);
} // namespace vixl
#endif // VIXL_UTILS_H