Move dead submodules in-tree

Signed-off-by: swurl <swurl@swurl.xyz>
swurl 2025-05-31 02:33:02 -04:00
parent c0cceff365
commit 6c655321e6
Signed by: crueter
GPG key ID: A5A7629F109C8FD1
4081 changed files with 1185566 additions and 45 deletions

externals/dynarmic/tests/A64/a64.cpp vendored Normal file (2156 additions)

File diff suppressed because one or more lines are too long


@@ -0,0 +1,168 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2023 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <array>
#include <exception>
#include <map>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <mcl/stdint.hpp>
#include <oaknut/oaknut.hpp>
#include "dynarmic/interface/A64/a64.h"
using namespace Dynarmic;
namespace {
class MyEnvironment final : public A64::UserCallbacks {
public:
u64 ticks_left = 0;
std::map<u64, u8> memory{};
u8 MemoryRead8(u64 vaddr) override {
return memory[vaddr];
}
u16 MemoryRead16(u64 vaddr) override {
return u16(MemoryRead8(vaddr)) | u16(MemoryRead8(vaddr + 1)) << 8;
}
u32 MemoryRead32(u64 vaddr) override {
return u32(MemoryRead16(vaddr)) | u32(MemoryRead16(vaddr + 2)) << 16;
}
u64 MemoryRead64(u64 vaddr) override {
return u64(MemoryRead32(vaddr)) | u64(MemoryRead32(vaddr + 4)) << 32;
}
std::array<u64, 2> MemoryRead128(u64 vaddr) override {
return {MemoryRead64(vaddr), MemoryRead64(vaddr + 8)};
}
void MemoryWrite8(u64 vaddr, u8 value) override {
memory[vaddr] = value;
}
void MemoryWrite16(u64 vaddr, u16 value) override {
MemoryWrite8(vaddr, u8(value));
MemoryWrite8(vaddr + 1, u8(value >> 8));
}
void MemoryWrite32(u64 vaddr, u32 value) override {
MemoryWrite16(vaddr, u16(value));
MemoryWrite16(vaddr + 2, u16(value >> 16));
}
void MemoryWrite64(u64 vaddr, u64 value) override {
MemoryWrite32(vaddr, u32(value));
MemoryWrite32(vaddr + 4, u32(value >> 32));
}
void MemoryWrite128(u64 vaddr, std::array<u64, 2> value) override {
MemoryWrite64(vaddr, value[0]);
MemoryWrite64(vaddr + 8, value[1]);
}
void InterpreterFallback(u64, size_t) override {
// This is never called in practice.
std::terminate();
}
void CallSVC(u32) override {
// Do something.
}
void ExceptionRaised(u64, A64::Exception) override {
cpu->HaltExecution();
}
void AddTicks(u64) override {
}
u64 GetTicksRemaining() override {
return 1000000000000;
}
std::uint64_t GetCNTPCT() override {
return 0;
}
A64::Jit* cpu;
};
} // namespace
TEST_CASE("A64: fibonacci", "[a64]") {
MyEnvironment env;
A64::UserConfig user_config;
user_config.callbacks = &env;
A64::Jit cpu{user_config};
env.cpu = &cpu;
std::vector<u32> instructions(1024);
oaknut::CodeGenerator code{instructions.data(), nullptr};
using namespace oaknut::util;
oaknut::Label start, end, zero, recurse;
code.l(start);
code.STP(X29, X30, SP, PRE_INDEXED, -32);
code.STP(X20, X19, SP, 16);
code.MOV(X29, SP);
code.MOV(W19, W0);
code.SUBS(W0, W0, 1);
code.B(LT, zero);
code.B(NE, recurse);
code.MOV(W0, 1);
code.B(end);
code.l(zero);
code.MOV(W0, WZR);
code.B(end);
code.l(recurse);
code.BL(start);
code.MOV(W20, W0);
code.SUB(W0, W19, 2);
code.BL(start);
code.ADD(W0, W0, W20);
code.l(end);
code.LDP(X20, X19, SP, 16);
code.LDP(X29, X30, SP, POST_INDEXED, 32);
code.RET();
for (size_t i = 0; i < 1024; i++) {
env.MemoryWrite32(i * 4, instructions[i]);
}
env.MemoryWrite32(8888, 0xd4200000);
cpu.SetRegister(30, 8888);
cpu.SetRegister(0, 10);
cpu.SetSP(0xffff0000);
cpu.SetPC(0);
cpu.Run();
REQUIRE(cpu.GetRegister(0) == 55);
cpu.SetRegister(0, 20);
cpu.SetSP(0xffff0000);
cpu.SetPC(0);
cpu.Run();
REQUIRE(cpu.GetRegister(0) == 6765);
cpu.SetRegister(0, 30);
cpu.SetSP(0xffff0000);
cpu.SetPC(0);
cpu.Run();
REQUIRE(cpu.GetRegister(0) == 832040);
}
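As a sanity check on the expected results above, here is a minimal host-side sketch (not part of the vendored test) of the recursion the hand-assembled routine implements; fib(10), fib(20) and fib(30) are 55, 6765 and 832040 respectively.
#include <cassert>
#include <cstdint>
// Plain recursive Fibonacci mirroring the assembled routine's branches:
// n == 0 yields 0, n == 1 yields 1, otherwise fib(n - 1) + fib(n - 2).
std::uint32_t fib(std::uint32_t n) {
    return n < 2 ? n : fib(n - 1) + fib(n - 2);
}
int main() {
    assert(fib(10) == 55);
    assert(fib(20) == 6765);
    assert(fib(30) == 832040);
    return 0;
}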


@@ -0,0 +1,185 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <mcl/stdint.hpp>
#include "./testenv.h"
using namespace Dynarmic;
namespace {
struct TestCase {
u32 a;
u32 b;
u32 fmax;
u32 fmaxnm;
u32 fmin;
u32 fminnm;
};
const std::vector test_cases{
// a b fmax fmaxnm fmin fminnm
TestCase{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, // +0.0
TestCase{0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, // -0.0
TestCase{0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000}, // +1.0
TestCase{0xbf800000, 0xbf800000, 0xbf800000, 0xbf800000, 0xbf800000, 0xbf800000}, // -1.0
TestCase{0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000}, // +Inf
TestCase{0xff800000, 0xff800000, 0xff800000, 0xff800000, 0xff800000, 0xff800000}, // -Inf
TestCase{0x7fc00041, 0x7fc00041, 0x7fc00041, 0x7fc00041, 0x7fc00041, 0x7fc00041}, // QNaN
TestCase{0x7f800042, 0x7f800042, 0x7fc00042, 0x7fc00042, 0x7fc00042, 0x7fc00042}, // SNaN
TestCase{0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x80000000, 0x80000000}, // (+0.0, -0.0)
TestCase{0x3f800000, 0xbf800000, 0x3f800000, 0x3f800000, 0xbf800000, 0xbf800000}, // (+1.0, -1.0)
TestCase{0x3f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x3f800000, 0x3f800000}, // (+1.0, +Inf)
TestCase{0x3f800000, 0xff800000, 0x3f800000, 0x3f800000, 0xff800000, 0xff800000}, // (+1.0, -Inf)
TestCase{0x7f800000, 0xff800000, 0x7f800000, 0x7f800000, 0xff800000, 0xff800000}, // (+Inf, -Inf)
TestCase{0x3f800000, 0x7fc00041, 0x7fc00041, 0x3f800000, 0x7fc00041, 0x3f800000}, // (+1.0, QNaN)
TestCase{0x3f800000, 0x7f800042, 0x7fc00042, 0x7fc00042, 0x7fc00042, 0x7fc00042}, // (+1.0, SNaN)
TestCase{0x7f800000, 0x7fc00041, 0x7fc00041, 0x7f800000, 0x7fc00041, 0x7f800000}, // (+Inf, QNaN)
TestCase{0x7f800000, 0x7f800042, 0x7fc00042, 0x7fc00042, 0x7fc00042, 0x7fc00042}, // (+Inf, SNaN)
TestCase{0x7fc00041, 0x7f800042, 0x7fc00042, 0x7fc00042, 0x7fc00042, 0x7fc00042}, // (QNaN, SNaN)
TestCase{0xffa57454, 0xe343a6b3, 0xffe57454, 0xffe57454, 0xffe57454, 0xffe57454},
};
const std::vector unidirectional_test_cases{
TestCase{0x7fc00041, 0x7fc00043, 0x7fc00041, 0x7fc00041, 0x7fc00041, 0x7fc00041}, // (QNaN, QNaN)
TestCase{0x7f800042, 0x7f800044, 0x7fc00042, 0x7fc00042, 0x7fc00042, 0x7fc00042}, // (SNaN, SNaN)
};
constexpr u32 default_nan = 0x7fc00000;
bool is_nan(u32 value) {
return (value & 0x7f800000) == 0x7f800000 && (value & 0x007fffff) != 0;
}
u32 force_default_nan(u32 value) {
return is_nan(value) ? default_nan : value;
}
template<typename Fn>
void run_test(u32 instruction, Fn fn) {
A64TestEnv env;
A64::Jit jit{A64::UserConfig{&env}};
env.code_mem.emplace_back(instruction); // FMAX S0, S1, S2
env.code_mem.emplace_back(0x14000000); // B .
for (const auto base_fpcr : {0, 0x01000000}) {
for (const auto test_case : test_cases) {
INFO(test_case.a);
INFO(test_case.b);
jit.SetFpcr(base_fpcr);
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.a, 0});
jit.SetVector(2, {test_case.b, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == fn(test_case));
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.b, 0});
jit.SetVector(2, {test_case.a, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == fn(test_case));
jit.SetFpcr(base_fpcr | 0x02000000);
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.a, 0});
jit.SetVector(2, {test_case.b, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == force_default_nan(fn(test_case)));
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.b, 0});
jit.SetVector(2, {test_case.a, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == force_default_nan(fn(test_case)));
}
for (const auto test_case : unidirectional_test_cases) {
INFO(test_case.a);
INFO(test_case.b);
jit.SetFpcr(base_fpcr);
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.a, 0});
jit.SetVector(2, {test_case.b, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == fn(test_case));
jit.SetFpcr(base_fpcr | 0x02000000);
jit.SetVector(0, {42, 0});
jit.SetVector(1, {test_case.a, 0});
jit.SetVector(2, {test_case.b, 0});
jit.SetPC(0);
env.ticks_left = 2;
jit.Run();
REQUIRE(jit.GetVector(0)[0] == force_default_nan(fn(test_case)));
}
}
}
} // namespace
TEST_CASE("A64: FMAX (scalar)", "[a64]") {
run_test(0x1e224820, [](const TestCase& test_case) { return test_case.fmax; });
}
TEST_CASE("A64: FMIN (scalar)", "[a64]") {
run_test(0x1e225820, [](const TestCase& test_case) { return test_case.fmin; });
}
TEST_CASE("A64: FMAXNM (scalar)", "[a64]") {
run_test(0x1e226820, [](const TestCase& test_case) { return test_case.fmaxnm; });
}
TEST_CASE("A64: FMINNM (scalar)", "[a64]") {
run_test(0x1e227820, [](const TestCase& test_case) { return test_case.fminnm; });
}
TEST_CASE("A64: FMAX (vector)", "[a64]") {
run_test(0x4e22f420, [](const TestCase& test_case) { return test_case.fmax; });
}
TEST_CASE("A64: FMIN (vector)", "[a64]") {
run_test(0x4ea2f420, [](const TestCase& test_case) { return test_case.fmin; });
}
TEST_CASE("A64: FMAXNM (vector)", "[a64]") {
run_test(0x4e22c420, [](const TestCase& test_case) { return test_case.fmaxnm; });
}
TEST_CASE("A64: FMINNM (vector)", "[a64]") {
run_test(0x4ea2c420, [](const TestCase& test_case) { return test_case.fminnm; });
}
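The constants in the tables above are raw IEEE-754 single-precision bit patterns. Below is a minimal decoding sketch (an illustration assuming a C++20 toolchain with std::bit_cast, not part of the vendored tests), including the FMAXNM-style rule that a quiet NaN loses to a numeric operand; std::fmax follows the same convention.
#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>
int main() {
    // 0x3f800000 is +1.0f, 0x7f800000 is +Inf, 0x7fc00000 is the default QNaN.
    assert(std::bit_cast<float>(std::uint32_t{0x3f800000}) == 1.0f);
    assert(std::isinf(std::bit_cast<float>(std::uint32_t{0x7f800000})));
    assert(std::isnan(std::bit_cast<float>(std::uint32_t{0x7fc00000})));
    // A quiet NaN operand is ignored in favour of the numeric operand,
    // matching the fmaxnm column for the (+1.0, QNaN) test case above.
    const float qnan = std::bit_cast<float>(std::uint32_t{0x7fc00041});
    assert(std::fmax(1.0f, qnan) == 1.0f);
    return 0;
}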


@@ -0,0 +1,519 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <algorithm>
#include <cstring>
#include <string>
#include <vector>
#include <catch2/catch_test_macros.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include "../fuzz_util.h"
#include "../rand_int.h"
#include "../unicorn_emu/a64_unicorn.h"
#include "./testenv.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/llvm_disassemble.h"
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
#include "dynarmic/frontend/A64/a64_types.h"
#include "dynarmic/frontend/A64/decoder/a64.h"
#include "dynarmic/frontend/A64/translate/a64_translate.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/opcodes.h"
#include "dynarmic/ir/opt/passes.h"
// Must be declared last for all necessary operator<< to be declared prior to this.
#include <fmt/format.h>
#include <fmt/ostream.h>
using namespace Dynarmic;
static bool ShouldTestInst(u32 instruction, u64 pc, bool is_last_inst) {
const A64::LocationDescriptor location{pc, {}};
IR::Block block{location};
bool should_continue = A64::TranslateSingleInstruction(block, location, instruction);
if (!should_continue && !is_last_inst)
return false;
if (auto terminal = block.GetTerminal(); boost::get<IR::Term::Interpret>(&terminal))
return false;
for (const auto& ir_inst : block) {
switch (ir_inst.GetOpcode()) {
case IR::Opcode::A64ExceptionRaised:
case IR::Opcode::A64CallSupervisor:
case IR::Opcode::A64DataCacheOperationRaised:
case IR::Opcode::A64GetCNTPCT:
return false;
default:
continue;
}
}
return true;
}
static u32 GenRandomInst(u64 pc, bool is_last_inst) {
static const struct InstructionGeneratorInfo {
std::vector<InstructionGenerator> generators;
std::vector<InstructionGenerator> invalid;
} instructions = [] {
const std::vector<std::tuple<std::string, const char*>> list{
#define INST(fn, name, bitstring) {#fn, bitstring},
#include "dynarmic/frontend/A64/decoder/a64.inc"
#undef INST
};
std::vector<InstructionGenerator> generators;
std::vector<InstructionGenerator> invalid;
// List of instructions not to test
const std::vector<std::string> do_not_test{
// Unimplemented in QEMU
"STLLR",
// Unimplemented in QEMU
"LDLAR",
// Dynarmic and QEMU currently differ on how the exclusive monitor's address range works.
"STXR",
"STLXR",
"STXP",
"STLXP",
"LDXR",
"LDAXR",
"LDXP",
"LDAXP",
// Behaviour differs from QEMU
"MSR_reg",
"MSR_imm",
"MRS",
};
for (const auto& [fn, bitstring] : list) {
if (fn == "UnallocatedEncoding") {
continue;
}
if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) {
invalid.emplace_back(InstructionGenerator{bitstring});
continue;
}
generators.emplace_back(InstructionGenerator{bitstring});
}
return InstructionGeneratorInfo{generators, invalid};
}();
while (true) {
const size_t index = RandInt<size_t>(0, instructions.generators.size() - 1);
const u32 inst = instructions.generators[index].Generate();
if (std::any_of(instructions.invalid.begin(), instructions.invalid.end(), [inst](const auto& invalid) { return invalid.Match(inst); })) {
continue;
}
if (ShouldTestInst(inst, pc, is_last_inst)) {
return inst;
}
}
}
static u32 GenFloatInst(u64 pc, bool is_last_inst) {
static const std::vector<InstructionGenerator> instruction_generators = [] {
const std::vector<std::tuple<std::string, std::string, const char*>> list{
#define INST(fn, name, bitstring) {#fn, #name, bitstring},
#include "dynarmic/frontend/A64/decoder/a64.inc"
#undef INST
};
// List of instructions not to test
const std::vector<std::string> do_not_test{};
std::vector<InstructionGenerator> result;
for (const auto& [fn, name, bitstring] : list) {
(void)name;
if (fn[0] != 'F') {
continue;
} else if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) {
continue;
}
result.emplace_back(InstructionGenerator{bitstring});
}
return result;
}();
while (true) {
const size_t index = RandInt<size_t>(0, instruction_generators.size() - 1);
const u32 instruction = instruction_generators[index].Generate();
if (ShouldTestInst(instruction, pc, is_last_inst)) {
return instruction;
}
}
}
static Dynarmic::A64::UserConfig GetUserConfig(A64TestEnv& jit_env) {
Dynarmic::A64::UserConfig jit_user_config{&jit_env};
jit_user_config.optimizations &= ~OptimizationFlag::FastDispatch;
// The below corresponds to the settings for qemu's aarch64_max_initfn
jit_user_config.dczid_el0 = 7;
jit_user_config.ctr_el0 = 0x80038003;
return jit_user_config;
}
static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv& jit_env, A64TestEnv& uni_env, const A64Unicorn::RegisterArray& regs, const A64Unicorn::VectorArray& vecs, const size_t instructions_start, const std::vector<u32>& instructions, const u32 pstate, const u32 fpcr) {
jit_env.code_mem = instructions;
uni_env.code_mem = instructions;
jit_env.code_mem.emplace_back(0x14000000); // B .
uni_env.code_mem.emplace_back(0x14000000); // B .
jit_env.code_mem_start_address = instructions_start;
uni_env.code_mem_start_address = instructions_start;
jit_env.modified_memory.clear();
uni_env.modified_memory.clear();
jit_env.interrupts.clear();
uni_env.interrupts.clear();
const u64 initial_sp = RandInt<u64>(0x30'0000'0000, 0x40'0000'0000) * 4;
jit.SetRegisters(regs);
jit.SetVectors(vecs);
jit.SetPC(instructions_start);
jit.SetSP(initial_sp);
jit.SetFpcr(fpcr);
jit.SetFpsr(0);
jit.SetPstate(pstate);
jit.ClearCache();
uni.SetRegisters(regs);
uni.SetVectors(vecs);
uni.SetPC(instructions_start);
uni.SetSP(initial_sp);
uni.SetFpcr(fpcr);
uni.SetFpsr(0);
uni.SetPstate(pstate);
uni.ClearPageCache();
jit_env.ticks_left = instructions.size();
jit.Run();
uni_env.ticks_left = instructions.size();
uni.Run();
SCOPE_FAIL {
fmt::print("Instruction Listing:\n");
for (u32 instruction : instructions) {
fmt::print("{:08x} {}\n", instruction, Common::DisassembleAArch64(instruction));
}
fmt::print("\n");
fmt::print("Initial register listing:\n");
for (size_t i = 0; i < regs.size(); ++i) {
fmt::print("{:3s}: {:016x}\n", A64::RegToString(static_cast<A64::Reg>(i)), regs[i]);
}
for (size_t i = 0; i < vecs.size(); ++i) {
fmt::print("{:3s}: {:016x}{:016x}\n", A64::VecToString(static_cast<A64::Vec>(i)), vecs[i][1], vecs[i][0]);
}
fmt::print("sp : {:016x}\n", initial_sp);
fmt::print("pc : {:016x}\n", instructions_start);
fmt::print("p : {:08x}\n", pstate);
fmt::print("fpcr {:08x}\n", fpcr);
fmt::print("fpcr.AHP {}\n", FP::FPCR{fpcr}.AHP());
fmt::print("fpcr.DN {}\n", FP::FPCR{fpcr}.DN());
fmt::print("fpcr.FZ {}\n", FP::FPCR{fpcr}.FZ());
fmt::print("fpcr.RMode {}\n", static_cast<size_t>(FP::FPCR{fpcr}.RMode()));
fmt::print("fpcr.FZ16 {}\n", FP::FPCR{fpcr}.FZ16());
fmt::print("\n");
fmt::print("Final register listing:\n");
fmt::print(" unicorn dynarmic\n");
const auto uni_regs = uni.GetRegisters();
for (size_t i = 0; i < regs.size(); ++i) {
fmt::print("{:3s}: {:016x} {:016x} {}\n", A64::RegToString(static_cast<A64::Reg>(i)), uni_regs[i], jit.GetRegisters()[i], uni_regs[i] != jit.GetRegisters()[i] ? "*" : "");
}
const auto uni_vecs = uni.GetVectors();
for (size_t i = 0; i < vecs.size(); ++i) {
fmt::print("{:3s}: {:016x}{:016x} {:016x}{:016x} {}\n", A64::VecToString(static_cast<A64::Vec>(i)),
uni_vecs[i][1], uni_vecs[i][0],
jit.GetVectors()[i][1], jit.GetVectors()[i][0],
uni_vecs[i] != jit.GetVectors()[i] ? "*" : "");
}
fmt::print("sp : {:016x} {:016x} {}\n", uni.GetSP(), jit.GetSP(), uni.GetSP() != jit.GetSP() ? "*" : "");
fmt::print("pc : {:016x} {:016x} {}\n", uni.GetPC(), jit.GetPC(), uni.GetPC() != jit.GetPC() ? "*" : "");
fmt::print("p : {:08x} {:08x} {}\n", uni.GetPstate(), jit.GetPstate(), (uni.GetPstate() & 0xF0000000) != (jit.GetPstate() & 0xF0000000) ? "*" : "");
fmt::print("qc : {:08x} {:08x} {}\n", uni.GetFpsr(), jit.GetFpsr(), FP::FPSR{uni.GetFpsr()}.QC() != FP::FPSR{jit.GetFpsr()}.QC() ? "*" : "");
fmt::print("\n");
fmt::print("Modified memory:\n");
fmt::print(" uni dyn\n");
auto uni_iter = uni_env.modified_memory.begin();
auto jit_iter = jit_env.modified_memory.begin();
while (uni_iter != uni_env.modified_memory.end() || jit_iter != jit_env.modified_memory.end()) {
if (uni_iter == uni_env.modified_memory.end() || (jit_iter != jit_env.modified_memory.end() && uni_iter->first > jit_iter->first)) {
fmt::print("{:016x}: {:02x} *\n", jit_iter->first, jit_iter->second);
jit_iter++;
} else if (jit_iter == jit_env.modified_memory.end() || jit_iter->first > uni_iter->first) {
fmt::print("{:016x}: {:02x} *\n", uni_iter->first, uni_iter->second);
uni_iter++;
} else if (uni_iter->first == jit_iter->first) {
fmt::print("{:016x}: {:02x} {:02x} {}\n", uni_iter->first, uni_iter->second, jit_iter->second, uni_iter->second != jit_iter->second ? "*" : "");
uni_iter++;
jit_iter++;
}
}
fmt::print("\n");
const auto get_code = [&jit_env](u64 vaddr) { return jit_env.MemoryReadCode(vaddr); };
IR::Block ir_block = A64::Translate({instructions_start, FP::FPCR{fpcr}}, get_code, {});
Optimization::A64CallbackConfigPass(ir_block, GetUserConfig(jit_env));
Optimization::NamingPass(ir_block);
fmt::print("IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
Optimization::A64GetSetElimination(ir_block);
Optimization::DeadCodeElimination(ir_block);
Optimization::ConstantPropagation(ir_block);
Optimization::DeadCodeElimination(ir_block);
fmt::print("Optimized IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
fmt::print("x86_64:\n");
jit.DumpDisassembly();
fmt::print("Interrupts:\n");
for (auto& i : uni_env.interrupts) {
puts(i.c_str());
}
};
REQUIRE(uni_env.code_mem_modified_by_guest == jit_env.code_mem_modified_by_guest);
if (uni_env.code_mem_modified_by_guest) {
return;
}
REQUIRE(uni.GetPC() == jit.GetPC());
REQUIRE(uni.GetRegisters() == jit.GetRegisters());
REQUIRE(uni.GetVectors() == jit.GetVectors());
REQUIRE(uni.GetSP() == jit.GetSP());
REQUIRE((uni.GetPstate() & 0xF0000000) == (jit.GetPstate() & 0xF0000000));
REQUIRE(uni_env.modified_memory == jit_env.modified_memory);
REQUIRE(uni_env.interrupts.empty());
REQUIRE(FP::FPSR{uni.GetFpsr()}.QC() == FP::FPSR{jit.GetFpsr()}.QC());
}
TEST_CASE("A64: Single random instruction", "[a64]") {
A64TestEnv jit_env{};
A64TestEnv uni_env{};
Dynarmic::A64::Jit jit{GetUserConfig(jit_env)};
A64Unicorn uni{uni_env};
A64Unicorn::RegisterArray regs;
A64Unicorn::VectorArray vecs;
std::vector<u32> instructions(1);
for (size_t iteration = 0; iteration < 100000; ++iteration) {
std::generate(regs.begin(), regs.end(), [] { return RandInt<u64>(0, ~u64(0)); });
std::generate(vecs.begin(), vecs.end(), RandomVector);
instructions[0] = GenRandomInst(0, true);
const u64 start_address = RandInt<u64>(0, 0x10'0000'0000) * 4;
const u32 pstate = RandInt<u32>(0, 0xF) << 28;
const u32 fpcr = RandomFpcr();
INFO("Instruction: 0x" << std::hex << instructions[0]);
RunTestInstance(jit, uni, jit_env, uni_env, regs, vecs, start_address, instructions, pstate, fpcr);
}
}
TEST_CASE("A64: Floating point instructions", "[a64]") {
A64TestEnv jit_env{};
A64TestEnv uni_env{};
Dynarmic::A64::Jit jit{GetUserConfig(jit_env)};
A64Unicorn uni{uni_env};
static constexpr std::array<u64, 80> float_numbers{
0x00000000, // positive zero
0x00000001, // smallest positive denormal
0x00000076, //
0x00002b94, //
0x00636d24, //
0x007fffff, // largest positive denormal
0x00800000, // smallest positive normalised real
0x00800002, //
0x01398437, //
0x0ba98d27, //
0x0ba98d7a, //
0x751f853a, //
0x7f7ffff0, //
0x7f7fffff, // largest positive normalised real
0x7f800000, // positive infinity
0x7f800001, // first positive SNaN
0x7f984a37, //
0x7fbfffff, // last positive SNaN
0x7fc00000, // first positive QNaN
0x7fd9ba98, //
0x7fffffff, // last positive QNaN
0x80000000, // negative zero
0x80000001, // smallest negative denormal
0x80000076, //
0x80002b94, //
0x80636d24, //
0x807fffff, // largest negative denormal
0x80800000, // smallest negative normalised real
0x80800002, //
0x81398437, //
0x8ba98d27, //
0x8ba98d7a, //
0xf51f853a, //
0xff7ffff0, //
0xff7fffff, // largest negative normalised real
0xff800000, // negative infinity
0xff800001, // first negative SNaN
0xff984a37, //
0xffbfffff, // last negative SNaN
0xffc00000, // first negative QNaN
0xffd9ba98, //
0xffffffff, // last negative QNaN
// some random numbers follow
0x4f3495cb,
0xe73a5134,
0x7c994e9e,
0x6164bd6c,
0x09503366,
0xbf5a97c9,
0xe6ff1a14,
0x77f31e2f,
0xaab4d7d8,
0x0966320b,
0xb26bddee,
0xb5c8e5d3,
0x317285d3,
0x3c9623b1,
0x51fd2c7c,
0x7b906a6c,
0x3f800000,
0x3dcccccd,
0x3f000000,
0x42280000,
0x3eaaaaab,
0xc1200000,
0xbf800000,
0xbf8147ae,
0x3f8147ae,
0x415df525,
0xc79b271e,
0x460e8c84,
// some 64-bit-float upper-halves
0x7ff00000, // +SNaN / +Inf
0x7ff0abcd, // +SNaN
0x7ff80000, // +QNaN
0x7ff81234, // +QNaN
0xfff00000, // -SNaN / -Inf
0xfff05678, // -SNaN
0xfff80000, // -QNaN
0xfff809ef, // -QNaN
0x3ff00000, // Number near +1.0
0xbff00000, // Number near -1.0
};
const auto gen_float = [&] {
if (RandInt<size_t>(0, 1) == 0) {
return RandInt<u64>(0, 0xffffffff);
}
return float_numbers[RandInt<size_t>(0, float_numbers.size() - 1)];
};
const auto gen_vector = [&] {
u64 upper = (gen_float() << 32) | gen_float();
u64 lower = (gen_float() << 32) | gen_float();
return Vector{lower, upper};
};
A64Unicorn::RegisterArray regs;
A64Unicorn::VectorArray vecs;
std::vector<u32> instructions(1);
for (size_t iteration = 0; iteration < 100000; ++iteration) {
std::generate(regs.begin(), regs.end(), gen_float);
std::generate(vecs.begin(), vecs.end(), gen_vector);
instructions[0] = GenFloatInst(0, true);
const u64 start_address = RandInt<u64>(0, 0x10'0000'0000) * 4;
const u32 pstate = RandInt<u32>(0, 0xF) << 28;
const u32 fpcr = RandomFpcr();
INFO("Instruction: 0x" << std::hex << instructions[0]);
RunTestInstance(jit, uni, jit_env, uni_env, regs, vecs, start_address, instructions, pstate, fpcr);
}
}
TEST_CASE("A64: Small random block", "[a64]") {
A64TestEnv jit_env{};
A64TestEnv uni_env{};
Dynarmic::A64::Jit jit{GetUserConfig(jit_env)};
A64Unicorn uni{uni_env};
A64Unicorn::RegisterArray regs;
A64Unicorn::VectorArray vecs;
std::vector<u32> instructions(5);
for (size_t iteration = 0; iteration < 100000; ++iteration) {
std::generate(regs.begin(), regs.end(), [] { return RandInt<u64>(0, ~u64(0)); });
std::generate(vecs.begin(), vecs.end(), RandomVector);
instructions[0] = GenRandomInst(0, false);
instructions[1] = GenRandomInst(4, false);
instructions[2] = GenRandomInst(8, false);
instructions[3] = GenRandomInst(12, false);
instructions[4] = GenRandomInst(16, true);
const u64 start_address = RandInt<u64>(0, 0x10'0000'0000) * 4;
const u32 pstate = RandInt<u32>(0, 0xF) << 28;
const u32 fpcr = RandomFpcr();
INFO("Instruction 1: 0x" << std::hex << instructions[0]);
INFO("Instruction 2: 0x" << std::hex << instructions[1]);
INFO("Instruction 3: 0x" << std::hex << instructions[2]);
INFO("Instruction 4: 0x" << std::hex << instructions[3]);
INFO("Instruction 5: 0x" << std::hex << instructions[4]);
RunTestInstance(jit, uni, jit_env, uni_env, regs, vecs, start_address, instructions, pstate, fpcr);
}
}
TEST_CASE("A64: Large random block", "[a64]") {
A64TestEnv jit_env{};
A64TestEnv uni_env{};
Dynarmic::A64::Jit jit{GetUserConfig(jit_env)};
A64Unicorn uni{uni_env};
A64Unicorn::RegisterArray regs;
A64Unicorn::VectorArray vecs;
constexpr size_t instruction_count = 100;
std::vector<u32> instructions(instruction_count);
for (size_t iteration = 0; iteration < 500; ++iteration) {
std::generate(regs.begin(), regs.end(), [] { return RandInt<u64>(0, ~u64(0)); });
std::generate(vecs.begin(), vecs.end(), RandomVector);
for (size_t j = 0; j < instruction_count; ++j) {
instructions[j] = GenRandomInst(j * 4, j == instruction_count - 1);
}
const u64 start_address = RandInt<u64>(0, 0x10'0000'0000) * 4;
const u32 pstate = RandInt<u32>(0, 0xF) << 28;
const u32 fpcr = RandomFpcr();
RunTestInstance(jit, uni, jit_env, uni_env, regs, vecs, start_address, instructions, pstate, fpcr);
}
}


@@ -0,0 +1,29 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "dynarmic/interface/A64/a64.h"
TEST_CASE("misaligned load/store do not use page_table when detect_misaligned_access_via_page_table is set", "[a64]") {
A64TestEnv env;
Dynarmic::A64::UserConfig conf{&env};
conf.page_table = nullptr;
conf.detect_misaligned_access_via_page_table = 128;
conf.only_detect_misalignment_via_page_table_on_page_boundary = true;
Dynarmic::A64::Jit jit{conf};
env.code_mem.emplace_back(0x3c800400); // STR Q0, [X0], #0
env.code_mem.emplace_back(0x14000000); // B .
jit.SetPC(0);
jit.SetRegister(0, 0x000000000b0afff8);
env.ticks_left = 2;
jit.Run();
// If we don't crash we're fine.
}


@@ -0,0 +1,113 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "dynarmic/interface/A64/a64.h"
using namespace Dynarmic;
TEST_CASE("ensure fast dispatch entry is cleared even when a block does not have any patching requirements", "[a64]") {
A64TestEnv env;
A64::UserConfig conf{&env};
A64::Jit jit{conf};
REQUIRE(conf.HasOptimization(OptimizationFlag::FastDispatch));
env.code_mem_start_address = 100;
env.code_mem.clear();
env.code_mem.emplace_back(0xd2800d80); // MOV X0, 108
env.code_mem.emplace_back(0xd61f0000); // BR X0
env.code_mem.emplace_back(0xd2800540); // MOV X0, 42
env.code_mem.emplace_back(0x14000000); // B .
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.InvalidateCacheRange(108, 4);
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
env.code_mem[2] = 0xd28008a0; // MOV X0, 69
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.InvalidateCacheRange(108, 4);
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 69);
jit.SetPC(100);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 69);
}
TEST_CASE("ensure fast dispatch entry is cleared even when a block does not have any patching requirements 2", "[a64]") {
A64TestEnv env;
A64::UserConfig conf{&env};
A64::Jit jit{conf};
REQUIRE(conf.HasOptimization(OptimizationFlag::FastDispatch));
env.code_mem.emplace_back(0xd2800100); // MOV X0, 8
env.code_mem.emplace_back(0xd61f0000); // BR X0
env.code_mem.emplace_back(0xd2800540); // MOV X0, 42
env.code_mem.emplace_back(0x14000000); // B .
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.InvalidateCacheRange(8, 4);
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
env.code_mem[2] = 0xd28008a0; // MOV X0, 69
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 42);
jit.InvalidateCacheRange(8, 4);
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 69);
jit.SetPC(0);
env.ticks_left = 4;
jit.Run();
REQUIRE(jit.GetRegister(0) == 69);
}

externals/dynarmic/tests/A64/testenv.h vendored Normal file (224 additions)

@@ -0,0 +1,224 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <cstring>
#include <map>
#include <optional>
#include <string>
#include <vector>
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/interface/A64/a64.h"
using Vector = Dynarmic::A64::Vector;
class A64TestEnv : public Dynarmic::A64::UserCallbacks {
public:
u64 ticks_left = 0;
bool code_mem_modified_by_guest = false;
u64 code_mem_start_address = 0;
std::vector<u32> code_mem;
std::map<u64, u8> modified_memory;
std::vector<std::string> interrupts;
bool IsInCodeMem(u64 vaddr) const {
return vaddr >= code_mem_start_address && vaddr < code_mem_start_address + code_mem.size() * 4;
}
std::optional<std::uint32_t> MemoryReadCode(u64 vaddr) override {
if (!IsInCodeMem(vaddr)) {
return 0x14000000; // B .
}
const size_t index = (vaddr - code_mem_start_address) / 4;
return code_mem[index];
}
std::uint8_t MemoryRead8(u64 vaddr) override {
if (IsInCodeMem(vaddr)) {
return reinterpret_cast<u8*>(code_mem.data())[vaddr - code_mem_start_address];
}
if (auto iter = modified_memory.find(vaddr); iter != modified_memory.end()) {
return iter->second;
}
return static_cast<u8>(vaddr);
}
std::uint16_t MemoryRead16(u64 vaddr) override {
return u16(MemoryRead8(vaddr)) | u16(MemoryRead8(vaddr + 1)) << 8;
}
std::uint32_t MemoryRead32(u64 vaddr) override {
return u32(MemoryRead16(vaddr)) | u32(MemoryRead16(vaddr + 2)) << 16;
}
std::uint64_t MemoryRead64(u64 vaddr) override {
return u64(MemoryRead32(vaddr)) | u64(MemoryRead32(vaddr + 4)) << 32;
}
Vector MemoryRead128(u64 vaddr) override {
return {MemoryRead64(vaddr), MemoryRead64(vaddr + 8)};
}
void MemoryWrite8(u64 vaddr, std::uint8_t value) override {
if (IsInCodeMem(vaddr)) {
code_mem_modified_by_guest = true;
}
modified_memory[vaddr] = value;
}
void MemoryWrite16(u64 vaddr, std::uint16_t value) override {
MemoryWrite8(vaddr, static_cast<u8>(value));
MemoryWrite8(vaddr + 1, static_cast<u8>(value >> 8));
}
void MemoryWrite32(u64 vaddr, std::uint32_t value) override {
MemoryWrite16(vaddr, static_cast<u16>(value));
MemoryWrite16(vaddr + 2, static_cast<u16>(value >> 16));
}
void MemoryWrite64(u64 vaddr, std::uint64_t value) override {
MemoryWrite32(vaddr, static_cast<u32>(value));
MemoryWrite32(vaddr + 4, static_cast<u32>(value >> 32));
}
void MemoryWrite128(u64 vaddr, Vector value) override {
MemoryWrite64(vaddr, value[0]);
MemoryWrite64(vaddr + 8, value[1]);
}
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, [[maybe_unused]] std::uint8_t expected) override {
MemoryWrite8(vaddr, value);
return true;
}
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, [[maybe_unused]] std::uint16_t expected) override {
MemoryWrite16(vaddr, value);
return true;
}
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, [[maybe_unused]] std::uint32_t expected) override {
MemoryWrite32(vaddr, value);
return true;
}
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, [[maybe_unused]] std::uint64_t expected) override {
MemoryWrite64(vaddr, value);
return true;
}
bool MemoryWriteExclusive128(u64 vaddr, Vector value, [[maybe_unused]] Vector expected) override {
MemoryWrite128(vaddr, value);
return true;
}
void InterpreterFallback(u64 pc, size_t num_instructions) override { ASSERT_MSG(false, "InterpreterFallback({:016x}, {})", pc, num_instructions); }
void CallSVC(std::uint32_t swi) override { ASSERT_MSG(false, "CallSVC({})", swi); }
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception /*exception*/) override { ASSERT_MSG(false, "ExceptionRaised({:016x})", pc); }
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {
ticks_left = 0;
return;
}
ticks_left -= ticks;
}
std::uint64_t GetTicksRemaining() override {
return ticks_left;
}
std::uint64_t GetCNTPCT() override {
return 0x10000000000 - ticks_left;
}
};
class A64FastmemTestEnv final : public Dynarmic::A64::UserCallbacks {
public:
u64 ticks_left = 0;
char* backing_memory = nullptr;
explicit A64FastmemTestEnv(char* addr)
: backing_memory(addr) {}
template<typename T>
T read(u64 vaddr) {
T value;
memcpy(&value, backing_memory + vaddr, sizeof(T));
return value;
}
template<typename T>
void write(u64 vaddr, const T& value) {
memcpy(backing_memory + vaddr, &value, sizeof(T));
}
std::optional<std::uint32_t> MemoryReadCode(u64 vaddr) override {
return read<std::uint32_t>(vaddr);
}
std::uint8_t MemoryRead8(u64 vaddr) override {
return read<std::uint8_t>(vaddr);
}
std::uint16_t MemoryRead16(u64 vaddr) override {
return read<std::uint16_t>(vaddr);
}
std::uint32_t MemoryRead32(u64 vaddr) override {
return read<std::uint32_t>(vaddr);
}
std::uint64_t MemoryRead64(u64 vaddr) override {
return read<std::uint64_t>(vaddr);
}
Vector MemoryRead128(u64 vaddr) override {
return read<Vector>(vaddr);
}
void MemoryWrite8(u64 vaddr, std::uint8_t value) override {
write(vaddr, value);
}
void MemoryWrite16(u64 vaddr, std::uint16_t value) override {
write(vaddr, value);
}
void MemoryWrite32(u64 vaddr, std::uint32_t value) override {
write(vaddr, value);
}
void MemoryWrite64(u64 vaddr, std::uint64_t value) override {
write(vaddr, value);
}
void MemoryWrite128(u64 vaddr, Vector value) override {
write(vaddr, value);
}
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, [[maybe_unused]] std::uint8_t expected) override {
MemoryWrite8(vaddr, value);
return true;
}
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, [[maybe_unused]] std::uint16_t expected) override {
MemoryWrite16(vaddr, value);
return true;
}
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, [[maybe_unused]] std::uint32_t expected) override {
MemoryWrite32(vaddr, value);
return true;
}
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, [[maybe_unused]] std::uint64_t expected) override {
MemoryWrite64(vaddr, value);
return true;
}
bool MemoryWriteExclusive128(u64 vaddr, Vector value, [[maybe_unused]] Vector expected) override {
MemoryWrite128(vaddr, value);
return true;
}
void InterpreterFallback(u64 pc, size_t num_instructions) override { ASSERT_MSG(false, "InterpreterFallback({:016x}, {})", pc, num_instructions); }
void CallSVC(std::uint32_t swi) override { ASSERT_MSG(false, "CallSVC({})", swi); }
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception) override { ASSERT_MSG(false, "ExceptionRaised({:016x})", pc); }
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {
ticks_left = 0;
return;
}
ticks_left -= ticks;
}
std::uint64_t GetTicksRemaining() override {
return ticks_left;
}
std::uint64_t GetCNTPCT() override {
return 0x10000000000 - ticks_left;
}
};


@@ -0,0 +1,80 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <array>
#include <catch2/catch_test_macros.hpp>
#include "../rand_int.h"
#include "../unicorn_emu/a64_unicorn.h"
#include "./testenv.h"
using namespace Dynarmic;
TEST_CASE("Unicorn: Sanity test", "[a64]") {
A64TestEnv env;
env.code_mem.emplace_back(0x8b020020); // ADD X0, X1, X2
env.code_mem.emplace_back(0x14000000); // B .
constexpr A64Unicorn::RegisterArray regs{
0, 1, 2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0};
A64Unicorn unicorn{env};
unicorn.SetRegisters(regs);
unicorn.SetPC(0);
env.ticks_left = 2;
unicorn.Run();
REQUIRE(unicorn.GetRegisters()[0] == 3);
REQUIRE(unicorn.GetRegisters()[1] == 1);
REQUIRE(unicorn.GetRegisters()[2] == 2);
REQUIRE(unicorn.GetPC() == 4);
}
TEST_CASE("Unicorn: Ensure 0xFFFF'FFFF'FFFF'FFFF is readable", "[a64]") {
A64TestEnv env;
env.code_mem.emplace_back(0x385fed99); // LDRB W25, [X12, #0xfffffffffffffffe]!
env.code_mem.emplace_back(0x14000000); // B .
A64Unicorn::RegisterArray regs{};
regs[12] = 1;
A64Unicorn unicorn{env};
unicorn.SetRegisters(regs);
unicorn.SetPC(0);
env.ticks_left = 2;
unicorn.Run();
REQUIRE(unicorn.GetPC() == 4);
}
TEST_CASE("Unicorn: Ensure is able to read across page boundaries", "[a64]") {
A64TestEnv env;
env.code_mem.emplace_back(0xb85f93d9); // LDUR W25, [X30, #0xfffffffffffffff9]
env.code_mem.emplace_back(0x14000000); // B .
A64Unicorn::RegisterArray regs{};
regs[30] = 4;
A64Unicorn unicorn{env};
unicorn.SetRegisters(regs);
unicorn.SetPC(0);
env.ticks_left = 2;
unicorn.Run();
REQUIRE(unicorn.GetPC() == 4);
}