[cmake] enable clang-cl and WoA builds (#348)

Compilation and CMake fixes for both Windows on ARM and clang-cl: Windows builds now work with both MSVC and clang, on both amd64 and aarch64.

Compiling with clang is *dramatically* faster than with MSVC, so this should also be useful for speeding up CI.

Co-authored-by: crueter <crueter@eden-emu.dev>
Co-authored-by: crueter <crueter@crueter.xyz>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/348
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2025-09-09 20:47:49 +02:00 committed by crueter
parent 428f136a75
commit 9d2681ecc9
Signed by: crueter
GPG key ID: 425ACD2D4830EBC6
276 changed files with 973 additions and 1010 deletions

View file

@ -373,7 +373,7 @@ else()
set_source_files_properties(vulkan_common/vma.cpp PROPERTIES COMPILE_OPTIONS "-Wno-conversion;-Wno-unused-variable;-Wno-unused-parameter;-Wno-missing-field-initializers")
# Get around GCC failing with intrinsics in Debug
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_BUILD_TYPE MATCHES "Debug")
if (CXX_GCC AND CMAKE_BUILD_TYPE MATCHES "Debug")
set_source_files_properties(host1x/vic.cpp PROPERTIES COMPILE_OPTIONS "-O2")
endif()
endif()

View file

@ -36,14 +36,14 @@ BufferCache<P>::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, R
const s64 device_local_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
const s64 min_spacing_expected = device_local_memory - 1_GiB;
const s64 min_spacing_critical = device_local_memory - 512_MiB;
const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
const s64 mem_threshold = (std::min)(device_local_memory, TARGET_THRESHOLD);
const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
const s64 min_vacancy_critical = (2 * mem_threshold) / 10;
minimum_memory = static_cast<u64>(
std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
(std::max)((std::min)(device_local_memory - min_vacancy_expected, min_spacing_expected),
DEFAULT_EXPECTED_MEMORY));
critical_memory = static_cast<u64>(
std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
(std::max)((std::min)(device_local_memory - min_vacancy_critical, min_spacing_critical),
DEFAULT_CRITICAL_MEMORY));
}
@ -553,8 +553,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
const DAddr buffer_start = buffer.CpuAddr();
const DAddr buffer_end = buffer_start + buffer.SizeBytes();
const DAddr new_start = std::max(buffer_start, device_addr);
const DAddr new_end = std::min(buffer_end, device_addr + size);
const DAddr new_start = (std::max)(buffer_start, device_addr);
const DAddr new_end = (std::min)(buffer_end, device_addr + size);
memory_tracker.ForEachDownloadRange(
new_start, new_end - new_start, false,
[&](u64 device_addr_out, u64 range_size) {
@ -574,7 +574,7 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
constexpr u64 align = 64ULL;
constexpr u64 mask = ~(align - 1ULL);
total_size_bytes += (new_size + align - 1) & mask;
largest_copy = std::max(largest_copy, new_size);
largest_copy = (std::max)(largest_copy, new_size);
};
gpu_modified_ranges.ForEachInRange(device_addr_out, range_size,
@ -729,8 +729,8 @@ void BufferCache<P>::BindHostVertexBuffers() {
}
flags[Dirty::VertexBuffer0 + index] = false;
host_bindings.min_index = std::min(host_bindings.min_index, index);
host_bindings.max_index = std::max(host_bindings.max_index, index);
host_bindings.min_index = (std::min)(host_bindings.min_index, index);
host_bindings.max_index = (std::max)(host_bindings.max_index, index);
any_valid = true;
}
@ -789,7 +789,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
bool needs_bind) {
const Binding& binding = channel_state->uniform_buffers[stage][index];
const DAddr device_addr = binding.device_addr;
const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
const u32 size = (std::min)(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
@ -956,7 +956,7 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size =
std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
(std::min)(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
SynchronizeBuffer(buffer, binding.device_addr, size);
const u32 offset = buffer.Offset(binding.device_addr);
@ -1090,7 +1090,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
const u32 draw_size =
(index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes();
const u32 size = std::min(address_size, draw_size);
const u32 size = (std::min)(address_size, draw_size);
if (size == 0 || !device_addr) {
channel_state->index_buffer = NULL_BINDING;
return;
@ -1459,7 +1459,7 @@ bool BufferCache<P>::SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 si
.size = range_size,
});
total_size_bytes += range_size;
largest_copy = std::max(largest_copy, range_size);
largest_copy = (std::max)(largest_copy, range_size);
});
if (total_size_bytes == 0) {
return true;
@ -1594,7 +1594,7 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64
constexpr u64 align = 64ULL;
constexpr u64 mask = ~(align - 1ULL);
total_size_bytes += (new_size + align - 1) & mask;
largest_copy = std::max(largest_copy, new_size);
largest_copy = (std::max)(largest_copy, new_size);
};
gpu_modified_ranges.ForEachInRange(device_addr_out, range_size, add_download);
@ -1715,7 +1715,7 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
return std::min(memory_layout_size, static_cast<u32>(8_MiB));
return (std::min)(memory_layout_size, static_cast<u32>(8_MiB));
}();
// Alignment only applies to the offset of the buffer
const u32 alignment = runtime.GetStorageBufferAlignment();

View file

@ -230,7 +230,7 @@ private:
std::size_t remaining_size{size};
std::size_t page_index{cpu_address >> HIGHER_PAGE_BITS};
u64 page_offset{cpu_address & HIGHER_PAGE_MASK};
u64 begin = std::numeric_limits<u64>::max();
u64 begin = (std::numeric_limits<u64>::max)();
u64 end = 0;
while (remaining_size > 0) {
const std::size_t copy_amount{
@ -240,8 +240,8 @@ private:
auto [new_begin, new_end] = func(manager, page_offset, copy_amount);
if (new_begin != 0 || new_end != 0) {
const u64 base_address = page_index << HIGHER_PAGE_BITS;
begin = std::min(new_begin + base_address, begin);
end = std::max(new_end + base_address, end);
begin = (std::min)(new_begin + base_address, begin);
end = (std::max)(new_end + base_address, end);
}
};
if (manager) {

View file

@ -181,7 +181,7 @@ public:
static u64 ExtractBits(u64 word, size_t page_start, size_t page_end) {
constexpr size_t number_bits = sizeof(u64) * 8;
const size_t limit_page_end = number_bits - std::min(page_end, number_bits);
const size_t limit_page_end = number_bits - (std::min)(page_end, number_bits);
u64 bits = (word >> page_start) << page_start;
bits = (bits << limit_page_end) >> limit_page_end;
return bits;
@ -206,11 +206,11 @@ public:
auto [start_word, start_page] = GetWordPage(start);
auto [end_word, end_page] = GetWordPage(end + BYTES_PER_PAGE - 1ULL);
const size_t num_words = NumWords();
start_word = std::min(start_word, num_words);
end_word = std::min(end_word, num_words);
start_word = (std::min)(start_word, num_words);
end_word = (std::min)(end_word, num_words);
const size_t diff = end_word - start_word;
end_word += (end_page + PAGES_PER_WORD - 1ULL) / PAGES_PER_WORD;
end_word = std::min(end_word, num_words);
end_word = (std::min)(end_word, num_words);
end_page += diff * PAGES_PER_WORD;
constexpr u64 base_mask{~0ULL};
for (size_t word_index = start_word; word_index < end_word; word_index++) {
@ -382,7 +382,7 @@ public:
const std::span<const u64> state_words = words.template Span<type>();
[[maybe_unused]] const std::span<const u64> untracked_words =
words.template Span<Type::Untracked>();
u64 begin = std::numeric_limits<u64>::max();
u64 begin = (std::numeric_limits<u64>::max)();
u64 end = 0;
IterateWords(offset, size, [&](size_t index, u64 mask) {
if constexpr (type == Type::GPU) {
@ -395,7 +395,7 @@ public:
const u64 local_page_begin = std::countr_zero(word);
const u64 local_page_end = PAGES_PER_WORD - std::countl_zero(word);
const u64 page_index = index * PAGES_PER_WORD;
begin = std::min(begin, page_index + local_page_begin);
begin = (std::min)(begin, page_index + local_page_begin);
end = page_index + local_page_end;
});
static constexpr std::pair<u64, u64> EMPTY{0, 0};

View file

@ -73,7 +73,7 @@ public:
}
protected:
static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
static constexpr size_t UNSET_CHANNEL{(std::numeric_limits<size_t>::max)()};
P* channel_state;
size_t current_channel_id{UNSET_CHANNEL};

View file

@ -37,7 +37,7 @@ public:
ConsumeSinkImpl();
}
std::bitset<std::numeric_limits<u16>::max()> execution_mask{};
std::bitset<(std::numeric_limits<u16>::max)()> execution_mask{};
std::vector<std::pair<u32, u32>> method_sink{};
bool current_dirty{};
GPUVAddr current_dma_segment;

View file

@ -30,7 +30,7 @@ void State::ProcessExec(const bool is_linear_) {
}
void State::ProcessData(const u32 data, const bool is_last_call) {
const u32 sub_copy_size = std::min(4U, copy_size - write_offset);
const u32 sub_copy_size = (std::min)(4U, copy_size - write_offset);
std::memcpy(&inner_buffer[write_offset], &data, sub_copy_size);
write_offset += sub_copy_size;
if (!is_last_call) {
@ -58,7 +58,7 @@ void State::ProcessData(std::span<const u8> read_buffer) {
u32 x_elements = regs.line_length_in;
u32 x_offset = regs.dest.x;
const u32 bpp_shift = Common::FoldRight(
4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
4U, [](u32 x, u32 y) { return (std::min)(x, static_cast<u32>(std::countr_zero(y))); },
width, x_elements, x_offset, static_cast<u32>(address));
width >>= bpp_shift;
x_elements >>= bpp_shift;

View file

@ -245,15 +245,15 @@ u32 Maxwell3D::GetMaxCurrentVertices() {
}
const auto& attribute = regs.vertex_attrib_format[index];
if (attribute.constant) {
num_vertices = std::max(num_vertices, 1U);
num_vertices = (std::max)(num_vertices, 1U);
continue;
}
const auto& limit = regs.vertex_stream_limits[index];
const GPUVAddr gpu_addr_begin = array.Address();
const GPUVAddr gpu_addr_end = limit.Address() + 1;
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
num_vertices = std::max(
num_vertices, address_size / std::max(attribute.SizeInBytes(), array.stride.Value()));
num_vertices = (std::max)(
num_vertices, address_size / (std::max)(attribute.SizeInBytes(), array.stride.Value()));
break;
}
return num_vertices;
@ -262,9 +262,9 @@ u32 Maxwell3D::GetMaxCurrentVertices() {
size_t Maxwell3D::EstimateIndexBufferSize() {
GPUVAddr start_address = regs.index_buffer.StartAddress();
GPUVAddr end_address = regs.index_buffer.EndAddress();
static constexpr std::array<size_t, 3> max_sizes = {std::numeric_limits<u8>::max(),
std::numeric_limits<u16>::max(),
std::numeric_limits<u32>::max()};
static constexpr std::array<size_t, 3> max_sizes = {(std::numeric_limits<u8>::max)(),
(std::numeric_limits<u16>::max)(),
(std::numeric_limits<u32>::max)()};
const size_t byte_size = regs.index_buffer.FormatSizeInBytes();
const size_t log2_byte_size = Common::Log2Ceil64(byte_size);
const size_t cap{GetMaxCurrentVertices() * 4 * byte_size};

View file

@ -1180,11 +1180,11 @@ public:
}
f32 GetX() const {
return std::max(0.0f, translate_x - std::fabs(scale_x));
return (std::max)(0.0f, translate_x - std::fabs(scale_x));
}
f32 GetY() const {
return std::max(0.0f, translate_y - std::fabs(scale_y));
return (std::max)(0.0f, translate_y - std::fabs(scale_y));
}
f32 GetWidth() const {
@ -3091,7 +3091,7 @@ public:
}
struct DirtyState {
using Flags = std::bitset<std::numeric_limits<u8>::max()>;
using Flags = std::bitset<(std::numeric_limits<u8>::max)()>;
using Table = std::array<u8, Regs::NUM_REGS>;
using Tables = std::array<Table, 2>;

View file

@ -198,7 +198,7 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
u32 bpp_shift = 0U;
if (!is_remapping) {
bpp_shift = Common::FoldRight(
4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
4U, [](u32 x, u32 y) { return (std::min)(x, static_cast<u32>(std::countr_zero(y))); },
width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
width >>= bpp_shift;
x_elements >>= bpp_shift;
@ -261,7 +261,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
u32 bpp_shift = 0U;
if (!is_remapping) {
bpp_shift = Common::FoldRight(
4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
4U, [](u32 x, u32 y) { return (std::min)(x, static_cast<u32>(std::countr_zero(y))); },
width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
width >>= bpp_shift;
x_elements >>= bpp_shift;
@ -312,7 +312,7 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
u32 bpp_shift = 0U;
if (!is_remapping) {
bpp_shift = Common::FoldRight(
4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
4U, [](u32 x, u32 y) { return (std::min)(x, static_cast<u32>(std::countr_zero(y))); },
src_width, dst_width, x_elements, src_x_offset, dst_x_offset,
static_cast<u32>(regs.offset_in), static_cast<u32>(regs.offset_out));
src_width >>= bpp_shift;

View file

@ -771,7 +771,7 @@ private:
};
const auto to_fp_n = [](f32 base_value, size_t bits, size_t mantissa) {
constexpr size_t fp32_mantissa_bits = 23;
u32 tmp_value = Common::BitCast<u32>(std::max(base_value, 0.0f));
u32 tmp_value = Common::BitCast<u32>((std::max)(base_value, 0.0f));
size_t shift_towards = fp32_mantissa_bits - mantissa;
return tmp_value >> shift_towards;
};

View file

@ -117,7 +117,7 @@ std::span<const u8> H264::ComposeFrame() {
(current_context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
u32 max_num_ref_frames =
std::max(std::max(current_context.h264_parameter_set.num_refidx_l0_default_active,
(std::max)((std::max)(current_context.h264_parameter_set.num_refidx_l0_default_active,
current_context.h264_parameter_set.num_refidx_l1_default_active) +
1,
4);

View file

@ -228,10 +228,10 @@ constexpr std::array<u8, 254> map_lut{
std::size_t index{};
if (old_prob * 2 <= 0xff) {
index = static_cast<std::size_t>(std::max(0, RecenterNonNeg(new_prob, old_prob) - 1));
index = static_cast<std::size_t>((std::max)(0, RecenterNonNeg(new_prob, old_prob) - 1));
} else {
index = static_cast<std::size_t>(
std::max(0, RecenterNonNeg(0xff - 1 - new_prob, 0xff - 1 - old_prob) - 1));
(std::max)(0, RecenterNonNeg(0xff - 1 - new_prob, 0xff - 1 - old_prob) - 1));
}
return static_cast<s32>(map_lut[index]);

View file

@ -201,8 +201,8 @@ void Vic::ReadProgressiveY8__V8U8_N420(const SlotStruct& slot,
slot_surface.resize_destructive(out_luma_width * out_luma_height);
const auto in_luma_width{std::min(frame->GetWidth(), static_cast<s32>(out_luma_width))};
const auto in_luma_height{std::min(frame->GetHeight(), static_cast<s32>(out_luma_height))};
const auto in_luma_width{(std::min)(frame->GetWidth(), static_cast<s32>(out_luma_width))};
const auto in_luma_height{(std::min)(frame->GetHeight(), static_cast<s32>(out_luma_height))};
const auto in_luma_stride{frame->GetStride(0)};
const auto in_chroma_stride{frame->GetStride(1)};
@ -425,9 +425,9 @@ void Vic::ReadInterlacedY8__V8U8_N420(const SlotStruct& slot, std::span<const Pl
slot_surface.resize_destructive(out_luma_width * out_luma_height);
const auto in_luma_width{std::min(frame->GetWidth(), static_cast<s32>(out_luma_width))};
const auto in_luma_width{(std::min)(frame->GetWidth(), static_cast<s32>(out_luma_width))};
[[maybe_unused]] const auto in_luma_height{
std::min(frame->GetHeight(), static_cast<s32>(out_luma_height))};
(std::min)(frame->GetHeight(), static_cast<s32>(out_luma_height))};
const auto in_luma_stride{frame->GetStride(0)};
[[maybe_unused]] const auto in_chroma_width{(frame->GetWidth() + 1) / 2};
@ -543,15 +543,15 @@ void Vic::Blend(const ConfigStruct& config, const SlotStruct& slot) {
auto rect_top{add_one(config.output_config.target_rect_top.Value())};
auto rect_bottom{add_one(config.output_config.target_rect_bottom.Value())};
rect_left = std::max(rect_left, dest_left);
rect_right = std::min(rect_right, dest_right);
rect_top = std::max(rect_top, dest_top);
rect_bottom = std::min(rect_bottom, dest_bottom);
rect_left = (std::max)(rect_left, dest_left);
rect_right = (std::min)(rect_right, dest_right);
rect_top = (std::max)(rect_top, dest_top);
rect_bottom = (std::min)(rect_bottom, dest_bottom);
source_left = std::max(source_left, rect_left);
source_right = std::min(source_right, rect_right);
source_top = std::max(source_top, rect_top);
source_bottom = std::min(source_bottom, rect_bottom);
source_left = (std::max)(source_left, rect_left);
source_right = (std::min)(source_right, rect_right);
source_top = (std::max)(source_top, rect_top);
source_bottom = (std::min)(source_bottom, rect_bottom);
if (source_left >= source_right || source_top >= source_bottom) {
return;
@ -562,14 +562,14 @@ void Vic::Blend(const ConfigStruct& config, const SlotStruct& slot) {
1};
const auto in_surface_width{slot.surface_config.slot_surface_width + 1};
source_bottom = std::min(source_bottom, out_surface_height);
source_right = std::min(source_right, out_surface_width);
source_bottom = (std::min)(source_bottom, out_surface_height);
source_right = (std::min)(source_right, out_surface_width);
// TODO Alpha blending. No games I've seen use more than a single surface or supply an alpha
// below max, so it's ignored for now.
if (!slot.color_matrix.matrix_enable) {
const auto copy_width = std::min(source_right - source_left, rect_right - rect_left);
const auto copy_width = (std::min)(source_right - source_left, rect_right - rect_left);
for (u32 y = source_top; y < source_bottom; y++) {
const auto dst_line = y * out_surface_width;
@ -818,8 +818,8 @@ void Vic::WriteY8__V8U8_N420(const OutputSurfaceConfig& output_surface_config) {
const auto out_chroma_stride = Common::AlignUp(out_chroma_width * BytesPerPixel * 2, 0x10);
const auto out_chroma_size = out_chroma_height * out_chroma_stride;
surface_width = std::min(surface_width, out_luma_width);
surface_height = std::min(surface_height, out_luma_height);
surface_width = (std::min)(surface_width, out_luma_width);
surface_height = (std::min)(surface_height, out_luma_height);
[[maybe_unused]] auto DecodeLinear = [&](std::span<u8> out_luma, std::span<u8> out_chroma) {
for (u32 y = 0; y < surface_height; ++y) {
@ -1089,8 +1089,8 @@ void Vic::WriteABGR(const OutputSurfaceConfig& output_surface_config) {
const auto out_luma_stride = Common ::AlignUp(out_luma_width * BytesPerPixel, 0x10);
const auto out_luma_size = out_luma_height * out_luma_stride;
surface_width = std::min(surface_width, out_luma_width);
surface_height = std::min(surface_height, out_luma_height);
surface_width = (std::min)(surface_width, out_luma_width);
surface_height = (std::min)(surface_height, out_luma_height);
[[maybe_unused]] auto DecodeLinear = [&](std::span<u8> out_buffer) {
for (u32 y = 0; y < surface_height; y++) {

View file

@ -301,7 +301,7 @@ private:
const u32 indirect_words = 5 + padding;
const std::size_t first_draw = start_indirect;
const std::size_t effective_draws = end_indirect - start_indirect;
const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws);
const std::size_t last_draw = start_indirect + (std::min)(effective_draws, max_draws);
for (std::size_t index = first_draw; index < last_draw; index++) {
const std::size_t base = index * indirect_words + 5;

View file

@ -293,7 +293,7 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
return memory.GetPointer<u8>(*address);
}
#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
#if defined(_MSC_VER) && !defined(__clang__) // no need for gcc / clang but msvc's compiler is more conservative with inlining.
#pragma inline_recursion(on)
#endif
@ -329,7 +329,7 @@ inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t si
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
(std::min)(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
auto entry = GetEntry<is_big_pages>(current_address);
if (entry == EntryType::Mapped) [[likely]] {
if constexpr (BOOL_BREAK_MAPPED) {

View file

@ -152,7 +152,7 @@ public:
PTEKind GetPageKind(GPUVAddr gpu_addr) const;
size_t GetMemoryLayoutSize(GPUVAddr gpu_addr,
size_t max_size = std::numeric_limits<size_t>::max()) const;
size_t max_size = (std::numeric_limits<size_t>::max)()) const;
void FlushCaching();

View file

@ -45,8 +45,8 @@ void BlitImageHelper::BlitColor(GLuint dst_framebuffer, GLuint src_image_view, G
static_cast<float>(src_region.start.x) / static_cast<float>(src_size.width),
static_cast<float>(src_region.start.y) /
static_cast<float>(src_size.height));
glViewport(std::min(dst_region.start.x, dst_region.end.x),
std::min(dst_region.start.y, dst_region.end.y),
glViewport((std::min)(dst_region.start.x, dst_region.end.x),
(std::min)(dst_region.start.y, dst_region.end.y),
std::abs(dst_region.end.x - dst_region.start.x),
std::abs(dst_region.end.y - dst_region.start.y));
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dst_framebuffer);

View file

@ -248,7 +248,7 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
std::ranges::transform(bindings.strides, buffer_strides.begin(),
[](u64 stride) { return static_cast<GLsizei>(stride); });
const u32 count =
std::min(static_cast<u32>(bindings.buffers.size()), max_attributes - bindings.min_index);
(std::min)(static_cast<u32>(bindings.buffers.size()), max_attributes - bindings.min_index);
if (has_unified_vertex_buffers) {
for (u32 index = 0; index < count; ++index) {
Buffer& buffer = *bindings.buffers[index];

View file

@ -59,7 +59,7 @@ class BufferCacheRuntime {
friend Buffer;
public:
static constexpr u8 INVALID_BINDING = std::numeric_limits<u8>::max();
static constexpr u8 INVALID_BINDING = (std::numeric_limits<u8>::max)();
explicit BufferCacheRuntime(const Device& device_, StagingBufferPool& staging_buffer_pool_);

View file

@ -1266,7 +1266,7 @@ void RasterizerOpenGL::SyncPointState() {
oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.point_size_attribute.enabled);
const bool is_rescaling{texture_cache.IsRescaling()};
const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale));
glPointSize((std::max)(1.0f, maxwell3d->regs.point_size * scale));
}
void RasterizerOpenGL::SyncLineState() {

View file

@ -617,7 +617,7 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
}
std::unique_ptr<ShaderWorker> ShaderCache::CreateWorkers() const {
return std::make_unique<ShaderWorker>(std::max(std::thread::hardware_concurrency(), 2U) - 1,
return std::make_unique<ShaderWorker>((std::max)(std::thread::hardware_concurrency(), 2U) - 1,
"GlShaderBuilder",
[this] { return Context{emu_window}; });
}

View file

@ -68,7 +68,7 @@ size_t StagingBuffers::RequestBuffer(size_t requested_size) {
std::optional<size_t> StagingBuffers::FindBuffer(size_t requested_size) {
size_t known_unsignaled_index = current_sync_index + 1;
size_t smallest_buffer = std::numeric_limits<size_t>::max();
size_t smallest_buffer = (std::numeric_limits<size_t>::max)();
std::optional<size_t> found;
const size_t num_buffers = allocs.size();
for (size_t index = 0; index < num_buffers; ++index) {
@ -88,7 +88,7 @@ std::optional<size_t> StagingBuffers::FindBuffer(size_t requested_size) {
if (!alloc.sync.IsSignaled()) {
// Since this fence hasn't been signaled, it's safe to assume all later
// fences haven't been signaled either
known_unsignaled_index = std::min(known_unsignaled_index, alloc.sync_index);
known_unsignaled_index = (std::min)(known_unsignaled_index, alloc.sync_index);
continue;
}
alloc.sync.Release();
@ -120,7 +120,7 @@ std::pair<std::span<u8>, size_t> StreamBuffer::Request(size_t size) noexcept {
used_iterator = iterator;
for (size_t region = Region(free_iterator) + 1,
region_end = std::min(Region(iterator + size) + 1, NUM_SYNCS);
region_end = (std::min)(Region(iterator + size) + 1, NUM_SYNCS);
region < region_end; ++region) {
glClientWaitSync(fences[region].handle, 0, GL_TIMEOUT_IGNORED);
fences[region].Release();

View file

@ -79,7 +79,7 @@ enum : u8 {
Last
};
static_assert(Last <= std::numeric_limits<u8>::max());
static_assert(Last <= (std::numeric_limits<u8>::max)());
} // namespace Dirty

View file

@ -717,7 +717,7 @@ Image::Image(TextureCacheRuntime& runtime_, const VideoCommon::ImageInfo& info_,
gl_type = tuple.type;
}
const int max_host_mip_levels = std::bit_width(info.size.width);
gl_num_levels = std::min(info.resources.levels, max_host_mip_levels);
gl_num_levels = (std::min)(info.resources.levels, max_host_mip_levels);
texture = MakeImage(info, gl_internal_format, gl_num_levels);
current_texture = texture.handle;
if (runtime->device.HasDebuggingToolAttached()) {
@ -742,8 +742,8 @@ void Image::UploadMemory(GLuint buffer_handle, size_t buffer_offset,
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
u32 current_row_length = std::numeric_limits<u32>::max();
u32 current_image_height = std::numeric_limits<u32>::max();
u32 current_row_length = (std::numeric_limits<u32>::max)();
u32 current_image_height = (std::numeric_limits<u32>::max)();
for (const VideoCommon::BufferImageCopy& copy : copies) {
if (copy.image_subresource.base_level >= gl_num_levels) {
@ -788,8 +788,8 @@ void Image::DownloadMemory(std::span<GLuint> buffer_handles, std::span<size_t> b
glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer_handle);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
u32 current_row_length = std::numeric_limits<u32>::max();
u32 current_image_height = std::numeric_limits<u32>::max();
u32 current_row_length = (std::numeric_limits<u32>::max)();
u32 current_image_height = (std::numeric_limits<u32>::max)();
for (const VideoCommon::BufferImageCopy& copy : copies) {
if (copy.image_subresource.base_level >= gl_num_levels) {
@ -1033,10 +1033,10 @@ void Image::Scale(bool up_scale) {
const GLuint draw_fbo = runtime->rescale_draw_fbos[fbo_index].handle;
for (s32 layer = 0; layer < info.resources.layers; ++layer) {
for (s32 level = 0; level < info.resources.levels; ++level) {
const u32 src_level_width = std::max(1u, src_width >> level);
const u32 src_level_height = std::max(1u, src_height >> level);
const u32 dst_level_width = std::max(1u, dst_width >> level);
const u32 dst_level_height = std::max(1u, dst_height >> level);
const u32 src_level_width = (std::max)(1u, src_width >> level);
const u32 src_level_height = (std::max)(1u, src_height >> level);
const u32 dst_level_width = (std::max)(1u, dst_width >> level);
const u32 dst_level_height = (std::max)(1u, dst_height >> level);
glNamedFramebufferTextureLayer(read_fbo, attachment, src_handle, level, layer);
glNamedFramebufferTextureLayer(draw_fbo, attachment, dst_handle, level, layer);

View file

@ -340,8 +340,8 @@ void UpdateTwoTexturesDescriptorSet(const Device& device, VkDescriptorSet descri
void BindBlitState(vk::CommandBuffer cmdbuf, const Region2D& dst_region) {
const VkOffset2D offset{
.x = std::min(dst_region.start.x, dst_region.end.x),
.y = std::min(dst_region.start.y, dst_region.end.y),
.x = (std::min)(dst_region.start.x, dst_region.end.x),
.y = (std::min)(dst_region.start.y, dst_region.end.y),
};
const VkExtent2D extent{
.width = static_cast<u32>(std::abs(dst_region.end.x - dst_region.start.x)),

View file

@ -573,8 +573,8 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
buffer_handles.push_back(handle);
}
const u32 device_max = device.GetMaxVertexInputBindings();
const u32 min_binding = std::min(bindings.min_index, device_max);
const u32 max_binding = std::min(bindings.max_index, device_max);
const u32 min_binding = (std::min)(bindings.min_index, device_max);
const u32 max_binding = (std::min)(bindings.max_index, device_max);
const u32 binding_count = max_binding - min_binding;
if (binding_count == 0) {
return;

View file

@ -562,7 +562,7 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
if (!key.state.dynamic_vertex_input) {
const size_t num_vertex_arrays = std::min(
const size_t num_vertex_arrays = (std::min)(
Maxwell::NumVertexArrays, static_cast<size_t>(device.GetMaxVertexInputBindings()));
for (size_t index = 0; index < num_vertex_arrays; ++index) {
const bool instanced = key.state.binding_divisors[index] != 0;

View file

@ -86,8 +86,8 @@ bool CanBlitToSwapchain(const vk::PhysicalDevice& physical_device, VkFormat form
},
.extent =
{
.width = std::min(frame_width, swapchain_width),
.height = std::min(frame_height, swapchain_height),
.width = (std::min)(frame_width, swapchain_width),
.height = (std::min)(frame_height, swapchain_height),
.depth = 1,
},
};

View file

@ -202,8 +202,8 @@ public:
});
rasterizer->SyncOperation(std::move(func));
accumulation_since_last_sync = false;
first_accumulation_checkpoint = std::min(first_accumulation_checkpoint, num_slots_used);
last_accumulation_checkpoint = std::max(last_accumulation_checkpoint, num_slots_used);
first_accumulation_checkpoint = (std::min)(first_accumulation_checkpoint, num_slots_used);
last_accumulation_checkpoint = (std::max)(last_accumulation_checkpoint, num_slots_used);
}
void CloseCounter() override {
@ -311,9 +311,9 @@ public:
if (has_multi_queries) {
const size_t min_accumulation_limit =
std::min(first_accumulation_checkpoint, num_slots_used);
(std::min)(first_accumulation_checkpoint, num_slots_used);
const size_t max_accumulation_limit =
std::max(last_accumulation_checkpoint, num_slots_used);
(std::max)(last_accumulation_checkpoint, num_slots_used);
const size_t intermediary_buffer_index = ObtainBuffer<false>(num_slots_used);
resolve_buffers.push_back(intermediary_buffer_index);
queries_prefix_scan_pass->Run(*accumulation_buffer, *buffers[intermediary_buffer_index],
@ -332,7 +332,7 @@ public:
rasterizer->SyncOperation(std::move(func));
AbandonCurrentQuery();
num_slots_used = 0;
first_accumulation_checkpoint = std::numeric_limits<size_t>::max();
first_accumulation_checkpoint = (std::numeric_limits<size_t>::max)();
last_accumulation_checkpoint = 0;
accumulation_since_last_sync = has_multi_queries;
pending_sync.clear();
@ -414,7 +414,7 @@ private:
size_t start_slot = query->start_slot;
for (size_t i = 0; i < banks_set; i++) {
auto& the_bank = bank_pool.GetBank(bank_id);
size_t amount = std::min(the_bank.Size() - start_slot, size_slots);
size_t amount = (std::min)(the_bank.Size() - start_slot, size_slots);
func(&the_bank, start_slot, amount);
bank_id = the_bank.next_bank - 1;
start_slot = 0;
@ -431,11 +431,11 @@ private:
auto* query = GetQuery(q);
ApplyBankOp(query, [&indexer](SamplesQueryBank* bank, size_t start, size_t amount) {
auto id_ = bank->GetIndex();
auto pair = indexer.try_emplace(id_, std::numeric_limits<size_t>::max(),
std::numeric_limits<size_t>::min());
auto pair = indexer.try_emplace(id_, (std::numeric_limits<size_t>::max)(),
(std::numeric_limits<size_t>::min)());
auto& current_pair = pair.first->second;
current_pair.first = std::min(current_pair.first, start);
current_pair.second = std::max(current_pair.second, amount + start);
current_pair.first = (std::min)(current_pair.first, start);
current_pair.second = (std::max)(current_pair.second, amount + start);
});
}
for (auto& cont : indexer) {

View file

@ -131,8 +131,8 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3
s32 max_y = lower_left ? (clip_height - src.min_y) : src.max_y.Value();
// Bound to render area
min_y = std::max(min_y, 0);
max_y = std::max(max_y, 0);
min_y = (std::max)(min_y, 0);
max_y = (std::max)(max_y, 0);
if (src.enable) {
scissor.offset.x = scale_up(src.min_x);
@ -142,8 +142,8 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3
} else {
scissor.offset.x = 0;
scissor.offset.y = 0;
scissor.extent.width = std::numeric_limits<s32>::max();
scissor.extent.height = std::numeric_limits<s32>::max();
scissor.extent.width = (std::numeric_limits<s32>::max)();
scissor.extent.height = (std::numeric_limits<s32>::max)();
}
return scissor;
}
@ -380,8 +380,8 @@ void RasterizerVulkan::Clear(u32 layer_count) {
VkRect2D default_scissor;
default_scissor.offset.x = 0;
default_scissor.offset.y = 0;
default_scissor.extent.width = std::numeric_limits<s32>::max();
default_scissor.extent.height = std::numeric_limits<s32>::max();
default_scissor.extent.width = (std::numeric_limits<s32>::max)();
default_scissor.extent.height = (std::numeric_limits<s32>::max)();
VkClearRect clear_rect{
.rect = regs.clear_control.use_scissor ? GetScissorState(regs, 0, up_scale, down_shift)
@ -393,8 +393,8 @@ void RasterizerVulkan::Clear(u32 layer_count) {
return;
}
clear_rect.rect.extent = VkExtent2D{
.width = std::min(clear_rect.rect.extent.width, render_area.width),
.height = std::min(clear_rect.rect.extent.height, render_area.height),
.width = (std::min)(clear_rect.rect.extent.width, render_area.width),
.height = (std::min)(clear_rect.rect.extent.height, render_area.height),
};
const u32 color_attachment = regs.clear_surface.RT;

View file

@ -31,7 +31,7 @@ size_t GetStreamBufferSize(const Device& device) {
VkDeviceSize size{0};
if (device.HasDebuggingToolAttached()) {
ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) {
size = std::max(size, heap.size);
size = (std::max)(size, heap.size);
});
// If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be
// loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue
@ -42,7 +42,7 @@ size_t GetStreamBufferSize(const Device& device) {
} else {
size = MAX_STREAM_BUFFER_SIZE;
}
return std::min(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
return (std::min)(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
}
} // Anonymous namespace
@ -104,7 +104,7 @@ void StagingBufferPool::TickFrame() {
StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
if (AreRegionsActive(Region(free_iterator) + 1,
std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
(std::min)(Region(iterator + size) + 1, NUM_SYNCS))) {
// Avoid waiting for the previous usages to be free
return GetStagingBuffer(size, MemoryUsage::Upload);
}
@ -112,7 +112,7 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
current_tick);
used_iterator = iterator;
free_iterator = std::max(free_iterator, iterator + size);
free_iterator = (std::max)(free_iterator, iterator + size);
if (iterator + size >= stream_buffer_size) {
std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
@ -170,7 +170,7 @@ std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t s
}
}
cache_level.iterate_index = std::distance(entries.begin(), it) + 1;
it->tick = deferred ? std::numeric_limits<u64>::max() : scheduler.CurrentTick();
it->tick = deferred ? (std::numeric_limits<u64>::max)() : scheduler.CurrentTick();
ASSERT(!it->deferred);
it->deferred = deferred;
return it->Ref();
@ -206,7 +206,7 @@ StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage
.usage = usage,
.log2_level = log2,
.index = unique_ids++,
.tick = deferred ? std::numeric_limits<u64>::max() : scheduler.CurrentTick(),
.tick = deferred ? (std::numeric_limits<u64>::max)() : scheduler.CurrentTick(),
.deferred = deferred,
});
return entry.Ref();
@ -240,7 +240,7 @@ void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
return scheduler.IsFree(entry.tick);
};
const size_t begin_offset = staging.delete_index;
const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
const size_t end_offset = (std::min)(begin_offset + deletions_per_tick, old_size);
const auto begin = entries.begin() + begin_offset;
const auto end = entries.begin() + end_offset;
entries.erase(std::remove_if(begin, end, is_deletable), end);

View file

@ -70,7 +70,7 @@ enum : u8 {
Last,
};
static_assert(Last <= std::numeric_limits<u8>::max());
static_assert(Last <= (std::numeric_limits<u8>::max)());
} // namespace Dirty

View file

@ -79,15 +79,15 @@ static VkPresentModeKHR ChooseSwapPresentMode(bool has_imm, bool has_mailbox,
}
VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height) {
constexpr auto undefined_size{std::numeric_limits<u32>::max()};
constexpr auto undefined_size{(std::numeric_limits<u32>::max)()};
if (capabilities.currentExtent.width != undefined_size) {
return capabilities.currentExtent;
}
VkExtent2D extent;
extent.width = std::max(capabilities.minImageExtent.width,
std::min(capabilities.maxImageExtent.width, width));
extent.height = std::max(capabilities.minImageExtent.height,
std::min(capabilities.maxImageExtent.height, height));
extent.width = (std::max)(capabilities.minImageExtent.width,
(std::min)(capabilities.maxImageExtent.width, width));
extent.height = (std::max)(capabilities.minImageExtent.height,
(std::min)(capabilities.maxImageExtent.height, height));
return extent;
}
@ -172,7 +172,7 @@ void Swapchain::Create(
bool Swapchain::AcquireNextImage() {
const VkResult result = device.GetLogical().AcquireNextImageKHR(
*swapchain, std::numeric_limits<u64>::max(), *present_semaphores[frame_index],
*swapchain, (std::numeric_limits<u64>::max)(), *present_semaphores[frame_index],
VK_NULL_HANDLE, &image_index);
switch (result) {
case VK_SUCCESS:
@ -261,10 +261,10 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
requested_image_count = capabilities.maxImageCount;
} else {
requested_image_count =
std::max(requested_image_count, std::min(3U, capabilities.maxImageCount));
(std::max)(requested_image_count, (std::min)(3U, capabilities.maxImageCount));
}
} else {
requested_image_count = std::max(requested_image_count, 3U);
requested_image_count = (std::max)(requested_image_count, 3U);
}
VkSwapchainCreateInfoKHR swapchain_ci{
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,

View file

@ -509,16 +509,16 @@ TransformBufferCopies(std::span<const VideoCommon::BufferCopy> copies, size_t bu
}
}
struct RangedBarrierRange {
u32 min_mip = std::numeric_limits<u32>::max();
u32 max_mip = std::numeric_limits<u32>::min();
u32 min_layer = std::numeric_limits<u32>::max();
u32 max_layer = std::numeric_limits<u32>::min();
u32 min_mip = (std::numeric_limits<u32>::max)();
u32 max_mip = (std::numeric_limits<u32>::min)();
u32 min_layer = (std::numeric_limits<u32>::max)();
u32 max_layer = (std::numeric_limits<u32>::min)();
void AddLayers(const VkImageSubresourceLayers& layers) {
min_mip = std::min(min_mip, layers.mipLevel);
max_mip = std::max(max_mip, layers.mipLevel + 1);
min_layer = std::min(min_layer, layers.baseArrayLayer);
max_layer = std::max(max_layer, layers.baseArrayLayer + layers.layerCount);
min_mip = (std::min)(min_mip, layers.mipLevel);
max_mip = (std::max)(max_mip, layers.mipLevel + 1);
min_layer = (std::min)(min_layer, layers.baseArrayLayer);
max_layer = (std::max)(max_layer, layers.baseArrayLayer + layers.layerCount);
}
VkImageSubresourceRange SubresourceRange(VkImageAspectFlags aspect_mask) const noexcept {
@ -747,8 +747,8 @@ void BlitScale(Scheduler& scheduler, VkImage src_image, VkImage dst_image, const
.z = 0,
},
{
.x = std::max(1, src_size.x >> level),
.y = std::max(1, src_size.y >> level),
.x = (std::max)(1, src_size.x >> level),
.y = (std::max)(1, src_size.y >> level),
.z = 1,
},
},
@ -765,8 +765,8 @@ void BlitScale(Scheduler& scheduler, VkImage src_image, VkImage dst_image, const
.z = 0,
},
{
.x = std::max(1, dst_size.x >> level),
.y = std::max(1, dst_size.y >> level),
.x = (std::max)(1, dst_size.x >> level),
.y = (std::max)(1, dst_size.y >> level),
.z = 1,
},
},
@ -1956,8 +1956,8 @@ bool Image::BlitScaleHelper(bool scale_up) {
.end = {static_cast<s32>(dst_width), static_cast<s32>(dst_height)},
};
const VkExtent2D extent{
.width = std::max(scaled_width, info.size.width),
.height = std::max(scaled_height, info.size.height),
.width = (std::max)(scaled_width, info.size.width),
.height = (std::max)(scaled_height, info.size.height),
};
auto* view_ptr = blit_view.get();
@ -2310,21 +2310,21 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
is_rescaled = is_rescaled_;
const auto& resolution = runtime.resolution;
u32 width = std::numeric_limits<u32>::max();
u32 height = std::numeric_limits<u32>::max();
u32 width = (std::numeric_limits<u32>::max)();
u32 height = (std::numeric_limits<u32>::max)();
for (size_t index = 0; index < NUM_RT; ++index) {
const ImageView* const color_buffer = color_buffers[index];
if (!color_buffer) {
renderpass_key.color_formats[index] = PixelFormat::Invalid;
continue;
}
width = std::min(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
width = (std::min)(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
: color_buffer->size.width);
height = std::min(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
height = (std::min)(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
: color_buffer->size.height);
attachments.push_back(color_buffer->RenderTarget());
renderpass_key.color_formats[index] = color_buffer->format;
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
num_layers = (std::max)(num_layers, color_buffer->range.extent.layers);
images[num_images] = color_buffer->ImageHandle();
image_ranges[num_images] = MakeSubresourceRange(color_buffer);
rt_map[index] = num_images;
@ -2333,13 +2333,13 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
}
const size_t num_colors = attachments.size();
if (depth_buffer) {
width = std::min(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
width = (std::min)(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
: depth_buffer->size.width);
height = std::min(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
height = (std::min)(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
: depth_buffer->size.height);
attachments.push_back(depth_buffer->RenderTarget());
renderpass_key.depth_format = depth_buffer->format;
num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
num_layers = (std::max)(num_layers, depth_buffer->range.extent.layers);
images[num_images] = depth_buffer->ImageHandle();
const VkImageSubresourceRange subresource_range = MakeSubresourceRange(depth_buffer);
image_ranges[num_images] = subresource_range;
@ -2353,8 +2353,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
renderpass_key.samples = samples;
renderpass = runtime.render_pass_cache.Get(renderpass_key);
render_area.width = std::min(render_area.width, width);
render_area.height = std::min(render_area.height, height);
render_area.width = (std::min)(render_area.width, width);
render_area.height = (std::min)(render_area.height, height);
num_color_buffers = static_cast<u32>(num_colors);
framebuffer = runtime.device.GetLogical().CreateFramebuffer({
@ -2366,7 +2366,7 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
.pAttachments = attachments.data(),
.width = render_area.width,
.height = render_area.height,
.layers = static_cast<u32>(std::max(num_layers, 1)),
.layers = static_cast<u32>((std::max)(num_layers, 1)),
});
}

View file

@ -139,8 +139,8 @@ std::array<u32, 3> GenericEnvironment::WorkgroupSize() const {
}
u64 GenericEnvironment::ReadInstruction(u32 address) {
read_lowest = std::min(read_lowest, address);
read_highest = std::max(read_highest, address);
read_lowest = (std::min)(read_lowest, address);
read_highest = (std::max)(read_highest, address);
if (address >= cached_lowest && address < cached_highest) {
return code[(address - cached_lowest) / INST_SIZE];
@ -319,7 +319,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
break;
}
const u64 local_size{sph.LocalMemorySize()};
ASSERT(local_size <= std::numeric_limits<u32>::max());
ASSERT(local_size <= (std::numeric_limits<u32>::max)());
local_memory_size = static_cast<u32>(local_size) + sph.common3.shader_local_memory_crs_size;
texture_bound = maxwell3d->regs.bindless_texture_const_buffer_slot;
is_proprietary_driver = texture_bound == 2;

View file

@ -86,10 +86,10 @@ protected:
u32 shared_memory_size{};
std::array<u32, 3> workgroup_size{};
u32 read_lowest = std::numeric_limits<u32>::max();
u32 read_lowest = (std::numeric_limits<u32>::max)();
u32 read_highest = 0;
u32 cached_lowest = std::numeric_limits<u32>::max();
u32 cached_lowest = (std::numeric_limits<u32>::max)();
u32 cached_highest = 0;
u32 initial_offset = 0;

View file

@ -67,8 +67,8 @@ void DecompressBlocks(std::span<const u8> input, std::span<u8> output, BufferIma
const u32 width = copy.image_extent.width;
const u32 height = copy.image_extent.height * copy.image_subresource.num_layers;
const u32 depth = copy.image_extent.depth;
const u32 block_width = std::min(width, BLOCK_SIZE);
const u32 block_height = std::min(height, BLOCK_SIZE);
const u32 block_width = (std::min)(width, BLOCK_SIZE);
const u32 block_height = (std::min)(height, BLOCK_SIZE);
const u32 pitch = width * out_bpp;
size_t input_offset = 0;
size_t output_offset = 0;

View file

@ -185,7 +185,7 @@ bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1;
const s32 lhs_mips = lhs.info.resources.levels;
const s32 rhs_mips = rhs.info.resources.levels;
const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips);
const s32 num_mips = (std::min)(lhs_mips - base->level, rhs_mips);
AliasedImage lhs_alias;
AliasedImage rhs_alias;
lhs_alias.id = rhs_id;
@ -204,9 +204,9 @@ bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height);
}
const Extent3D copy_size{
.width = std::min(lhs_size.width, rhs_size.width),
.height = std::min(lhs_size.height, rhs_size.height),
.depth = std::min(lhs_size.depth, rhs_size.depth),
.width = (std::min)(lhs_size.width, rhs_size.width),
.height = (std::min)(lhs_size.height, rhs_size.height),
.depth = (std::min)(lhs_size.depth, rhs_size.depth),
};
if (copy_size.width == 0 || copy_size.height == 0) {
LOG_WARNING(HW_GPU, "Copy size is smaller than block size. Mip cannot be aliased.");
@ -218,7 +218,7 @@ bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
const Offset3D rhs_offset{0, 0, is_rhs_3d ? base->layer : 0};
const s32 lhs_layers = is_lhs_3d ? 1 : lhs.info.resources.layers - base->layer;
const s32 rhs_layers = is_rhs_3d ? 1 : rhs.info.resources.layers;
const s32 num_layers = std::min(lhs_layers, rhs_layers);
const s32 num_layers = (std::min)(lhs_layers, rhs_layers);
const SubresourceLayers lhs_subresource{
.base_level = mip_level,
.base_layer = 0,

View file

@ -18,9 +18,9 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ImageId image_id_, GPUVAddr addr)
: image_id{image_id_}, gpu_addr{addr}, format{info.format}, type{info.type}, range{info.range},
size{
.width = std::max(image_info.size.width >> range.base.level, 1u),
.height = std::max(image_info.size.height >> range.base.level, 1u),
.depth = std::max(image_info.size.depth >> range.base.level, 1u),
.width = (std::max)(image_info.size.width >> range.base.level, 1u),
.height = (std::max)(image_info.size.height >> range.base.level, 1u),
.depth = (std::max)(image_info.size.depth >> range.base.level, 1u),
} {
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,

View file

@ -19,7 +19,7 @@ namespace {
using Tegra::Texture::TextureType;
constexpr u8 RENDER_TARGET_SWIZZLE = std::numeric_limits<u8>::max();
constexpr u8 RENDER_TARGET_SWIZZLE = (std::numeric_limits<u8>::max)();
[[nodiscard]] u8 CastSwizzle(SwizzleSource source) {
const u8 casted = static_cast<u8>(source);

View file

@ -56,14 +56,14 @@ TextureCache<P>::TextureCache(Runtime& runtime_, Tegra::MaxwellDeviceMemoryManag
const s64 device_local_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
const s64 min_spacing_expected = device_local_memory - 1_GiB;
const s64 min_spacing_critical = device_local_memory - 512_MiB;
const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
const s64 mem_threshold = (std::min)(device_local_memory, TARGET_THRESHOLD);
const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
const s64 min_vacancy_critical = (2 * mem_threshold) / 10;
expected_memory = static_cast<u64>(
std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
(std::max)((std::min)(device_local_memory - min_vacancy_expected, min_spacing_expected),
DEFAULT_EXPECTED_MEMORY));
critical_memory = static_cast<u64>(
std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
(std::max)((std::min)(device_local_memory - min_vacancy_critical, min_spacing_critical),
DEFAULT_CRITICAL_MEMORY));
minimum_memory = static_cast<u64>((device_local_memory - mem_threshold) / 2);
} else {
@ -586,8 +586,8 @@ std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(D
area->end_address = cpu_addr + size;
area->preemtive = true;
}
area->start_address = std::min(area->start_address, image.cpu_addr);
area->end_address = std::max(area->end_address, image.cpu_addr_end);
area->start_address = (std::min)(area->start_address, image.cpu_addr);
area->end_address = (std::max)(area->end_address, image.cpu_addr_end);
for (auto image_view_id : image.image_view_ids) {
auto& image_view = slot_image_views[image_view_id];
image_view.flags |= ImageViewFlagBits::PreemtiveDownload;
@ -1273,7 +1273,7 @@ u64 TextureCache<P>::GetScaledImageSizeBytes(const ImageBase& image) {
const u64 down_shift = static_cast<u64>(Settings::values.resolution_info.down_shift +
Settings::values.resolution_info.down_shift);
const u64 image_size_bytes =
static_cast<u64>(std::max(image.guest_size_bytes, image.unswizzled_size_bytes));
static_cast<u64>((std::max)(image.guest_size_bytes, image.unswizzled_size_bytes));
const u64 tentative_size = (image_size_bytes * scale_up) >> down_shift;
const u64 fitted_size = Common::AlignUp(tentative_size, 1024);
return fitted_size;
@ -1994,7 +1994,7 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
ASSERT_MSG(False(image.flags & ImageFlagBits::Registered),
"Trying to register an already registered image");
image.flags |= ImageFlagBits::Registered;
u64 tentative_size = std::max(image.guest_size_bytes, image.unswizzled_size_bytes);
u64 tentative_size = (std::max)(image.guest_size_bytes, image.unswizzled_size_bytes);
if ((IsPixelFormatASTC(image.info.format) &&
True(image.flags & ImageFlagBits::AcceleratedUpload)) ||
True(image.flags & ImageFlagBits::Converted)) {
@ -2168,7 +2168,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
if (image.HasScaled()) {
total_used_memory -= GetScaledImageSizeBytes(image);
}
u64 tentative_size = std::max(image.guest_size_bytes, image.unswizzled_size_bytes);
u64 tentative_size = (std::max)(image.guest_size_bytes, image.unswizzled_size_bytes);
if ((IsPixelFormatASTC(image.info.format) &&
True(image.flags & ImageFlagBits::AcceleratedUpload)) ||
True(image.flags & ImageFlagBits::Converted)) {
@ -2302,7 +2302,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
for (const AliasedImage& aliased : image.aliased_images) {
ImageBase& aliased_image = slot_images[aliased.id];
if (image.modification_tick < aliased_image.modification_tick) {
most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
most_recent_tick = (std::max)(most_recent_tick, aliased_image.modification_tick);
aliased_images.push_back(&aliased);
any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
@ -2443,9 +2443,9 @@ void TextureCache<P>::CopyImage(ImageId dst_id, ImageId src_id, std::vector<Imag
ImageView& dst_view = slot_image_views[dst_view_id];
ImageView& src_view = slot_image_views[src_view_id];
[[maybe_unused]] const Extent3D expected_size{
.width = std::min(dst_view.size.width, src_view.size.width),
.height = std::min(dst_view.size.height, src_view.size.height),
.depth = std::min(dst_view.size.depth, src_view.size.depth),
.width = (std::min)(dst_view.size.width, src_view.size.width),
.height = (std::min)(dst_view.size.height, src_view.size.height),
.depth = (std::min)(dst_view.size.depth, src_view.size.depth),
};
const Extent3D scaled_extent = [is_rescaled, expected_size]() {
if (!is_rescaled) {

View file

@ -108,7 +108,7 @@ class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelI
/// True when the API can do asynchronous texture downloads.
static constexpr bool IMPLEMENTS_ASYNC_DOWNLOADS = P::IMPLEMENTS_ASYNC_DOWNLOADS;
static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
static constexpr size_t UNSET_CHANNEL{(std::numeric_limits<size_t>::max)()};
static constexpr s64 TARGET_THRESHOLD = 4_GiB;
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;

View file

@ -327,8 +327,8 @@ template <u32 GOB_EXTENT>
}
const SubresourceExtent resources = new_info.resources;
return SubresourceExtent{
.levels = std::max(resources.levels, info.resources.levels),
.layers = std::max(resources.layers, info.resources.layers),
.levels = (std::max)(resources.levels, info.resources.levels),
.layers = (std::max)(resources.layers, info.resources.layers),
};
}
@ -354,7 +354,7 @@ template <u32 GOB_EXTENT>
return std::nullopt;
}
return SubresourceExtent{
.levels = std::max(new_info.resources.levels, info.resources.levels + base.level),
.levels = (std::max)(new_info.resources.levels, info.resources.levels + base.level),
.layers = 1,
};
}
@ -388,8 +388,8 @@ template <u32 GOB_EXTENT>
return std::nullopt;
}
return SubresourceExtent{
.levels = std::max(new_info.resources.levels, info.resources.levels + base.level),
.layers = std::max(new_info.resources.layers, info.resources.layers + base.layer),
.levels = (std::max)(new_info.resources.levels, info.resources.levels + base.level),
.layers = (std::max)(new_info.resources.layers, info.resources.layers + base.layer),
};
}
@ -439,14 +439,14 @@ template <u32 GOB_EXTENT>
}
layers = 1;
} else {
layers = std::max(resources.layers, info.resources.layers + base->layer);
layers = (std::max)(resources.layers, info.resources.layers + base->layer);
}
return OverlapResult{
.gpu_addr = overlap.gpu_addr,
.cpu_addr = overlap.cpu_addr,
.resources =
{
.levels = std::max(resources.levels + base->level, info.resources.levels),
.levels = (std::max)(resources.levels + base->level, info.resources.levels),
.layers = layers,
},
};

View file

@ -1291,7 +1291,7 @@ static void ComputeEndpoints(Pixel& ep1, Pixel& ep2, const u32*& colorValues,
case 1: {
READ_UINT_VALUES(2)
u32 L0 = (v[0] >> 2) | (v[1] & 0xC0);
u32 L1 = std::min(L0 + (v[1] & 0x3F), 0xFFU);
u32 L1 = (std::min)(L0 + (v[1] & 0x3F), 0xFFU);
ep1 = Pixel(0xFF, L0, L0, L0);
ep2 = Pixel(0xFF, L1, L1, L1);
} break;
@ -1522,7 +1522,7 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
// Read color data...
u32 colorDataBits = remainingBits;
while (remainingBits > 0) {
u32 nb = std::min(remainingBits, 8);
u32 nb = (std::min)(remainingBits, 8);
u32 b = strm.ReadBits(nb);
colorEndpointStream.WriteBits(b, nb);
remainingBits -= 8;
@ -1603,7 +1603,7 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
texelWeightData[clearByteStart - 1] &=
static_cast<u8>((1 << (weightParams.GetPackedBitSize() % 8)) - 1);
std::memset(texelWeightData.data() + clearByteStart, 0,
std::min(16U - clearByteStart, 16U));
(std::min)(16U - clearByteStart, 16U));
}
IntegerEncodedVector texelWeightValues;
@ -1674,8 +1674,8 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
std::array<u32, 12 * 12> uncompData;
DecompressBlock(blockPtr, block_width, block_height, uncompData);
u32 decompWidth = std::min(block_width, width - x);
u32 decompHeight = std::min(block_height, height - y);
u32 decompWidth = (std::min)(block_width, width - x);
u32 decompHeight = (std::min)(block_height, height - y);
const std::span<u8> outRow = output.subspan(depth_offset + (y * width + x) * 4);
for (u32 h = 0; h < decompHeight; ++h) {

View file

@ -111,13 +111,13 @@ void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 wid
const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
u32 unprocessed_lines = num_lines;
u32 extent_y = std::min(num_lines, height - origin_y);
u32 extent_y = (std::min)(num_lines, height - origin_y);
for (u32 slice = 0; slice < depth; ++slice) {
const u32 z = slice + origin_z;
const u32 offset_z = (z >> block_depth) * slice_size +
((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
const u32 lines_in_y = (std::min)(unprocessed_lines, extent_y);
for (u32 line = 0; line < lines_in_y; ++line) {
const u32 y = line + origin_y;
const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
@ -180,7 +180,7 @@ void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes
u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
const u32 new_bpp = (std::min)(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
width = (width * bytes_per_pixel) >> new_bpp;
bytes_per_pixel = 1U << new_bpp;
Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
@ -191,7 +191,7 @@ void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_p
u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
const u32 new_bpp = (std::min)(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
width = (width * bytes_per_pixel) >> new_bpp;
bytes_per_pixel = 1U << new_bpp;
Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,

View file

@ -75,7 +75,7 @@ float TSCEntry::MaxAnisotropy() const noexcept {
if (anisotropic_settings == Settings::AnisotropyMode::Automatic) {
added_anisotropic = Settings::values.resolution_info.up_scale >>
Settings::values.resolution_info.down_shift;
added_anisotropic = std::max(added_anisotropic - 1, 0);
added_anisotropic = (std::max)(added_anisotropic - 1, 0);
} else {
added_anisotropic = static_cast<u32>(Settings::values.max_anisotropy.GetValue()) - 1U;
}

View file

@ -6,7 +6,7 @@
namespace Tegra::Texture {
Common::ThreadWorker& GetThreadWorkers() {
static Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
static Common::ThreadWorker workers{(std::max)(std::thread::hardware_concurrency(), 2U) / 2,
"ImageTranscode"};
return workers;

View file

@ -104,8 +104,8 @@ std::pair<std::array<Shader::TransformFeedbackVarying, 256>, u32> MakeTransformF
}
}
xfb[attribute] = varying;
count = std::max(count, attribute);
highest = std::max(highest, (base_offset + varying.components) * 4);
count = (std::max)(count, attribute);
highest = (std::max)(highest, (base_offset + varying.components) * 4);
}
UNIMPLEMENTED_IF(highest != layout.stride);
}

View file

@ -699,9 +699,9 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
LOG_WARNING(Render_Vulkan,
"MVK driver breaks when using more than 16 vertex attributes/bindings");
properties.properties.limits.maxVertexInputAttributes =
std::min(properties.properties.limits.maxVertexInputAttributes, 16U);
(std::min)(properties.properties.limits.maxVertexInputAttributes, 16U);
properties.properties.limits.maxVertexInputBindings =
std::min(properties.properties.limits.maxVertexInputBindings, 16U);
(std::min)(properties.properties.limits.maxVertexInputBindings, 16U);
}
if (is_turnip) {

View file

@ -136,7 +136,7 @@ namespace Vulkan {
if (vmaMapMemory(allocator, allocation, &mapped_ptr) != VK_SUCCESS) return {};
}
const size_t n = static_cast<size_t>(std::min<VkDeviceSize>(size,
std::numeric_limits<size_t>::max()));
(std::numeric_limits<size_t>::max)()));
return std::span<u8>{static_cast<u8 *>(mapped_ptr), n};
}
@ -149,7 +149,7 @@ namespace Vulkan {
const_cast<MemoryCommit *>(this)->mapped_ptr = p;
}
const size_t n = static_cast<size_t>(std::min<VkDeviceSize>(size,
std::numeric_limits<size_t>::max()));
(std::numeric_limits<size_t>::max)()));
return std::span<const u8>{static_cast<const u8 *>(mapped_ptr), n};
}

View file

@ -860,7 +860,7 @@ public:
/// Set object name.
void SetObjectNameEXT(const char* name) const;
VkResult Wait(u64 timeout = std::numeric_limits<u64>::max()) const noexcept {
VkResult Wait(u64 timeout = (std::numeric_limits<u64>::max)()) const noexcept {
return dld->vkWaitForFences(owner, 1, &handle, true, timeout);
}
@ -961,7 +961,7 @@ public:
* @param timeout Time in nanoseconds to timeout
* @return True on successful wait, false on timeout
*/
bool Wait(u64 value, u64 timeout = std::numeric_limits<u64>::max()) const {
bool Wait(u64 value, u64 timeout = (std::numeric_limits<u64>::max)()) const {
const VkSemaphoreWaitInfo wait_info{
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
.pNext = nullptr,