Mirror of https://github.com/dolphin-emu/dolphin.git
Merge pull request #4548 from stenzek/gcc-sse
Support SSSE3 texture decoders and CRC32 hashing on non-native builds (gcc)
@@ -238,9 +238,11 @@ u64 GetMurmurHash3(const u8* src, u32 len, u32 samples)
}

// CRC32 hash using the SSE4.2 instruction
#if defined(_M_X86_64)

FUNCTION_TARGET_SSE42
u64 GetCRC32(const u8* src, u32 len, u32 samples)
{
#if _M_SSE >= 0x402 || defined(_M_ARM_64)
  u64 h[4] = {len, 0, 0, 0};
  u32 Step = (len / 8);
  const u64* data = (const u64*)src;
@@ -250,9 +252,7 @@ u64 GetCRC32(const u8* src, u32 len, u32 samples)
  Step = Step / samples;
  if (Step < 1)
    Step = 1;
#endif

#if _M_SSE >= 0x402
  while (data < end - Step * 3)
  {
    h[0] = _mm_crc32_u64(h[0], data[Step * 0]);
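
A note on the loop above: the sampled SSE4.2 path keeps four independent CRC32 accumulators (h[0]..h[3]) and feeds each one a different strided 64-bit word per iteration, so several crc32 instructions can be in flight at once. The following is a minimal, self-contained sketch of that pattern, not the exact code in this diff; it assumes step >= 1 and a buffer of at least step * 4 * 8 bytes, and must be built with SSE4.2 enabled (e.g. -msse4.2, or the FUNCTION_TARGET_SSE42 attribute introduced by this commit).

#include <cstdint>
#include <cstring>
#include <nmmintrin.h>  // _mm_crc32_u64 (SSE4.2)

// Illustrative sketch only: four interleaved CRC32 lanes over a sampled buffer.
static uint64_t SampledCrc32x4(const uint8_t* src, uint32_t len, uint32_t step)
{
  uint64_t h[4] = {len, 0, 0, 0};
  const uint64_t* data = reinterpret_cast<const uint64_t*>(src);
  const uint64_t* end = data + len / 8;
  while (data < end - step * 3)
  {
    h[0] = _mm_crc32_u64(h[0], data[step * 0]);
    h[1] = _mm_crc32_u64(h[1], data[step * 1]);
    h[2] = _mm_crc32_u64(h[2], data[step * 2]);
    h[3] = _mm_crc32_u64(h[3], data[step * 3]);
    data += step * 4;
  }
  if (len & 7)  // fold in the trailing partial word, as the diffed code does via memcpy
  {
    uint64_t temp = 0;
    std::memcpy(&temp, end, len & 7);
    h[0] = _mm_crc32_u64(h[0], temp);
  }
  // Same ad-hoc lane combination as the code in this diff.
  return h[0] + (h[1] << 10) + (h[2] << 21) + (h[3] << 32);
}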
@@ -274,7 +274,25 @@ u64 GetCRC32(const u8* src, u32 len, u32 samples)
    memcpy(&temp, end, len & 7);
    h[0] = _mm_crc32_u64(h[0], temp);
  }

  // FIXME: is there a better way to combine these partial hashes?
  return h[0] + (h[1] << 10) + (h[2] << 21) + (h[3] << 32);
}

#elif defined(_M_ARM_64)

u64 GetCRC32(const u8* src, u32 len, u32 samples)
{
  u64 h[4] = {len, 0, 0, 0};
  u32 Step = (len / 8);
  const u64* data = (const u64*)src;
  const u64* end = data + Step;
  if (samples == 0)
    samples = std::max(Step, 1u);
  Step = Step / samples;
  if (Step < 1)
    Step = 1;

  // We should be able to use intrinsics for this
  // Too bad the intrinsics for this instruction were added in GCC 4.9.1
  // The Android NDK (as of r10e) only has GCC 4.9
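
The comments at the end of the hunk above explain why the AArch64 path uses inline assembly: the ACLE CRC32 intrinsics only appeared in GCC 4.9.1, and the Android NDK at the time shipped GCC 4.9. For reference, a hedged sketch of what the same sampling loop could look like with those intrinsics once a new enough compiler can be assumed; this is not code from this commit, and it requires <arm_acle.h> and __ARM_FEATURE_CRC32 (e.g. -march=armv8-a+crc).

#include <cstdint>
#include <arm_acle.h>  // __crc32d and friends (ACLE CRC32 intrinsics)

// Hypothetical alternative to the inline assembly in the next hunk;
// assumes step >= 1 and a buffer of at least step * 4 * 8 bytes.
static uint64_t SampledCrc32Aarch64(const uint64_t* data, const uint64_t* end, uint32_t step)
{
  uint32_t h[4] = {0, 0, 0, 0};
  while (data < end - step * 3)
  {
    h[0] = __crc32d(h[0], data[step * 0]);
    h[1] = __crc32d(h[1], data[step * 1]);
    h[2] = __crc32d(h[2], data[step * 2]);
    h[3] = __crc32d(h[3], data[step * 3]);
    data += step * 4;
  }
  // Combine the lanes the same way the SSE4.2 path does.
  return uint64_t(h[0]) + (uint64_t(h[1]) << 10) + (uint64_t(h[2]) << 21) + (uint64_t(h[3]) << 32);
}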
@@ -317,16 +335,20 @@ u64 GetCRC32(const u8* src, u32 len, u32 samples)
        : [res] "=r"(h[0])
        : [two] "r"(h[0]), [three] "r"(temp));
  }
#endif

#if _M_SSE >= 0x402 || defined(_M_ARM_64)
  // FIXME: is there a better way to combine these partial hashes?
  return h[0] + (h[1] << 10) + (h[2] << 21) + (h[3] << 32);
#else
  return 0;
#endif
}

#else

u64 GetCRC32(const u8* src, u32 len, u32 samples)
{
  return 0;
}

#endif

/*
 * NOTE: This hash function is used for custom texture loading/dumping, so
 * it should not be changed, which would require all custom textures to be
@@ -386,10 +408,13 @@ u64 GetHashHiresTexture(const u8* src, u32 len, u32 samples)
  return h;
}
#else

// CRC32 hash using the SSE4.2 instruction
#if defined(_M_X86)

FUNCTION_TARGET_SSE42
u64 GetCRC32(const u8* src, u32 len, u32 samples)
{
#if _M_SSE >= 0x402
  u32 h = len;
  u32 Step = (len / 4);
  const u32* data = (const u32*)src;
@@ -407,11 +432,17 @@ u64 GetCRC32(const u8* src, u32 len, u32 samples)

  const u8* data2 = (const u8*)end;
  return (u64)_mm_crc32_u32(h, u32(data2[0]));
#else
  return 0;
#endif
}

#else

u64 GetCRC32(const u8* src, u32 len, u32 samples)
{
  return 0;
}

#endif

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
@@ -606,7 +637,7 @@ u64 GetHash64(const u8* src, u32 len, u32 samples)
// sets the hash function used for the texture cache
void SetHash64Function()
{
#if _M_SSE >= 0x402
#if defined(_M_X86_64) || defined(_M_X86)
  if (cpu_info.bSSE4_2)  // sse crc32 version
  {
    ptrHashFunction = &GetCRC32;
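
The hunk above shows the other half of the per-function targeting scheme: SetHash64Function only installs the SSE4.2 GetCRC32 as the texture-cache hash after checking cpu_info.bSSE4_2 at runtime. Stripped of Dolphin specifics, the dispatch pattern looks roughly like the sketch below; the names and the __builtin_cpu_supports check are illustrative, Dolphin itself uses its own cpu_info flags.

#include <cstdint>

using HashFn = uint64_t (*)(const uint8_t* src, uint32_t len, uint32_t samples);

uint64_t GetCRC32(const uint8_t* src, uint32_t len, uint32_t samples);        // SSE4.2-targeted
uint64_t GetMurmurHash3(const uint8_t* src, uint32_t len, uint32_t samples);  // generic fallback

static HashFn s_hash_function = &GetMurmurHash3;

void SelectHashFunction()
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__x86_64__) || defined(__i386__))
  // Only install the targeted routine when the CPU actually supports SSE4.2;
  // calling it on an older CPU would fault on an illegal instruction.
  if (__builtin_cpu_supports("sse4.2"))
    s_hash_function = &GetCRC32;
#endif
}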
@@ -4,28 +4,69 @@

#pragma once

#ifdef _M_X86
#if defined(_M_X86)

/**
 * It is assumed that all compilers used to build Dolphin support intrinsics up to and including
 * SSE 4.2 on x86/x64.
 */

#if defined(__GNUC__) || defined(__clang__)

/**
 * Due to limitations in GCC, SSE intrinsics are only available when compiling with the
 * corresponding instruction set enabled. However, using the target attribute, we can compile
 * single functions with a different target instruction set, while still creating a generic build.
 *
 * Since this instruction set is enabled per-function, any callers should verify that the
 * instruction set is supported at runtime before calling it, and provide a fallback implementation
 * when not supported.
 *
 * When building with -march=native, or enabling the instruction sets in the compile flags, permit
 * usage of the intrinsics without any function attributes. If the command-line architecture does
 * not support this instruction set, enable it via function targeting.
 */

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#ifndef __SSE4_2__
#define FUNCTION_TARGET_SSE42 [[gnu::target("sse4.2")]]
#endif
#ifndef __SSE4_1__
#define FUNCTION_TARGET_SSR41 [[gnu::target("sse4.1")]]
#endif
#ifndef __SSSE3__
#define FUNCTION_TARGET_SSSE3 [[gnu::target("ssse3")]]
#endif
#ifndef __SSE3__
#define FUNCTION_TARGET_SSE3 [[gnu::target("sse3")]]
#endif

#if defined _M_GENERIC
#define _M_SSE 0
#elif _MSC_VER || __INTEL_COMPILER
#define _M_SSE 0x402
#elif defined __GNUC__
#if defined __SSE4_2__
#define _M_SSE 0x402
#elif defined __SSE4_1__
#define _M_SSE 0x401
#elif defined __SSSE3__
#define _M_SSE 0x301
#elif defined __SSE3__
#define _M_SSE 0x300
#endif
#endif

#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)

/**
 * MSVC and ICC support intrinsics for any instruction set without any function attributes.
 */
#include <intrin.h>

#endif  // defined(_MSC_VER) || defined(__INTEL_COMPILER)

#endif  // _M_X86

/**
 * Define the FUNCTION_TARGET macros to nothing if they are not needed, or not on an X86 platform.
 * This way when a function is defined with FUNCTION_TARGET you don't need to define a second
 * version without the macro around a #ifdef guard. Be careful when using intrinsics, as all use
 * should still be placed around a #ifdef _M_X86 if the file is compiled on all architectures.
 */
#ifndef FUNCTION_TARGET_SSE42
#define FUNCTION_TARGET_SSE42
#endif
#ifndef FUNCTION_TARGET_SSR41
#define FUNCTION_TARGET_SSR41
#endif
#ifndef FUNCTION_TARGET_SSSE3
#define FUNCTION_TARGET_SSSE3
#endif
#ifndef FUNCTION_TARGET_SSE3
#define FUNCTION_TARGET_SSE3
#endif
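
In short, the header above gives every SSE level a FUNCTION_TARGET_* macro that expands to [[gnu::target("...")]] only when the compiler flags do not already enable that instruction set, and to nothing on MSVC/ICC, native builds, and non-x86 platforms. A small hypothetical usage example follows; the include path and function are illustrative and not part of this commit.

#include <cstdint>
#include "Common/Intrinsics.h"  // assumed path of the header shown above

// x86-64 sketch: in a generic (non -msse4.2) GCC/Clang build, the attribute from
// FUNCTION_TARGET_SSE42 lets this one function use an SSE4.2 intrinsic while the
// rest of the translation unit stays at the baseline architecture. Callers must
// still check for SSE4.2 support at runtime before calling it.
FUNCTION_TARGET_SSE42
static uint32_t Crc32Word(uint32_t crc, uint64_t word)
{
  return static_cast<uint32_t>(_mm_crc32_u64(crc, word));
}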