Clean up more space/tab mismatches in AudioCommon, Common, and VideoCommon.

Not planning to touch Core since it's the most actively changed part of the project.
lioncash
2013-03-19 21:51:12 -04:00
parent 0e3d8e2e9f
commit edd9d0e0ef
91 changed files with 2151 additions and 2169 deletions


@@ -119,7 +119,7 @@ u32 HashEctor(const u8* ptr, int length)
 inline u64 getblock(const u64 * p, int i)
 {
-return p[i];
+return p[i];
 }
 //----------
@@ -127,25 +127,25 @@ inline u64 getblock(const u64 * p, int i)
 inline void bmix64(u64 & h1, u64 & h2, u64 & k1, u64 & k2, u64 & c1, u64 & c2)
 {
-k1 *= c1;
-k1 = _rotl64(k1,23);
-k1 *= c2;
-h1 ^= k1;
-h1 += h2;
+k1 *= c1;
+k1 = _rotl64(k1,23);
+k1 *= c2;
+h1 ^= k1;
+h1 += h2;

-h2 = _rotl64(h2,41);
+h2 = _rotl64(h2,41);

-k2 *= c2;
-k2 = _rotl64(k2,23);
-k2 *= c1;
-h2 ^= k2;
-h2 += h1;
+k2 *= c2;
+k2 = _rotl64(k2,23);
+k2 *= c1;
+h2 ^= k2;
+h2 += h1;

-h1 = h1*3+0x52dce729;
-h2 = h2*3+0x38495ab5;
+h1 = h1*3+0x52dce729;
+h2 = h2*3+0x38495ab5;

-c1 = c1*5+0x7b7d159c;
-c2 = c2*5+0x6bce6396;
+c1 = c1*5+0x7b7d159c;
+c2 = c2*5+0x6bce6396;
 }
 //----------
@@ -153,87 +153,87 @@ inline void bmix64(u64 & h1, u64 & h2, u64 & k1, u64 & k2, u64 & c1, u64 & c2)
 inline u64 fmix64(u64 k)
 {
-k ^= k >> 33;
-k *= 0xff51afd7ed558ccd;
-k ^= k >> 33;
-k *= 0xc4ceb9fe1a85ec53;
-k ^= k >> 33;
+k ^= k >> 33;
+k *= 0xff51afd7ed558ccd;
+k ^= k >> 33;
+k *= 0xc4ceb9fe1a85ec53;
+k ^= k >> 33;

-return k;
+return k;
 }
 u64 GetMurmurHash3(const u8 *src, int len, u32 samples)
 {
-const u8 * data = (const u8*)src;
-const int nblocks = len / 16;
+const u8 * data = (const u8*)src;
+const int nblocks = len / 16;
 u32 Step = (len / 8);
 if(samples == 0) samples = max(Step, 1u);
 Step = Step / samples;
 if(Step < 1) Step = 1;
-u64 h1 = 0x9368e53c2f6af274;
-u64 h2 = 0x586dcd208f7cd3fd;
+u64 h1 = 0x9368e53c2f6af274;
+u64 h2 = 0x586dcd208f7cd3fd;

-u64 c1 = 0x87c37b91114253d5;
-u64 c2 = 0x4cf5ad432745937f;
+u64 c1 = 0x87c37b91114253d5;
+u64 c2 = 0x4cf5ad432745937f;

-//----------
-// body
+//----------
+// body

-const u64 * blocks = (const u64 *)(data);
+const u64 * blocks = (const u64 *)(data);

-for(int i = 0; i < nblocks; i+=Step)
-{
-u64 k1 = getblock(blocks,i*2+0);
-u64 k2 = getblock(blocks,i*2+1);
+for(int i = 0; i < nblocks; i+=Step)
+{
+u64 k1 = getblock(blocks,i*2+0);
+u64 k2 = getblock(blocks,i*2+1);

-bmix64(h1,h2,k1,k2,c1,c2);
-}
+bmix64(h1,h2,k1,k2,c1,c2);
+}

-//----------
-// tail
+//----------
+// tail

-const u8 * tail = (const u8*)(data + nblocks*16);
+const u8 * tail = (const u8*)(data + nblocks*16);

-u64 k1 = 0;
-u64 k2 = 0;
+u64 k1 = 0;
+u64 k2 = 0;

-switch(len & 15)
-{
-case 15: k2 ^= u64(tail[14]) << 48;
-case 14: k2 ^= u64(tail[13]) << 40;
-case 13: k2 ^= u64(tail[12]) << 32;
-case 12: k2 ^= u64(tail[11]) << 24;
-case 11: k2 ^= u64(tail[10]) << 16;
-case 10: k2 ^= u64(tail[ 9]) << 8;
-case 9: k2 ^= u64(tail[ 8]) << 0;
+switch(len & 15)
+{
+case 15: k2 ^= u64(tail[14]) << 48;
+case 14: k2 ^= u64(tail[13]) << 40;
+case 13: k2 ^= u64(tail[12]) << 32;
+case 12: k2 ^= u64(tail[11]) << 24;
+case 11: k2 ^= u64(tail[10]) << 16;
+case 10: k2 ^= u64(tail[ 9]) << 8;
+case 9: k2 ^= u64(tail[ 8]) << 0;

-case 8: k1 ^= u64(tail[ 7]) << 56;
-case 7: k1 ^= u64(tail[ 6]) << 48;
-case 6: k1 ^= u64(tail[ 5]) << 40;
-case 5: k1 ^= u64(tail[ 4]) << 32;
-case 4: k1 ^= u64(tail[ 3]) << 24;
-case 3: k1 ^= u64(tail[ 2]) << 16;
-case 2: k1 ^= u64(tail[ 1]) << 8;
-case 1: k1 ^= u64(tail[ 0]) << 0;
-bmix64(h1,h2,k1,k2,c1,c2);
-};
+case 8: k1 ^= u64(tail[ 7]) << 56;
+case 7: k1 ^= u64(tail[ 6]) << 48;
+case 6: k1 ^= u64(tail[ 5]) << 40;
+case 5: k1 ^= u64(tail[ 4]) << 32;
+case 4: k1 ^= u64(tail[ 3]) << 24;
+case 3: k1 ^= u64(tail[ 2]) << 16;
+case 2: k1 ^= u64(tail[ 1]) << 8;
+case 1: k1 ^= u64(tail[ 0]) << 0;
+bmix64(h1,h2,k1,k2,c1,c2);
+};

-//----------
-// finalization
+//----------
+// finalization

-h2 ^= len;
+h2 ^= len;

-h1 += h2;
-h2 += h1;
+h1 += h2;
+h2 += h1;

-h1 = fmix64(h1);
-h2 = fmix64(h2);
+h1 = fmix64(h1);
+h2 = fmix64(h2);

-h1 += h2;
+h1 += h2;

-return h1;
+return h1;
 }
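
Note on the hunk above: the samples argument makes GetMurmurHash3 a sampled hash. Step is derived from the buffer length, the block loop advances by Step, so only a subset of 16-byte blocks is fed through bmix64 when sampling is requested, and samples == 0 degenerates to hashing every block. A small standalone sketch of that stride arithmetic (the 1 MiB buffer size and the sample counts are made-up illustration values, not taken from the commit):

#include <algorithm>
#include <cstdio>

// Mirrors the stride computation shown in GetMurmurHash3 above:
// samples == 0 means "hash every block"; otherwise only every
// Step-th pair of 64-bit words gets mixed in.
static int SampleStride(int len, unsigned samples)
{
    unsigned step = len / 8;
    if (samples == 0)
        samples = std::max(step, 1u);
    step = step / samples;
    if (step < 1)
        step = 1;
    return static_cast<int>(step);
}

int main()
{
    // Hypothetical buffer sizes, for illustration only.
    std::printf("%d\n", SampleStride(1024 * 1024, 0));   // 1: every block hashed
    std::printf("%d\n", SampleStride(1024 * 1024, 512)); // 256: sparse sampling
    return 0;
}
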
@@ -262,11 +262,13 @@ u64 GetCRC32(const u8 *src, int len, u32 samples)
 }

-/* NOTE: This hash function is used for custom texture loading/dumping, so
-it should not be changed, which would require all custom textures to be
-recalculated for their new hash values. If the hashing function is
-changed, make sure this one is still used when the legacy parameter is
-true. */
+/*
+* NOTE: This hash function is used for custom texture loading/dumping, so
+* it should not be changed, which would require all custom textures to be
+* recalculated for their new hash values. If the hashing function is
+* changed, make sure this one is still used when the legacy parameter is
+* true.
+*/
 u64 GetHashHiresTexture(const u8 *src, int len, u32 samples)
 {
 const u64 m = 0xc6a4a7935bd1e995;
@@ -282,11 +284,11 @@ u64 GetHashHiresTexture(const u8 *src, int len, u32 samples)
 {
 u64 k = data[0];
 data+=Step;
-k *= m;
+k *= m;
 k ^= k >> r;
-k *= m;
+k *= m;
 h ^= k;
-h *= m;
+h *= m;
 }

 const u8 * data2 = (const u8*)end;
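
The NOTE block above is load-bearing: custom texture packs on disk are keyed by the value GetHashHiresTexture returns, so its constants and mixing order must stay frozen. For reference, a simplified standalone sketch of the MurmurHash64A-style block mix visible in this hunk (sampling, tail bytes, and the final mix are omitted; the multiplier is the one shown above, while the shift of 47 is the canonical MurmurHash64A value and is assumed here rather than read from the diff):

#include <cstdint>
#include <cstring>

// Simplified sketch of the per-block mix in the loop above; not the real
// Dolphin function (no sampling, no tail handling, no finalization).
uint64_t MixWholeBlocks(const uint8_t* src, size_t len)
{
    const uint64_t m = 0xc6a4a7935bd1e995ULL; // multiplier shown in the hunk
    const int r = 47;                         // canonical MurmurHash64A shift (assumed)
    uint64_t h = 0;

    for (size_t i = 0; i + 8 <= len; i += 8)
    {
        uint64_t k;
        std::memcpy(&k, src + i, sizeof(k)); // portable unaligned-safe load

        k *= m; // same three-step mix as the loop body in the diff
        k ^= k >> r;
        k *= m;

        h ^= k;
        h *= m;
    }
    return h;
}

Because any change to m, r, or the mixing order changes every stored hash, existing texture packs would silently stop matching, which is exactly what the comment warns about.
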
@@ -453,9 +455,11 @@ u64 GetMurmurHash3(const u8* src, int len, u32 samples)
 return *((u64 *)&out);
 }

-/* FIXME: The old 32-bit version of this hash made different hashes than the
-64-bit version. Until someone can make a new version of the 32-bit one that
-makes identical hashes, this is just a c/p of the 64-bit one. */
+/*
+* FIXME: The old 32-bit version of this hash made different hashes than the
+* 64-bit version. Until someone can make a new version of the 32-bit one that
+* makes identical hashes, this is just a c/p of the 64-bit one.
+*/
 u64 GetHashHiresTexture(const u8 *src, int len, u32 samples)
 {
 const u64 m = 0xc6a4a7935bd1e995ULL;
@@ -473,7 +477,7 @@ u64 GetHashHiresTexture(const u8 *src, int len, u32 samples)
 data+=Step;
 k *= m;
 k ^= k >> r;
-k *= m;
+k *= m;
 h ^= k;
 h *= m;
 }
@@ -502,7 +506,7 @@ u64 GetHashHiresTexture(const u8 *src, int len, u32 samples)

 u64 GetHash64(const u8 *src, int len, u32 samples)
 {
-return ptrHashFunction(src, len, samples);
+return ptrHashFunction(src, len, samples);
 }

 // sets the hash function used for the texture cache
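
GetHash64 is only a trampoline through the ptrHashFunction pointer, and the trailing comment points at the setter that repoints it for the texture cache. A hedged sketch of that dispatch pattern, with stub hashes standing in for GetMurmurHash3 and GetHashHiresTexture and a hypothetical setter name (the real setter's name and signature live outside this hunk and may differ):

#include <cstdint>

typedef uint8_t  u8;
typedef uint32_t u32;
typedef uint64_t u64;

// Stubs standing in for the real GetMurmurHash3 / GetHashHiresTexture.
static u64 FastHashStub(const u8*, int, u32)  { return 1; }
static u64 HiresHashStub(const u8*, int, u32) { return 2; }

// Module-level function pointer, as implied by GetHash64 above.
typedef u64 (*HashFunctionPtr)(const u8* src, int len, u32 samples);
static HashFunctionPtr ptrHashFunction = &FastHashStub;

u64 GetHash64(const u8* src, int len, u32 samples)
{
    return ptrHashFunction(src, len, samples);
}

// Hypothetical setter illustrating the "sets the hash function used for the
// texture cache" comment; the actual function in the file may differ.
void SetHash64Function(bool useHiresTextures)
{
    ptrHashFunction = useHiresTextures ? &HiresHashStub : &FastHashStub;
}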