Commit 23319007 authored by Rusty Russell

hash: remove VALGRIND #ifdef - always run clean.

My simple test program on my laptop showed that with modern 32-bit Intel
CPUs and modern GCC, there's no measurable penalty for the clean version.

Andrew Bartlett complained that the valgrind noise was grating.  Agreed.
parent c10ed331
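
For reference, here is a rough, self-contained sketch of the kind of "simple test program" the message refers to (hypothetical: this is not the actual program, and the tail_masked/tail_bytes helper names are made up). It times only the part that differs between the two versions of the hash: how the final 1-4 byte tail of a key is folded into a 32-bit word.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

/* Masked whole-word read of a 1..4 byte tail (little-endian layout).
 * This is the trick being disabled: it may touch bytes past 'len'. */
static uint32_t tail_masked(const uint32_t *k, size_t len)
{
    switch (len) {
    case 4: return k[0];
    case 3: return k[0] & 0xffffff;
    case 2: return k[0] & 0xffff;
    case 1: return k[0] & 0xff;
    default: return 0;
    }
}

/* Byte-wise read of the same tail: only touches bytes that exist. */
static uint32_t tail_bytes(const uint32_t *k, size_t len)
{
    const uint8_t *k8 = (const uint8_t *)k;
    uint32_t v = 0;

    switch (len) {
    case 4: v |= (uint32_t)k8[3] << 24; /* fall through */
    case 3: v |= (uint32_t)k8[2] << 16; /* fall through */
    case 2: v |= (uint32_t)k8[1] << 8;  /* fall through */
    case 1: v |= k8[0];
    }
    return v;
}

int main(void)
{
    static uint32_t keys[1024];    /* word-aligned 4-byte "keys" */
    volatile uint32_t sink = 0;    /* keep the loops from being elided */
    clock_t t;
    size_t i, rep;

    for (i = 0; i < 1024; i++)
        keys[i] = (uint32_t)i * 2654435761u;

    t = clock();
    for (rep = 0; rep < 100000; rep++)
        for (i = 0; i < 1024; i++)
            sink += tail_masked(&keys[i], i % 4 + 1);
    printf("masked: %.3fs\n", (double)(clock() - t) / CLOCKS_PER_SEC);

    t = clock();
    for (rep = 0; rep < 100000; rep++)
        for (i = 0; i < 1024; i++)
            sink += tail_bytes(&keys[i], i % 4 + 1);
    printf("bytes:  %.3fs\n", (double)(clock() - t) / CLOCKS_PER_SEC);

    return 0;
}

The commit's claim is that on modern CPUs with a modern GCC, loops like these come out close enough that the masking trick buys nothing measurable.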
@@ -259,9 +259,7 @@ static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
   u.ptr = key;
   if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
     const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-#ifdef VALGRIND
     const uint8_t *k8;
-#endif
     /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
     while (length > 12)
@@ -283,9 +281,10 @@ static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
      * does it on word boundaries, so is OK with this. But VALGRIND will
      * still catch it and complain. The masking trick does make the hash
      * noticably faster for short strings (like English words).
+     *
+     * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
      */
-#ifndef VALGRIND
+#if 0
     switch(length)
     {
     case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
@@ -437,9 +436,7 @@ static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
   u.ptr = key;
   if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
     const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-#ifdef VALGRIND
     const uint8_t *k8;
-#endif
     /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
     while (length > 12)
@@ -461,9 +458,10 @@ static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
      * does it on word boundaries, so is OK with this. But VALGRIND will
      * still catch it and complain. The masking trick does make the hash
      * noticably faster for short strings (like English words).
+     *
+     * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
      */
-#ifndef VALGRIND
+#if 0
     switch(length)
     {
     case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
@@ -65,10 +65,6 @@
  * Author: Rusty Russell
  *
  * License: LGPLv3 (or later)
- *
- * Ccanlint:
- *	// hash fails because it accesses data in 4 byte quantities for speed.
- *	tests_pass_valgrind --partial-loads-ok=yes
  */
 int main(int argc, char *argv[])
 {
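
As a side note on the ccanlint change above, here is a minimal illustration (a hypothetical example, not code from the module's tests) of why the tests previously needed valgrind's --partial-loads-ok=yes, and why they no longer do once the byte-wise tail handling is unconditional.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* A 5-byte key in a heap buffer: one aligned 32-bit word plus a
     * 1-byte tail. */
    char *key = malloc(5);
    if (!key)
        return 1;
    memcpy(key, "hello", 5);
    const uint32_t *k = (const uint32_t *)key;

    /* Masked whole-word tail read (the old fast path): loads bytes 4..7
     * even though only byte 4 belongs to the key.  Valgrind flags this
     * because the load extends past the end of the 5-byte allocation,
     * hence the old --partial-loads-ok=yes exception. */
    uint32_t masked_tail = k[1] & 0xff;

    /* Byte-wise tail read (the path the commit makes unconditional):
     * touches only byte 4, so valgrind stays silent. */
    uint32_t clean_tail = ((const uint8_t *)key)[4];

    /* The two yield the same value on little-endian machines. */
    printf("masked=%02x clean=%02x\n", masked_tail, clean_tail);
    free(key);
    return 0;
}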