author    Rusty Russell <rusty@rustcorp.com.au>  2011-06-08 17:20:48 +0930
committer Rusty Russell <rusty@rustcorp.com.au>  2011-06-08 11:05:47 +0200
commit    4fa51257b283c2e8bb415cc7f9c001d64c8a2669 (patch)
tree      4529dad08efa2757a2b6eb26a1d43be76f500ba7 /lib/tdb
parent    4afe426877fed3ed4d1dae4a8d96dce3f4983b91 (diff)
tdb: enable VALGRIND to remove valgrind noise.
Andrew Bartlett complained that valgrind needs --partial-loads-ok=yes,
otherwise the Jenkins hash makes it complain. My benchmarking here revealed
that at least with modern gcc (4.5) and CPU (Intel i5 32 bit) there's no
measurable performance penalty for the "correct" code, so rip out the
optimized one.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Autobuild-User: Rusty Russell <rusty@rustcorp.com.au>
Autobuild-Date: Wed Jun 8 11:05:47 CEST 2011 on sn-devel-104
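The hunks below are easier to follow with the trick in mind. Here is a
minimal standalone sketch (my own illustration, not tdb code; the names
tail_masked and tail_bytewise are invented) of the two tail-read strategies
for a 3-byte key on a little-endian machine, matching the hashlittle() path:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Removed optimisation: load a whole aligned 32-bit word and mask off
     * the bytes beyond the key. Never faults on word-protected hardware,
     * because the extra bytes share a word with the key, but valgrind sees
     * a load past the end of the allocation. */
    static uint32_t tail_masked(const uint32_t *k, size_t len)
    {
        switch (len) {
        case 3: return k[0] & 0xffffff;
        case 2: return k[0] & 0xffff;
        case 1: return k[0] & 0xff;
        default: return 0;
        }
    }

    /* The "correct" code this commit keeps: read only bytes that belong
     * to the key, one at a time, so valgrind stays quiet. On little-endian
     * hardware this yields the same value as the masked read. */
    static uint32_t tail_bytewise(const uint8_t *k8, size_t len)
    {
        uint32_t v = 0;
        switch (len) {
        case 3: v += (uint32_t)k8[2] << 16; /* fall through */
        case 2: v += (uint32_t)k8[1] << 8;  /* fall through */
        case 1: v += k8[0];
        }
        return v;
    }

    int main(void)
    {
        /* A heap block of exactly 3 bytes, as a real short key would be;
         * malloc returns memory aligned for the 32-bit read. */
        char *key = malloc(3);
        if (!key)
            return 1;
        memcpy(key, "abc", 3);

        /* Under valgrind, the next line is reported as an invalid read of
         * size 4 from a 3-byte block; the byte-wise call is not flagged. */
        printf("masked:   %08" PRIx32 "\n",
               tail_masked((const uint32_t *)key, 3));
        printf("bytewise: %08" PRIx32 "\n",
               tail_bytewise((const uint8_t *)key, 3));
        free(key);
        return 0;
    }

The invalid read of size 4 against the 3-byte allocation is exactly the
noise that --partial-loads-ok=yes used to suppress.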
Diffstat (limited to 'lib/tdb')
-rw-r--r--  lib/tdb/common/hash.c | 35
1 file changed, 0 insertions(+), 35 deletions(-)
diff --git a/lib/tdb/common/hash.c b/lib/tdb/common/hash.c
index 2472ed1ace..1eed7221d2 100644
--- a/lib/tdb/common/hash.c
+++ b/lib/tdb/common/hash.c
@@ -214,9 +214,7 @@ static uint32_t hashlittle( const void *key, size_t length )
u.ptr = key;
if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-#ifdef VALGRIND
const uint8_t *k8;
-#endif
/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
while (length > 12)
@@ -230,36 +228,6 @@ static uint32_t hashlittle( const void *key, size_t length )
}
/*----------------------------- handle the last (probably partial) block */
- /*
- * "k[2]&0xffffff" actually reads beyond the end of the string, but
- * then masks off the part it's not allowed to read. Because the
- * string is aligned, the masked-off tail is in the same word as the
- * rest of the string. Every machine with memory protection I've seen
- * does it on word boundaries, so is OK with this. But VALGRIND will
- * still catch it and complain. The masking trick does make the hash
- * noticably faster for short strings (like English words).
- */
-#ifndef VALGRIND
-
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
- case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
- case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
- case 6 : b+=k[1]&0xffff; a+=k[0]; break;
- case 5 : b+=k[1]&0xff; a+=k[0]; break;
- case 4 : a+=k[0]; break;
- case 3 : a+=k[0]&0xffffff; break;
- case 2 : a+=k[0]&0xffff; break;
- case 1 : a+=k[0]&0xff; break;
- case 0 : return c; /* zero length strings require no mixing */
- }
-
-#else /* make valgrind happy */
-
k8 = (const uint8_t *)k;
switch(length)
{
@@ -277,9 +245,6 @@ static uint32_t hashlittle( const void *key, size_t length )
case 1 : a+=k8[0]; break;
case 0 : return c;
}
-
-#endif /* !valgrind */
-
} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
const uint8_t *k8;
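With the #ifdef gone, the byte-wise tail handling is always compiled in, so
a plain valgrind run over any tdb-linked binary no longer needs the
workaround flag. For illustration (tdbtorture is tdb's stress tester; the
binary path is hypothetical):

    # before this commit
    valgrind --partial-loads-ok=yes ./bin/tdbtorture
    # after this commit
    valgrind ./bin/tdbtorture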