diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2012-06-19 12:43:04 +0930 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2012-06-19 05:38:07 +0200 |
commit | dd42962878ab7c9ddfa79d7c32094fb6748017b8 (patch) | |
tree | a614af427c5ad0d962db77a58f133cb39c9bd057 /lib/ntdb/open.c | |
parent | f986554b1e38d8dd40b4bf4748d4aeb470e27d2e (diff) | |
download | samba-dd42962878ab7c9ddfa79d7c32094fb6748017b8.tar.gz samba-dd42962878ab7c9ddfa79d7c32094fb6748017b8.tar.bz2 samba-dd42962878ab7c9ddfa79d7c32094fb6748017b8.zip |
ntdb: remove hash table trees.
TDB2 started with a top-level hash of 1024 entries, divided into 128
groups of 8 buckets. When a bucket filled, the 8 bucket group
expanded into pointers into 8 new 64-entry hash tables. When these
filled, they expanded in turn, etc.
It's a nice idea to automatically expand the hash tables, but it
doesn't pay off. Remove it for NTDB.
1) It only beats TDB performance when the database is huge and the
TDB hashsize is small. We are about 20% slower on medium-size
databases (1000 to 10000 records), worse on really small ones.
2) Since we're 64 bits, our hash tables are already twice as expensive
as TDB.
3) Since our hash function is good, it means that all groups tend to
fill at the same time, meaning the hash enlarges by a factor of 128
all at once, leading to a very large database at that point.
4) Our efficiency would improve if we enlarged the top level, but
that makes our minimum db size even worse: it's already over 8k,
and jumps to 1M after about 1000 entries!
5) Making the sub group size larger gives a shallower tree, which
performs better, but makes the "hash explosion" problem worse.
6) The code is complicated, having to handle delete and reshuffling
groups of hash buckets, and expansion of buckets.
7) We have to handle the case where all the records somehow end up with
the same hash value, which requires special code to chain records for
that case.
On the other hand, it would be nice if we didn't degrade as badly as
TDB does when the hash chains get long.
This patch removes the hash-growing code, but instead of chaining like
TDB does when a bucket fills, we point the bucket to an array of
record pointers. Since each on-disk NTDB pointer contains some hash
bits from the record (we steal the upper 8 bits of the offset), 99.5%
of the time we don't need to load the record to determine if it
matches. This makes an array of offsets much more cache-friendly than
a linked list.
Here are the times (in ns) for tdb_store of N records, tdb_store of N
records the second time, and a fetch of all N records. I've also
included the final database size and the smbtorture local.[n]tdb_speed
results.
Benchmark details:
1) Compiled with -O2.
2) assert() was disabled in TDB2 and NTDB.
3) The "optimize fetch" patch was applied to NTDB.
10 runs, using tmpfs (otherwise massive swapping as db hits ~30M,
despite plenty of RAM).
Insert Re-ins Fetch Size dbspeed
(nsec) (nsec) (nsec) (Kb) (ops/sec)
TDB (10000 hashsize):
100 records: 3882 3320 1609 53 203204
1000 records: 3651 3281 1571 115 218021
10000 records: 3404 3326 1595 880 202874
100000 records: 4317 3825 2097 8262 126811
1000000 records: 11568 11578 9320 77005 25046
TDB2 (1024 hashsize, expandable):
100 records: 3867 3329 1699 17 187100
1000 records: 4040 3249 1639 154 186255
10000 records: 4143 3300 1695 1226 185110
100000 records: 4481 3425 1800 17848 163483
1000000 records: 4055 3534 1878 106386 160774
NTDB (8192 hashsize):
100 records: 4259 3376 1692 82 190852
1000 records: 3640 3275 1566 130 195106
10000 records: 4337 3438 1614 773 188362
100000 records: 4750 5165 1746 9001 169197
1000000 records: 4897 5180 2341 83838 121901
Analysis:
1) TDB wins on small databases, beating TDB2 by ~15%, NTDB by ~10%.
2) TDB starts to lose when hash chains get 10 long (fetch 10% slower
than TDB2/NTDB).
3) TDB does horribly when hash chains get 100 long (fetch 4x slower
than NTDB, 5x slower than TDB2, insert about 2-3x slower).
4) TDB2 databases are 40% larger than TDB1. NTDB is about 15% larger
than TDB1.
Diffstat (limited to 'lib/ntdb/open.c')
-rw-r--r-- | lib/ntdb/open.c | 146 |
1 files changed, 87 insertions, 59 deletions
diff --git a/lib/ntdb/open.c b/lib/ntdb/open.c index 6aac46ab2f..4b8c45298b 100644 --- a/lib/ntdb/open.c +++ b/lib/ntdb/open.c @@ -17,7 +17,6 @@ */ #include "private.h" #include <ccan/build_assert/build_assert.h> -#include <assert.h> /* all tdbs, to detect double-opens (fcntl file don't nest!) */ static struct ntdb_context *tdbs = NULL; @@ -53,10 +52,10 @@ static bool read_all(int fd, void *buf, size_t len) return true; } -static uint64_t random_number(struct ntdb_context *ntdb) +static uint32_t random_number(struct ntdb_context *ntdb) { int fd; - uint64_t ret = 0; + uint32_t ret = 0; struct timeval now; fd = open("/dev/urandom", O_RDONLY); @@ -71,14 +70,14 @@ static uint64_t random_number(struct ntdb_context *ntdb) fd = open("/dev/egd-pool", O_RDWR); if (fd >= 0) { /* Command is 1, next byte is size we want to read. */ - char cmd[2] = { 1, sizeof(uint64_t) }; + char cmd[2] = { 1, sizeof(uint32_t) }; if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) { - char reply[1 + sizeof(uint64_t)]; + char reply[1 + sizeof(uint32_t)]; int r = read(fd, reply, sizeof(reply)); if (r > 1) { /* Copy at least some bytes. 
*/ memcpy(&ret, reply+1, r - 1); - if (reply[0] == sizeof(uint64_t) + if (reply[0] == sizeof(uint32_t) && r == sizeof(reply)) { close(fd); return ret; @@ -105,92 +104,119 @@ static void ntdb_context_init(struct ntdb_context *ntdb) ntdb->access = NULL; } -struct new_database { - struct ntdb_header hdr; - struct ntdb_freetable ftable; - struct ntdb_free_record remainder; -}; +/* initialise a new database: + * + * struct ntdb_header; + * struct { + * struct ntdb_used_record hash_header; + * ntdb_off_t hash_buckets[1 << ntdb->hash_bits]; + * } hash; + * struct ntdb_freetable ftable; + * struct { + * struct ntdb_free_record free_header; + * char forty_three[...]; + * } remainder; + */ +#define NEW_DATABASE_HDR_SIZE(hbits) \ + (sizeof(struct ntdb_header) \ + + sizeof(struct ntdb_used_record) + (sizeof(ntdb_off_t) << hbits) \ + + sizeof(struct ntdb_freetable) \ + + sizeof(struct ntdb_free_record)) -/* initialise a new database */ static enum NTDB_ERROR ntdb_new_database(struct ntdb_context *ntdb, - struct ntdb_attribute_seed *seed, - struct ntdb_header *hdr) + struct ntdb_attribute_seed *seed, + struct ntdb_header *rhdr) { /* We make it up in memory, then write it out if not internal */ - struct new_database *newdb; + struct ntdb_freetable *ftable; + struct ntdb_used_record *htable; + struct ntdb_header *hdr; + struct ntdb_free_record *remainder; + char *mem; unsigned int magic_len; ssize_t rlen; - size_t dbsize, remaindersize; + size_t dbsize, hashsize, hdrsize, remaindersize; enum NTDB_ERROR ecode; + hashsize = sizeof(ntdb_off_t) << ntdb->hash_bits; + /* Always make db a multiple of NTDB_PGSIZE */ - dbsize = (sizeof(*newdb) + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1); - remaindersize = dbsize - sizeof(*newdb); - newdb = ntdb->alloc_fn(ntdb, dbsize, ntdb->alloc_data); - if (!newdb) { + hdrsize = NEW_DATABASE_HDR_SIZE(ntdb->hash_bits); + dbsize = (hdrsize + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1); + + mem = ntdb->alloc_fn(ntdb, dbsize, ntdb->alloc_data); + if (!mem) { return 
ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_new_database: failed to allocate"); } + hdr = (void *)mem; + htable = (void *)(mem + sizeof(*hdr)); + ftable = (void *)(mem + sizeof(*hdr) + sizeof(*htable) + hashsize); + remainder = (void *)(mem + sizeof(*hdr) + sizeof(*htable) + hashsize + + sizeof(*ftable)); + /* Fill in the header */ - newdb->hdr.version = NTDB_VERSION; + hdr->version = NTDB_VERSION; if (seed) - newdb->hdr.hash_seed = seed->seed; + hdr->hash_seed = seed->seed; else - newdb->hdr.hash_seed = random_number(ntdb); - newdb->hdr.hash_test = NTDB_HASH_MAGIC; - newdb->hdr.hash_test = ntdb->hash_fn(&newdb->hdr.hash_test, - sizeof(newdb->hdr.hash_test), - newdb->hdr.hash_seed, - ntdb->hash_data); - newdb->hdr.recovery = 0; - newdb->hdr.features_used = newdb->hdr.features_offered = NTDB_FEATURE_MASK; - newdb->hdr.seqnum = 0; - newdb->hdr.capabilities = 0; - memset(newdb->hdr.reserved, 0, sizeof(newdb->hdr.reserved)); - /* Initial hashes are empty. */ - memset(newdb->hdr.hashtable, 0, sizeof(newdb->hdr.hashtable)); + hdr->hash_seed = random_number(ntdb); + hdr->hash_test = NTDB_HASH_MAGIC; + hdr->hash_test = ntdb->hash_fn(&hdr->hash_test, + sizeof(hdr->hash_test), + hdr->hash_seed, + ntdb->hash_data); + hdr->hash_bits = ntdb->hash_bits; + hdr->recovery = 0; + hdr->features_used = hdr->features_offered = NTDB_FEATURE_MASK; + hdr->seqnum = 0; + hdr->capabilities = 0; + memset(hdr->reserved, 0, sizeof(hdr->reserved)); + + /* Hash is all zero after header. */ + set_header(NULL, htable, NTDB_HTABLE_MAGIC, 0, hashsize, hashsize); + memset(htable + 1, 0, hashsize); /* Free is empty. 
*/ - newdb->hdr.free_table = offsetof(struct new_database, ftable); - memset(&newdb->ftable, 0, sizeof(newdb->ftable)); - ecode = set_header(NULL, &newdb->ftable.hdr, NTDB_FTABLE_MAGIC, 0, - sizeof(newdb->ftable) - sizeof(newdb->ftable.hdr), - sizeof(newdb->ftable) - sizeof(newdb->ftable.hdr), - 0); + hdr->free_table = (char *)ftable - (char *)hdr; + memset(ftable, 0, sizeof(*ftable)); + ecode = set_header(NULL, &ftable->hdr, NTDB_FTABLE_MAGIC, 0, + sizeof(*ftable) - sizeof(ftable->hdr), + sizeof(*ftable) - sizeof(ftable->hdr)); if (ecode != NTDB_SUCCESS) { goto out; } /* Rest of database is a free record, containing junk. */ - newdb->remainder.ftable_and_len - = (remaindersize + sizeof(newdb->remainder) + remaindersize = dbsize - hdrsize; + remainder->ftable_and_len + = (remaindersize + sizeof(*remainder) - sizeof(struct ntdb_used_record)); - newdb->remainder.next = 0; - newdb->remainder.magic_and_prev + remainder->next = 0; + remainder->magic_and_prev = (NTDB_FREE_MAGIC << (64-NTDB_OFF_UPPER_STEAL)) - | offsetof(struct new_database, remainder); - memset(&newdb->remainder + 1, 0x43, remaindersize); + | ((char *)remainder - (char *)hdr); + memset(remainder + 1, 0x43, remaindersize); /* Put in our single free entry. 
*/ - newdb->ftable.buckets[size_to_bucket(remaindersize)] = - offsetof(struct new_database, remainder); + ftable->buckets[size_to_bucket(remaindersize)] = + (char *)remainder - (char *)hdr; /* Magic food */ - memset(newdb->hdr.magic_food, 0, sizeof(newdb->hdr.magic_food)); - strcpy(newdb->hdr.magic_food, NTDB_MAGIC_FOOD); + memset(hdr->magic_food, 0, sizeof(hdr->magic_food)); + strcpy(hdr->magic_food, NTDB_MAGIC_FOOD); /* This creates an endian-converted database, as if read from disk */ - magic_len = sizeof(newdb->hdr.magic_food); - ntdb_convert(ntdb, - (char *)&newdb->hdr + magic_len, - sizeof(*newdb) - magic_len); + magic_len = sizeof(hdr->magic_food); + ntdb_convert(ntdb, (char *)hdr + magic_len, hdrsize - magic_len); - *hdr = newdb->hdr; + /* Return copy of header. */ + *rhdr = *hdr; if (ntdb->flags & NTDB_INTERNAL) { ntdb->file->map_size = dbsize; - ntdb->file->map_ptr = newdb; + ntdb->file->map_ptr = hdr; return NTDB_SUCCESS; } if (lseek(ntdb->file->fd, 0, SEEK_SET) == -1) { @@ -207,7 +233,7 @@ static enum NTDB_ERROR ntdb_new_database(struct ntdb_context *ntdb, goto out; } - rlen = write(ntdb->file->fd, newdb, dbsize); + rlen = write(ntdb->file->fd, hdr, dbsize); if (rlen != dbsize) { if (rlen >= 0) errno = ENOSPC; @@ -218,7 +244,7 @@ static enum NTDB_ERROR ntdb_new_database(struct ntdb_context *ntdb, } out: - ntdb->free_fn(newdb, ntdb->alloc_data); + ntdb->free_fn(hdr, ntdb->alloc_data); return ecode; } @@ -487,6 +513,7 @@ _PUBLIC_ struct ntdb_context *ntdb_open(const char *name, int ntdb_flags, ntdb->alloc_fn = default_alloc; ntdb->expand_fn = default_expand; ntdb->free_fn = default_free; + ntdb->hash_bits = NTDB_DEFAULT_HBITS; /* 64k of hash by default. 
*/ while (attr) { switch (attr->base.attr) { @@ -687,6 +714,7 @@ _PUBLIC_ struct ntdb_context *ntdb_open(const char *name, int ntdb_flags, ntdb_context_init(ntdb); ntdb_convert(ntdb, &hdr, sizeof(hdr)); + ntdb->hash_bits = hdr.hash_bits; ntdb->hash_seed = hdr.hash_seed; hash_test = NTDB_HASH_MAGIC; hash_test = ntdb_hash(ntdb, &hash_test, sizeof(hash_test)); |