author    Jeremy Allison <jra@samba.org>    2001-08-02 20:17:40 +0000
committer Jeremy Allison <jra@samba.org>    2001-08-02 20:17:40 +0000
commit    3538c8bea92da1a439c357ff0d25e369687717ac (patch)
tree      73f03bb436cf48878f6f494f781bb1205d53e349
parent    b930ed51953bdccb6e4476a7b382936270373755 (diff)
Added mmap fix to pass lock test from HP.
Ok - now we're no longer trying to reach a silly 1k loc target, change the
formatting to be *readable* - eg. change

	if (x) y
	else z

to be:

	if (x)
		y
	else
		z

and other compact sillyness. Oh look - when I did this I found some areas
where we *WEREN'T CHECKING SYSTEM CALL ERROR RETURNS !!!!*
CompSci 101 guys....... :-).

Jeremy.

(This used to be commit 38d2e6983a6be8232ae7ce21a950d69dd95ce3e6)
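To make the commit message concrete, here is a minimal standalone C sketch (not part of this patch; the file name is purely illustrative) of the pattern the reformatting exposed: checking the return value of every system call such as lseek() and ftruncate() instead of silently ignoring failure, as the hunks around tdb_new_database() and tdb_open() below now do.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

/* Truncate a file and reposition to its start, checking every
 * system call return instead of assuming success. */
static int reset_file(int fd)
{
	if (ftruncate(fd, 0) == -1) {
		fprintf(stderr, "ftruncate failed: %s\n", strerror(errno));
		return -1;
	}
	if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
		fprintf(stderr, "lseek failed: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}

int main(void)
{
	/* "example.tdb" is just an illustrative file name. */
	int fd = open("example.tdb", O_RDWR | O_CREAT, 0600);
	if (fd == -1) {
		fprintf(stderr, "open failed: %s\n", strerror(errno));
		return 1;
	}
	if (reset_file(fd) == -1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}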
-rw-r--r--	source3/tdb/tdb.c	337
1 file changed, 222 insertions(+), 115 deletions(-)
diff --git a/source3/tdb/tdb.c b/source3/tdb/tdb.c
index c84af49b31..5c116bcf7a 100644
--- a/source3/tdb/tdb.c
+++ b/source3/tdb/tdb.c
@@ -78,14 +78,21 @@ static TDB_CONTEXT *tdbs = NULL;
static void tdb_munmap(TDB_CONTEXT *tdb)
{
+ if (tdb->flags & TDB_INTERNAL)
+ return;
+
#ifdef HAVE_MMAP
- if (tdb->map_ptr) munmap(tdb->map_ptr, tdb->map_size);
+ if (tdb->map_ptr)
+ munmap(tdb->map_ptr, tdb->map_size);
#endif
tdb->map_ptr = NULL;
}
static void tdb_mmap(TDB_CONTEXT *tdb)
{
+ if (tdb->flags & TDB_INTERNAL)
+ return;
+
#ifdef HAVE_MMAP
if (!(tdb->flags & TDB_NOMMAP)) {
tdb->map_ptr = mmap(NULL, tdb->map_size,
@@ -111,7 +118,8 @@ static void tdb_mmap(TDB_CONTEXT *tdb)
static void *convert(void *buf, u32 size)
{
u32 i, *p = buf;
- for (i = 0; i < size / 4; i++) p[i] = TDB_BYTEREV(p[i]);
+ for (i = 0; i < size / 4; i++)
+ p[i] = TDB_BYTEREV(p[i]);
return buf;
}
#define DOCONV() (tdb->flags & TDB_CONVERT)
@@ -145,8 +153,10 @@ static int tdb_brlock(TDB_CONTEXT *tdb, tdb_off offset,
{
struct flock fl;
- if (tdb->flags & TDB_NOLOCK) return 0;
- if (tdb->read_only) return -1;
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+ if (tdb->read_only)
+ return -1;
fl.l_type = rw_type;
fl.l_whence = SEEK_SET;
@@ -172,7 +182,8 @@ static int tdb_lock(TDB_CONTEXT *tdb, int list, int ltype)
list, ltype));
return -1;
}
- if (tdb->flags & TDB_NOLOCK) return 0;
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
/* Since fcntl locks don't nest, we do a lock for the first one,
and simply bump the count for future ones */
@@ -197,16 +208,21 @@ static int tdb_lock(TDB_CONTEXT *tdb, int list, int ltype)
/* unlock the database: returns void because it's too late for errors. */
static void tdb_unlock(TDB_CONTEXT *tdb, int list, int ltype)
{
- if (tdb->flags & TDB_NOLOCK) return;
+ if (tdb->flags & TDB_NOLOCK)
+ return;
/* Sanity checks */
- if (list < -1 || list >= (int)tdb->header.hash_size) return;
- if (tdb->locked[list+1].count==0) return;
+ if (list < -1 || list >= (int)tdb->header.hash_size)
+ return;
+ if (tdb->locked[list+1].count==0)
+ return;
if (tdb->locked[list+1].count == 1) {
/* Down to last nested lock: unlock underneath */
- if (tdb->header.rwlocks) tdb_spinunlock(tdb, list, ltype);
- else tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK, F_SETLKW, 0);
+ if (tdb->header.rwlocks)
+ tdb_spinunlock(tdb, list, ltype);
+ else
+ tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK, F_SETLKW, 0);
}
tdb->locked[list+1].count--;
}
@@ -232,8 +248,10 @@ static u32 tdb_hash(TDB_DATA *key)
static int tdb_oob(TDB_CONTEXT *tdb, tdb_off len, int probe)
{
struct stat st;
- if (len <= tdb->map_size) return 0;
- if (tdb->flags & TDB_INTERNAL) return 0;
+ if (len <= tdb->map_size)
+ return 0;
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
if (fstat(tdb->fd, &st) == -1)
return TDB_ERRCODE(TDB_ERR_IO, -1);
@@ -256,9 +274,11 @@ static int tdb_oob(TDB_CONTEXT *tdb, tdb_off len, int probe)
/* write a lump of data at a specified offset */
static int tdb_write(TDB_CONTEXT *tdb, tdb_off off, void *buf, tdb_len len)
{
- if (tdb_oob(tdb, off + len, 0) != 0) return -1;
+ if (tdb_oob(tdb, off + len, 0) != 0)
+ return -1;
- if (tdb->map_ptr) memcpy(off + (char *)tdb->map_ptr, buf, len);
+ if (tdb->map_ptr)
+ memcpy(off + (char *)tdb->map_ptr, buf, len);
else if (lseek(tdb->fd, off, SEEK_SET) != off
|| write(tdb->fd, buf, len) != (ssize_t)len) {
TDB_LOG((tdb, 0,"tdb_write failed at %d len=%d (%s)\n",
@@ -271,16 +291,19 @@ static int tdb_write(TDB_CONTEXT *tdb, tdb_off off, void *buf, tdb_len len)
/* read a lump of data at a specified offset, maybe convert */
static int tdb_read(TDB_CONTEXT *tdb,tdb_off off,void *buf,tdb_len len,int cv)
{
- if (tdb_oob(tdb, off + len, 0) != 0) return -1;
+ if (tdb_oob(tdb, off + len, 0) != 0)
+ return -1;
- if (tdb->map_ptr) memcpy(buf, off + (char *)tdb->map_ptr, len);
+ if (tdb->map_ptr)
+ memcpy(buf, off + (char *)tdb->map_ptr, len);
else if (lseek(tdb->fd, off, SEEK_SET) != off
|| read(tdb->fd, buf, len) != (ssize_t)len) {
TDB_LOG((tdb, 0,"tdb_read failed at %d len=%d (%s)\n",
off, len, strerror(errno)));
return TDB_ERRCODE(TDB_ERR_IO, -1);
}
- if (cv) convert(buf, len);
+ if (cv)
+ convert(buf, len);
return 0;
}
@@ -315,7 +338,8 @@ static int ofs_write(TDB_CONTEXT *tdb, tdb_off offset, tdb_off *d)
/* read/write a record */
static int rec_read(TDB_CONTEXT *tdb, tdb_off offset, struct list_struct *rec)
{
- if (tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1) return -1;
+ if (tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
if (TDB_BAD_MAGIC(rec)) {
TDB_LOG((tdb, 0,"rec_read bad magic 0x%x at offset=%d\n", rec->magic, offset));
return TDB_ERRCODE(TDB_ERR_CORRUPT, -1);
@@ -331,13 +355,15 @@ static int rec_write(TDB_CONTEXT *tdb, tdb_off offset, struct list_struct *rec)
/* read a freelist record and check for simple errors */
static int rec_free_read(TDB_CONTEXT *tdb, tdb_off off, struct list_struct *rec)
{
- if (tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1) return -1;
+ if (tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
if (rec->magic != TDB_FREE_MAGIC) {
TDB_LOG((tdb, 0,"rec_free_read bad magic 0x%x at offset=%d\n",
rec->magic, off));
return TDB_ERRCODE(TDB_ERR_CORRUPT, -1);
}
- if (tdb_oob(tdb, rec->next+sizeof(*rec), 0) != 0) return -1;
+ if (tdb_oob(tdb, rec->next+sizeof(*rec), 0) != 0)
+ return -1;
return 0;
}
@@ -392,7 +418,8 @@ static void tdb_dump_chain(TDB_CONTEXT *tdb, int i)
return;
}
- if (rec_ptr) printf("hash=%d\n", i);
+ if (rec_ptr)
+ printf("hash=%d\n", i);
while (rec_ptr) {
rec_ptr = tdb_dump_record(tdb, rec_ptr);
@@ -476,7 +503,8 @@ static int tdb_free(TDB_CONTEXT *tdb, tdb_off offset, struct list_struct *rec)
tdb_off right, left;
/* Allocation and tailer lock */
- if (tdb_lock(tdb, -1, F_WRLCK) != 0) return -1;
+ if (tdb_lock(tdb, -1, F_WRLCK) != 0)
+ return -1;
/* set an initial tailer, so if we fail we don't leave a bogus record */
update_tailer(tdb, offset, rec);
@@ -582,7 +610,8 @@ static int expand_file(TDB_CONTEXT *tdb, tdb_off size, tdb_off addition)
/* now fill the file with something. This ensures that the file isn't sparse, which would be
very bad if we ran out of disk. This must be done with write, not via mmap */
memset(buf, 0x42, sizeof(buf));
- if (lseek(tdb->fd, size, SEEK_SET) != size) return -1;
+ if (lseek(tdb->fd, size, SEEK_SET) != size)
+ return -1;
while (addition) {
int n = addition>sizeof(buf)?sizeof(buf):addition;
int ret = write(tdb->fd, buf, n);
@@ -627,22 +656,24 @@ static int tdb_expand(TDB_CONTEXT *tdb, tdb_off size)
/* expand the file itself */
if (!(tdb->flags & TDB_INTERNAL)) {
- if (expand_file(tdb, tdb->map_size, size) != 0) goto fail;
+ if (expand_file(tdb, tdb->map_size, size) != 0)
+ goto fail;
}
tdb->map_size += size;
if (tdb->flags & TDB_INTERNAL)
tdb->map_ptr = realloc(tdb->map_ptr, tdb->map_size);
+ else {
+ /*
+ * We must ensure the file is remapped before adding the space
+ * to ensure consistency with systems like OpenBSD where
+ * writes and mmaps are not consistent.
+ */
- /*
- * We must ensure the file is remapped before adding the space
- * to ensure consistency with systems like OpenBSD where
- * writes and mmaps are not consistent.
- */
-
- /* We're ok if the mmap fails as we'll fallback to read/write */
- tdb_mmap(tdb);
+ /* We're ok if the mmap fails as we'll fallback to read/write */
+ tdb_mmap(tdb);
+ }
/* form a new freelist record */
memset(&rec,'\0',sizeof(rec));
@@ -650,7 +681,8 @@ static int tdb_expand(TDB_CONTEXT *tdb, tdb_off size)
/* link it into the free list */
offset = tdb->map_size - size;
- if (tdb_free(tdb, offset, &rec) == -1) goto fail;
+ if (tdb_free(tdb, offset, &rec) == -1)
+ goto fail;
tdb_unlock(tdb, -1, F_WRLCK);
return 0;
@@ -671,7 +703,8 @@ static tdb_off tdb_allocate(TDB_CONTEXT *tdb, tdb_len length,
tdb_off rec_ptr, last_ptr, newrec_ptr;
struct list_struct newrec;
- if (tdb_lock(tdb, -1, F_WRLCK) == -1) return 0;
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1)
+ return 0;
/* Extra bytes required for tailer */
length += sizeof(tdb_off);
@@ -680,11 +713,13 @@ static tdb_off tdb_allocate(TDB_CONTEXT *tdb, tdb_len length,
last_ptr = FREELIST_TOP;
/* read in the freelist top */
- if (ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) goto fail;
+ if (ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
+ goto fail;
/* keep looking until we find a freelist record big enough */
while (rec_ptr) {
- if (rec_free_read(tdb, rec_ptr, rec) == -1) goto fail;
+ if (rec_free_read(tdb, rec_ptr, rec) == -1)
+ goto fail;
if (rec->rec_len >= length) {
/* found it - now possibly split it up */
@@ -736,7 +771,8 @@ static tdb_off tdb_allocate(TDB_CONTEXT *tdb, tdb_len length,
}
/* we didn't find enough space. See if we can expand the
database and if we can then try again */
- if (tdb_expand(tdb, length + sizeof(*rec)) == 0) goto again;
+ if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
+ goto again;
fail:
tdb_unlock(tdb, -1, F_WRLCK);
return 0;
@@ -746,15 +782,16 @@ static tdb_off tdb_allocate(TDB_CONTEXT *tdb, tdb_len length,
static int tdb_new_database(TDB_CONTEXT *tdb, int hash_size)
{
struct tdb_header *newdb;
- int size, ret;
+ int size, ret = -1;
/* We make it up in memory, then write it out if not internal */
size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off);
- if (!(newdb = calloc(size, 1))) return TDB_ERRCODE(TDB_ERR_OOM, -1);
+ if (!(newdb = calloc(size, 1)))
+ return TDB_ERRCODE(TDB_ERR_OOM, -1);
- /* Fill in the header */
- newdb->version = TDB_VERSION;
- newdb->hash_size = hash_size;
+ /* Fill in the header */
+ newdb->version = TDB_VERSION;
+ newdb->hash_size = hash_size;
#ifdef USE_SPINLOCKS
newdb->rwlocks = size;
#endif
@@ -766,16 +803,23 @@ static int tdb_new_database(TDB_CONTEXT *tdb, int hash_size)
CONVERT(*newdb);
return 0;
}
- lseek(tdb->fd, 0, SEEK_SET);
- ftruncate(tdb->fd, 0);
+ if (lseek(tdb->fd, 0, SEEK_SET) == -1)
+ goto fail;
+
+ if (ftruncate(tdb->fd, 0) == -1)
+ goto fail;
+
/* This creates an endian-converted header, as if read from disk */
CONVERT(*newdb);
memcpy(&tdb->header, newdb, sizeof(tdb->header));
/* Don't endian-convert the magic food! */
memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);
- if (write(tdb->fd, newdb, size) != size) ret = -1;
- else ret = tdb_create_rwlocks(tdb->fd, hash_size);
+ if (write(tdb->fd, newdb, size) != size)
+ ret = -1;
+ else
+ ret = tdb_create_rwlocks(tdb->fd, hash_size);
+ fail:
free(newdb);
return ret;
}
@@ -788,18 +832,21 @@ static tdb_off tdb_find(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash,
tdb_off rec_ptr;
/* read in the hash top */
- if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1) return 0;
+ if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ return 0;
/* keep looking until we find the right record */
while (rec_ptr) {
- if (rec_read(tdb, rec_ptr, r) == -1) return 0;
+ if (rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
if (!TDB_DEAD(r) && hash==r->full_hash && key.dsize==r->key_len) {
char *k;
/* a very likely hit - read the key */
k = tdb_alloc_read(tdb, rec_ptr + sizeof(*r),
r->key_len);
- if (!k) return 0;
+ if (!k)
+ return 0;
if (memcmp(key.dptr, k, key.dsize) == 0) {
free(k);
@@ -816,9 +863,11 @@ static tdb_off tdb_find(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash,
static int tdb_keylocked(TDB_CONTEXT *tdb, u32 hash)
{
u32 i;
- if (!tdb->lockedkeys) return 1;
+ if (!tdb->lockedkeys)
+ return 1;
for (i = 0; i < tdb->lockedkeys[0]; i++)
- if (tdb->lockedkeys[i+1] == hash) return 1;
+ if (tdb->lockedkeys[i+1] == hash)
+ return 1;
return TDB_ERRCODE(TDB_ERR_NOLOCK, 0);
}
@@ -829,8 +878,10 @@ static tdb_off tdb_find_lock(TDB_CONTEXT *tdb, TDB_DATA key, int locktype,
u32 hash, rec_ptr;
hash = tdb_hash(&key);
- if (!tdb_keylocked(tdb, hash)) return 0;
- if (tdb_lock(tdb, BUCKET(hash), locktype) == -1) return 0;
+ if (!tdb_keylocked(tdb, hash))
+ return 0;
+ if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
+ return 0;
if (!(rec_ptr = tdb_find(tdb, key, hash, rec)))
tdb_unlock(tdb, BUCKET(hash), locktype);
return rec_ptr;
@@ -857,7 +908,8 @@ const char *tdb_errorstr(TDB_CONTEXT *tdb)
{
u32 i;
for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++)
- if (tdb->ecode == emap[i].ecode) return emap[i].estring;
+ if (tdb->ecode == emap[i].ecode)
+ return emap[i].estring;
return "Invalid error code";
}
@@ -872,7 +924,8 @@ static int tdb_update(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf)
int ret = -1;
/* find entry */
- if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec))) return -1;
+ if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec)))
+ return -1;
/* must be long enough key, data and tailer */
if (rec.rec_len < key.dsize + dbuf.dsize + sizeof(tdb_off)) {
@@ -888,8 +941,8 @@ static int tdb_update(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf)
/* update size */
rec.data_len = dbuf.dsize;
ret = rec_write(tdb, rec_ptr, &rec);
- }
- else ret = 0;
+ } else
+ ret = 0;
out:
tdb_unlock(tdb, BUCKET(rec.full_hash), F_WRLCK);
return ret;
@@ -903,7 +956,8 @@ TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key)
TDB_DATA ret;
/* find which hash bucket it is in */
- if (!(rec_ptr = tdb_find_lock(tdb,key,F_RDLCK,&rec))) return tdb_null;
+ if (!(rec_ptr = tdb_find_lock(tdb,key,F_RDLCK,&rec)))
+ return tdb_null;
ret.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
rec.data_len);
@@ -922,7 +976,8 @@ int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key)
{
struct list_struct rec;
- if (tdb_find_lock(tdb, key, F_RDLCK, &rec) == 0) return 0;
+ if (tdb_find_lock(tdb, key, F_RDLCK, &rec) == 0)
+ return 0;
tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
return 1;
}
@@ -936,7 +991,9 @@ static int lock_record(TDB_CONTEXT *tdb, tdb_off off)
static int write_lock_record(TDB_CONTEXT *tdb, tdb_off off)
{
struct tdb_traverse_lock *i;
- for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) return -1;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ return -1;
return tdb_brlock(tdb, off, F_WRLCK, F_SETLK, 1);
}
static int write_unlock_record(TDB_CONTEXT *tdb, tdb_off off)
@@ -949,8 +1006,11 @@ static int unlock_record(TDB_CONTEXT *tdb, tdb_off off)
struct tdb_traverse_lock *i;
u32 count = 0;
- if (off == 0) return 0;
- for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) count++;
+ if (off == 0)
+ return 0;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ count++;
return (count == 1 ? tdb_brlock(tdb, off, F_UNLCK, F_SETLKW, 0) : 0);
}
@@ -970,16 +1030,21 @@ static int do_delete(TDB_CONTEXT *tdb, tdb_off rec_ptr, struct list_struct*rec)
write_unlock_record(tdb, rec_ptr);
/* find previous record in hash chain */
- if (ofs_read(tdb, TDB_HASH_TOP(rec->full_hash), &i) == -1) return -1;
+ if (ofs_read(tdb, TDB_HASH_TOP(rec->full_hash), &i) == -1)
+ return -1;
for (last_ptr = 0; i != rec_ptr; last_ptr = i, i = lastrec.next)
- if (rec_read(tdb, i, &lastrec) == -1) return -1;
+ if (rec_read(tdb, i, &lastrec) == -1)
+ return -1;
/* unlink it: next ptr is at start of record. */
- if (last_ptr == 0) last_ptr = TDB_HASH_TOP(rec->full_hash);
- if (ofs_write(tdb, last_ptr, &rec->next) == -1) return -1;
+ if (last_ptr == 0)
+ last_ptr = TDB_HASH_TOP(rec->full_hash);
+ if (ofs_write(tdb, last_ptr, &rec->next) == -1)
+ return -1;
/* recover the space */
- if (tdb_free(tdb, rec_ptr, rec) == -1) return -1;
+ if (tdb_free(tdb, rec_ptr, rec) == -1)
+ return -1;
return 0;
}
@@ -990,11 +1055,13 @@ static int tdb_next_lock(TDB_CONTEXT *tdb, struct tdb_traverse_lock *tlock,
int want_next = (tlock->off != 0);
/* No traversal allows if you've called tdb_lockkeys() */
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
/* Lock each chain from the start one. */
for (; tlock->hash < tdb->header.hash_size; tlock->hash++) {
- if (tdb_lock(tdb, tlock->hash, F_WRLCK) == -1) return -1;
+ if (tdb_lock(tdb, tlock->hash, F_WRLCK) == -1)
+ return -1;
/* No previous record? Start at top of chain. */
if (!tlock->off) {
@@ -1008,14 +1075,16 @@ static int tdb_next_lock(TDB_CONTEXT *tdb, struct tdb_traverse_lock *tlock,
if (want_next) {
/* We have offset of old record: grab next */
- if (rec_read(tdb, tlock->off, rec) == -1) goto fail;
+ if (rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
tlock->off = rec->next;
}
/* Iterate through chain */
while( tlock->off) {
tdb_off current;
- if (rec_read(tdb, tlock->off, rec) == -1) goto fail;
+ if (rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
if (!TDB_DEAD(rec)) {
/* Woohoo: we found one! */
lock_record(tdb, tlock->off);
@@ -1102,7 +1171,8 @@ TDB_DATA tdb_firstkey(TDB_CONTEXT *tdb)
unlock_record(tdb, tdb->travlocks.off);
tdb->travlocks.off = tdb->travlocks.hash = 0;
- if (tdb_next_lock(tdb, &tdb->travlocks, &rec) <= 0) return tdb_null;
+ if (tdb_next_lock(tdb, &tdb->travlocks, &rec) <= 0)
+ return tdb_null;
/* now read the key */
key.dsize = rec.key_len;
key.dptr =tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),key.dsize);
@@ -1120,7 +1190,8 @@ TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA oldkey)
/* Is locked key the old key? If so, traverse will be reliable. */
if (tdb->travlocks.off) {
- if (tdb_lock(tdb,tdb->travlocks.hash,F_WRLCK)) return tdb_null;
+ if (tdb_lock(tdb,tdb->travlocks.hash,F_WRLCK))
+ return tdb_null;
if (rec_read(tdb, tdb->travlocks.off, &rec) == -1
|| !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),
rec.key_len))
@@ -1138,7 +1209,8 @@ TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA oldkey)
if (!tdb->travlocks.off) {
/* No previous element: do normal find, and lock record */
tdb->travlocks.off = tdb_find_lock(tdb, oldkey, F_WRLCK, &rec);
- if (!tdb->travlocks.off) return tdb_null;
+ if (!tdb->travlocks.off)
+ return tdb_null;
tdb->travlocks.hash = BUCKET(rec.full_hash);
lock_record(tdb, tdb->travlocks.off);
}
@@ -1165,7 +1237,8 @@ int tdb_delete(TDB_CONTEXT *tdb, TDB_DATA key)
struct list_struct rec;
int ret;
- if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec))) return -1;
+ if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec)))
+ return -1;
ret = do_delete(tdb, rec_ptr, &rec);
tdb_unlock(tdb, BUCKET(rec.full_hash), F_WRLCK);
return ret;
@@ -1186,8 +1259,10 @@ int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
/* find which hash bucket it is in */
hash = tdb_hash(&key);
- if (!tdb_keylocked(tdb, hash)) return -1;
- if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1) return -1;
+ if (!tdb_keylocked(tdb, hash))
+ return -1;
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
/* check for it existing, on insert. */
if (flag == TDB_INSERT) {
@@ -1197,7 +1272,8 @@ int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
}
} else {
/* first try in-place update, on modify or replace. */
- if (tdb_update(tdb, key, dbuf) == 0) goto out;
+ if (tdb_update(tdb, key, dbuf) == 0)
+ goto out;
if (flag == TDB_MODIFY && tdb->ecode == TDB_ERR_NOEXIST)
goto fail;
}
@@ -1207,7 +1283,8 @@ int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
/* delete any existing record - if it doesn't exist we don't
care. Doing this first reduces fragmentation, and avoids
coalescing with `allocated' block before it's updated. */
- if (flag != TDB_INSERT) tdb_delete(tdb, key);
+ if (flag != TDB_INSERT)
+ tdb_delete(tdb, key);
/* Copy key+value *before* allocating free space in case malloc
fails and we are left with a dead spot in the tdb. */
@@ -1227,7 +1304,8 @@ int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
goto fail;
/* Read hash top into next ptr */
- if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1) goto fail;
+ if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1)
+ goto fail;
rec.key_len = key.dsize;
rec.data_len = dbuf.dsize;
@@ -1270,8 +1348,10 @@ TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags,
tdb.lockedkeys = NULL;
tdb.flags = tdb_flags;
- if ((open_flags & O_ACCMODE) == O_WRONLY) goto fail;
- if (hash_size == 0) hash_size = DEFAULT_HASH_SIZE;
+ if ((open_flags & O_ACCMODE) == O_WRONLY)
+ goto fail;
+ if (hash_size == 0)
+ hash_size = DEFAULT_HASH_SIZE;
if ((open_flags & O_ACCMODE) == O_RDONLY) {
tdb.read_only = 1;
/* read only databases don't do locking */
@@ -1286,16 +1366,19 @@ TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags,
goto internal;
}
- if ((tdb.fd = open(name, open_flags, mode)) == -1) goto fail;
+ if ((tdb.fd = open(name, open_flags, mode)) == -1)
+ goto fail;
/* ensure there is only one process initialising at once */
- tdb_brlock(&tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0);
-
+ if (tdb_brlock(&tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0) == -1)
+ goto fail;
+
/* we need to zero database if we are the only one with it open */
if ((locked = (tdb_brlock(&tdb, ACTIVE_LOCK, F_WRLCK, F_SETLK, 0) == 0))
&& (tdb_flags & TDB_CLEAR_IF_FIRST)) {
open_flags |= O_CREAT;
- ftruncate(tdb.fd, 0);
+ if (ftruncate(tdb.fd, 0) == -1)
+ goto fail;
}
if (read(tdb.fd, &tdb.header, sizeof(tdb.header)) != sizeof(tdb.header)
@@ -1303,16 +1386,19 @@ TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags,
|| (tdb.header.version != TDB_VERSION
&& !(rev = (tdb.header.version==TDB_BYTEREV(TDB_VERSION))))) {
/* its not a valid database - possibly initialise it */
- if (!(open_flags & O_CREAT)
- || tdb_new_database(&tdb, hash_size) == -1) goto fail;
+ if (!(open_flags & O_CREAT) || tdb_new_database(&tdb, hash_size) == -1)
+ goto fail;
rev = (tdb.flags & TDB_CONVERT);
}
- if (!rev) tdb.flags &= ~TDB_CONVERT;
+ if (!rev)
+ tdb.flags &= ~TDB_CONVERT;
else {
tdb.flags |= TDB_CONVERT;
convert(&tdb.header, sizeof(tdb.header));
}
- fstat(tdb.fd, &st);
+ if (fstat(tdb.fd, &st) == -1)
+ goto fail;
+
/* Is it already in the open list? If so, fail. */
for (i = tdbs; i; i = i->next) {
if (i->device == st.st_dev && i->inode == st.st_ino) {
@@ -1324,31 +1410,47 @@ TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags,
/* map the database and fill in the return structure */
tdb.name = (char *)strdup(name);
+ if (!tdb.name)
+ goto fail;
tdb.map_size = st.st_size;
tdb.device = st.st_dev;
tdb.inode = st.st_ino;
- tdb.locked = calloc(tdb.header.hash_size+1, sizeof(tdb.locked[0]));
- if (!tdb.locked) goto fail;
+ tdb.locked = calloc(tdb.header.hash_size+1, sizeof(tdb.locked[0]));
+ if (!tdb.locked)
+ goto fail;
tdb_mmap(&tdb);
if (locked) {
tdb_clear_spinlocks(&tdb);
- tdb_brlock(&tdb, ACTIVE_LOCK, F_UNLCK, F_SETLK, 0);
+ if (tdb_brlock(&tdb, ACTIVE_LOCK, F_UNLCK, F_SETLK, 0) == -1)
+ goto fail;
}
/* leave this lock in place to indicate it's in use */
- tdb_brlock(&tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0);
+ if (tdb_brlock(&tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0) == -1)
+ goto fail;
internal:
- if (!(ret = malloc(sizeof(tdb)))) goto fail;
+ if (!(ret = malloc(sizeof(tdb))))
+ goto fail;
*ret = tdb;
- tdb_brlock(&tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0);
+ if (tdb_brlock(&tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0) == -1)
+ goto fail;
ret->next = tdbs;
tdbs = ret;
return ret;
fail:
- if (tdb.name) free(tdb.name);
- if (tdb.fd != -1) close(tdb.fd);
- tdb_munmap(&tdb);
+ if (tdb.map_ptr) {
+ if (tdb.flags & TDB_INTERNAL)
+ free(tdb.map_ptr);
+ else
+ tdb_munmap(&tdb);
+ }
+ if (tdb.name)
+ free(tdb.name);
+ if (tdb.fd != -1)
+ close(tdb.fd);
+ if (tdb.locked)
+ free(tdb.locked);
return NULL;
}
@@ -1359,15 +1461,19 @@ int tdb_close(TDB_CONTEXT *tdb)
int ret = 0;
if (tdb->map_ptr) {
- if (tdb->flags & TDB_INTERNAL) free(tdb->map_ptr);
- else tdb_munmap(tdb);
+ if (tdb->flags & TDB_INTERNAL)
+ free(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
}
- if (tdb->name) free(tdb->name);
- if (tdb->fd != -1) {
+ if (tdb->name)
+ free(tdb->name);
+ if (tdb->fd != -1)
ret = close(tdb->fd);
- }
- if (tdb->locked) free(tdb->locked);
- if (tdb->lockedkeys) free(tdb->lockedkeys);
+ if (tdb->locked)
+ free(tdb->locked);
+ if (tdb->lockedkeys)
+ free(tdb->lockedkeys);
/* Remove from contexts list */
for (i = &tdbs; *i; i = &(*i)->next) {
@@ -1389,8 +1495,10 @@ int tdb_lockall(TDB_CONTEXT *tdb)
u32 i;
/* There are no locks on read-only dbs */
- if (tdb->read_only) return TDB_ERRCODE(TDB_ERR_LOCK, -1);
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->read_only)
+ return TDB_ERRCODE(TDB_ERR_LOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
for (i = 0; i < tdb->header.hash_size; i++)
if (tdb_lock(tdb, i, F_WRLCK))
break;
@@ -1409,7 +1517,8 @@ int tdb_lockall(TDB_CONTEXT *tdb)
void tdb_unlockall(TDB_CONTEXT *tdb)
{
u32 i;
- for (i=0; i < tdb->header.hash_size; i++) tdb_unlock(tdb, i, F_WRLCK);
+ for (i=0; i < tdb->header.hash_size; i++)
+ tdb_unlock(tdb, i, F_WRLCK);
}
int tdb_lockkeys(TDB_CONTEXT *tdb, u32 number, TDB_DATA keys[])
@@ -1417,7 +1526,8 @@ int tdb_lockkeys(TDB_CONTEXT *tdb, u32 number, TDB_DATA keys[])
u32 i, j, hash;
/* Can't lock more keys if already locked */
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
if (!(tdb->lockedkeys = malloc(sizeof(u32) * (number+1))))
return TDB_ERRCODE(TDB_ERR_OOM, -1);
/* First number in array is # keys */
@@ -1426,11 +1536,8 @@ int tdb_lockkeys(TDB_CONTEXT *tdb, u32 number, TDB_DATA keys[])
/* Insertion sort by bucket */
for (i = 0; i < number; i++) {
hash = tdb_hash(&keys[i]);
- for (j = 0;
- j < i && BUCKET(tdb->lockedkeys[j+1]) < BUCKET(hash);
- j++);
- memmove(&tdb->lockedkeys[j+2], &tdb->lockedkeys[j+1],
- sizeof(u32) * (i-j));
+ for (j = 0; j < i && BUCKET(tdb->lockedkeys[j+1]) < BUCKET(hash); j++);
+ memmove(&tdb->lockedkeys[j+2], &tdb->lockedkeys[j+1], sizeof(u32) * (i-j));
tdb->lockedkeys[j+1] = hash;
}
/* Finally, lock in order */