From 4a245a15803e3df93cdb197adfc1c1046c3a8716 Mon Sep 17 00:00:00 2001 From: Stefan Metzmacher Date: Mon, 16 Apr 2007 08:20:13 +0000 Subject: r22249: move tdb code to lib/tdb/ as in samba4 metze (This used to be commit c7def92a9c2e17c90061ef17bc14e36250574e85) --- source3/lib/tdb/common/transaction.c | 1053 ++++++++++++++++++++++++++++++++++ 1 file changed, 1053 insertions(+) create mode 100644 source3/lib/tdb/common/transaction.c (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c new file mode 100644 index 0000000000..640cd5014d --- /dev/null +++ b/source3/lib/tdb/common/transaction.c @@ -0,0 +1,1053 @@ + /* + Unix SMB/CIFS implementation. + + trivial database library + + Copyright (C) Andrew Tridgell 2005 + + ** NOTE! The following LGPL license applies to the tdb + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include "tdb_private.h" + +/* + transaction design: + + - only allow a single transaction at a time per database. This makes + using the transaction API simpler, as otherwise the caller would + have to cope with temporary failures in transactions that conflict + with other current transactions + + - keep the transaction recovery information in the same file as the + database, using a special 'transaction recovery' record pointed at + by the header. This removes the need for extra journal files as + used by some other databases + + - dynamically allocated the transaction recover record, re-using it + for subsequent transactions. If a larger record is needed then + tdb_free() the old record to place it on the normal tdb freelist + before allocating the new record + + - during transactions, keep a linked list of writes all that have + been performed by intercepting all tdb_write() calls. The hooked + transaction versions of tdb_read() and tdb_write() check this + linked list and try to use the elements of the list in preference + to the real database. + + - don't allow any locks to be held when a transaction starts, + otherwise we can end up with deadlock (plus lack of lock nesting + in posix locks would mean the lock is lost) + + - if the caller gains a lock during the transaction but doesn't + release it then fail the commit + + - allow for nested calls to tdb_transaction_start(), re-using the + existing transaction record. 
If the inner transaction is cancelled + then a subsequent commit will fail + + - keep a mirrored copy of the tdb hash chain heads to allow for the + fast hash heads scan on traverse, updating the mirrored copy in + the transaction version of tdb_write + + - allow callers to mix transaction and non-transaction use of tdb, + although once a transaction is started then an exclusive lock is + gained until the transaction is committed or cancelled + + - the commit stategy involves first saving away all modified data + into a linearised buffer in the transaction recovery area, then + marking the transaction recovery area with a magic value to + indicate a valid recovery record. In total 4 fsync/msync calls are + needed per commit to prevent race conditions. It might be possible + to reduce this to 3 or even 2 with some more work. + + - check for a valid recovery record on open of the tdb, while the + global lock is held. Automatically recover from the transaction + recovery area if needed, then continue with the open as + usual. This allows for smooth crash recovery with no administrator + intervention. + + - if TDB_NOSYNC is passed to flags in tdb_open then transactions are + still available, but no transaction recovery area is used and no + fsync/msync calls are made. + +*/ + +struct tdb_transaction_el { + struct tdb_transaction_el *next, *prev; + tdb_off_t offset; + tdb_len_t length; + unsigned char *data; +}; + +/* + hold the context of any current transaction +*/ +struct tdb_transaction { + /* we keep a mirrored copy of the tdb hash heads here so + tdb_next_hash_chain() can operate efficiently */ + u32 *hash_heads; + + /* the original io methods - used to do IOs to the real db */ + const struct tdb_methods *io_methods; + + /* the list of transaction elements. We use a doubly linked + list with a last pointer to allow us to keep the list + ordered, with first element at the front of the list. It + needs to be doubly linked as the read/write traversals need + to be backwards, while the commit needs to be forwards */ + struct tdb_transaction_el *elements, *elements_last; + + /* non-zero when an internal transaction error has + occurred. All write operations will then fail until the + transaction is ended */ + int transaction_error; + + /* when inside a transaction we need to keep track of any + nested tdb_transaction_start() calls, as these are allowed, + but don't create a new transaction */ + int nesting; + + /* old file size before transaction */ + tdb_len_t old_map_size; +}; + + +/* + read while in a transaction. 
We need to check first if the data is in our list + of transaction elements, then if not do a real read +*/ +static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf, + tdb_len_t len, int cv) +{ + struct tdb_transaction_el *el; + + /* we need to walk the list backwards to get the most recent data */ + for (el=tdb->transaction->elements_last;el;el=el->prev) { + tdb_len_t partial; + + if (off+len <= el->offset) { + continue; + } + if (off >= el->offset + el->length) { + continue; + } + + /* an overlapping read - needs to be split into up to + 2 reads and a memcpy */ + if (off < el->offset) { + partial = el->offset - off; + if (transaction_read(tdb, off, buf, partial, cv) != 0) { + goto fail; + } + len -= partial; + off += partial; + buf = (void *)(partial + (char *)buf); + } + if (off + len <= el->offset + el->length) { + partial = len; + } else { + partial = el->offset + el->length - off; + } + memcpy(buf, el->data + (off - el->offset), partial); + if (cv) { + tdb_convert(buf, len); + } + len -= partial; + off += partial; + buf = (void *)(partial + (char *)buf); + + if (len != 0 && transaction_read(tdb, off, buf, len, cv) != 0) { + goto fail; + } + + return 0; + } + + /* its not in the transaction elements - do a real read */ + return tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv); + +fail: + TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%d len=%d\n", off, len)); + tdb->ecode = TDB_ERR_IO; + tdb->transaction->transaction_error = 1; + return -1; +} + + +/* + write while in a transaction +*/ +static int transaction_write(struct tdb_context *tdb, tdb_off_t off, + const void *buf, tdb_len_t len) +{ + struct tdb_transaction_el *el, *best_el=NULL; + + if (len == 0) { + return 0; + } + + /* if the write is to a hash head, then update the transaction + hash heads */ + if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP && + off < FREELIST_TOP+TDB_HASHTABLE_SIZE(tdb)) { + u32 chain = (off-FREELIST_TOP) / sizeof(tdb_off_t); + memcpy(&tdb->transaction->hash_heads[chain], buf, len); + } + + /* first see if we can replace an existing entry */ + for (el=tdb->transaction->elements_last;el;el=el->prev) { + tdb_len_t partial; + + if (best_el == NULL && off == el->offset+el->length) { + best_el = el; + } + + if (off+len <= el->offset) { + continue; + } + if (off >= el->offset + el->length) { + continue; + } + + /* an overlapping write - needs to be split into up to + 2 writes and a memcpy */ + if (off < el->offset) { + partial = el->offset - off; + if (transaction_write(tdb, off, buf, partial) != 0) { + goto fail; + } + len -= partial; + off += partial; + buf = (const void *)(partial + (const char *)buf); + } + if (off + len <= el->offset + el->length) { + partial = len; + } else { + partial = el->offset + el->length - off; + } + memcpy(el->data + (off - el->offset), buf, partial); + len -= partial; + off += partial; + buf = (const void *)(partial + (const char *)buf); + + if (len != 0 && transaction_write(tdb, off, buf, len) != 0) { + goto fail; + } + + return 0; + } + + /* see if we can append the new entry to an existing entry */ + if (best_el && best_el->offset + best_el->length == off && + (off+len < tdb->transaction->old_map_size || + off > tdb->transaction->old_map_size)) { + unsigned char *data = best_el->data; + el = best_el; + el->data = (unsigned char *)realloc(el->data, + el->length + len); + if (el->data == NULL) { + tdb->ecode = TDB_ERR_OOM; + tdb->transaction->transaction_error = 1; + el->data = data; + return -1; + } + if (buf) { + 
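+			/* append the caller's bytes to the element we are
+			   extending; a NULL buf (as used by
+			   transaction_expand_file()) takes the memset branch
+			   below and pads with TDB_PAD_BYTE instead */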
memcpy(el->data + el->length, buf, len); + } else { + memset(el->data + el->length, TDB_PAD_BYTE, len); + } + el->length += len; + return 0; + } + + /* add a new entry at the end of the list */ + el = (struct tdb_transaction_el *)malloc(sizeof(*el)); + if (el == NULL) { + tdb->ecode = TDB_ERR_OOM; + tdb->transaction->transaction_error = 1; + return -1; + } + el->next = NULL; + el->prev = tdb->transaction->elements_last; + el->offset = off; + el->length = len; + el->data = (unsigned char *)malloc(len); + if (el->data == NULL) { + free(el); + tdb->ecode = TDB_ERR_OOM; + tdb->transaction->transaction_error = 1; + return -1; + } + if (buf) { + memcpy(el->data, buf, len); + } else { + memset(el->data, TDB_PAD_BYTE, len); + } + if (el->prev) { + el->prev->next = el; + } else { + tdb->transaction->elements = el; + } + tdb->transaction->elements_last = el; + return 0; + +fail: + TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n", off, len)); + tdb->ecode = TDB_ERR_IO; + tdb->transaction->transaction_error = 1; + return -1; +} + +/* + accelerated hash chain head search, using the cached hash heads +*/ +static void transaction_next_hash_chain(struct tdb_context *tdb, u32 *chain) +{ + u32 h = *chain; + for (;h < tdb->header.hash_size;h++) { + /* the +1 takes account of the freelist */ + if (0 != tdb->transaction->hash_heads[h+1]) { + break; + } + } + (*chain) = h; +} + +/* + out of bounds check during a transaction +*/ +static int transaction_oob(struct tdb_context *tdb, tdb_off_t len, int probe) +{ + if (len <= tdb->map_size) { + return 0; + } + return TDB_ERRCODE(TDB_ERR_IO, -1); +} + +/* + transaction version of tdb_expand(). +*/ +static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size, + tdb_off_t addition) +{ + /* add a write to the transaction elements, so subsequent + reads see the zero data */ + if (transaction_write(tdb, size, NULL, addition) != 0) { + return -1; + } + + return 0; +} + +/* + brlock during a transaction - ignore them +*/ +int transaction_brlock(struct tdb_context *tdb, tdb_off_t offset, + int rw_type, int lck_type, int probe, size_t len) +{ + return 0; +} + +static const struct tdb_methods transaction_methods = { + transaction_read, + transaction_write, + transaction_next_hash_chain, + transaction_oob, + transaction_expand_file, + transaction_brlock +}; + + +/* + start a tdb transaction. 
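+
+  A minimal caller-side sketch (illustrative only; the key and data
+  TDB_DATA variables are assumed and error logging is elided):
+
+    if (tdb_transaction_start(tdb) == 0) {
+            if (tdb_store(tdb, key, data, TDB_REPLACE) == 0) {
+                    tdb_transaction_commit(tdb);
+            } else {
+                    tdb_transaction_cancel(tdb);
+            }
+    }
+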
No token is returned, as only a single + transaction is allowed to be pending per tdb_context +*/ +int tdb_transaction_start(struct tdb_context *tdb) +{ + /* some sanity checks */ + if (tdb->read_only || (tdb->flags & TDB_INTERNAL) || tdb->traverse_read) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction on a read-only or internal db\n")); + tdb->ecode = TDB_ERR_EINVAL; + return -1; + } + + /* cope with nested tdb_transaction_start() calls */ + if (tdb->transaction != NULL) { + tdb->transaction->nesting++; + TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_start: nesting %d\n", + tdb->transaction->nesting)); + return 0; + } + + if (tdb->num_locks != 0 || tdb->global_lock.count) { + /* the caller must not have any locks when starting a + transaction as otherwise we'll be screwed by lack + of nested locks in posix */ + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction with locks held\n")); + tdb->ecode = TDB_ERR_LOCK; + return -1; + } + + if (tdb->travlocks.next != NULL) { + /* you cannot use transactions inside a traverse (although you can use + traverse inside a transaction) as otherwise you can end up with + deadlock */ + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction within a traverse\n")); + tdb->ecode = TDB_ERR_LOCK; + return -1; + } + + tdb->transaction = (struct tdb_transaction *) + calloc(sizeof(struct tdb_transaction), 1); + if (tdb->transaction == NULL) { + tdb->ecode = TDB_ERR_OOM; + return -1; + } + + /* get the transaction write lock. This is a blocking lock. As + discussed with Volker, there are a number of ways we could + make this async, which we will probably do in the future */ + if (tdb_brlock(tdb, TRANSACTION_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get transaction lock\n")); + tdb->ecode = TDB_ERR_LOCK; + SAFE_FREE(tdb->transaction); + return -1; + } + + /* get a read lock from the freelist to the end of file. 
This + is upgraded to a write lock during the commit */ + if (tdb_brlock(tdb, FREELIST_TOP, F_RDLCK, F_SETLKW, 0, 0) == -1) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get hash locks\n")); + tdb->ecode = TDB_ERR_LOCK; + goto fail; + } + + /* setup a copy of the hash table heads so the hash scan in + traverse can be fast */ + tdb->transaction->hash_heads = (u32 *) + calloc(tdb->header.hash_size+1, sizeof(u32)); + if (tdb->transaction->hash_heads == NULL) { + tdb->ecode = TDB_ERR_OOM; + goto fail; + } + if (tdb->methods->tdb_read(tdb, FREELIST_TOP, tdb->transaction->hash_heads, + TDB_HASHTABLE_SIZE(tdb), 0) != 0) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to read hash heads\n")); + tdb->ecode = TDB_ERR_IO; + goto fail; + } + + /* make sure we know about any file expansions already done by + anyone else */ + tdb->methods->tdb_oob(tdb, tdb->map_size + 1, 1); + tdb->transaction->old_map_size = tdb->map_size; + + /* finally hook the io methods, replacing them with + transaction specific methods */ + tdb->transaction->io_methods = tdb->methods; + tdb->methods = &transaction_methods; + + /* by calling this transaction write here, we ensure that we don't grow the + transaction linked list due to hash table updates */ + if (transaction_write(tdb, FREELIST_TOP, tdb->transaction->hash_heads, + TDB_HASHTABLE_SIZE(tdb)) != 0) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to prime hash table\n")); + tdb->ecode = TDB_ERR_IO; + goto fail; + } + + return 0; + +fail: + tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0); + tdb_brlock(tdb, TRANSACTION_LOCK, F_UNLCK, F_SETLKW, 0, 1); + SAFE_FREE(tdb->transaction->hash_heads); + SAFE_FREE(tdb->transaction); + return -1; +} + + +/* + cancel the current transaction +*/ +int tdb_transaction_cancel(struct tdb_context *tdb) +{ + if (tdb->transaction == NULL) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n")); + return -1; + } + + if (tdb->transaction->nesting != 0) { + tdb->transaction->transaction_error = 1; + tdb->transaction->nesting--; + return 0; + } + + tdb->map_size = tdb->transaction->old_map_size; + + /* free all the transaction elements */ + while (tdb->transaction->elements) { + struct tdb_transaction_el *el = tdb->transaction->elements; + tdb->transaction->elements = el->next; + free(el->data); + free(el); + } + + /* remove any global lock created during the transaction */ + if (tdb->global_lock.count != 0) { + tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 4*tdb->header.hash_size); + tdb->global_lock.count = 0; + } + + /* remove any locks created during the transaction */ + if (tdb->num_locks != 0) { + int i; + for (i=0;inum_lockrecs;i++) { + tdb_brlock(tdb,FREELIST_TOP+4*tdb->lockrecs[i].list, + F_UNLCK,F_SETLKW, 0, 1); + } + tdb->num_locks = 0; + tdb->num_lockrecs = 0; + SAFE_FREE(tdb->lockrecs); + } + + /* restore the normal io methods */ + tdb->methods = tdb->transaction->io_methods; + + tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0); + tdb_brlock(tdb, TRANSACTION_LOCK, F_UNLCK, F_SETLKW, 0, 1); + SAFE_FREE(tdb->transaction->hash_heads); + SAFE_FREE(tdb->transaction); + + return 0; +} + +/* + sync to disk +*/ +static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t length) +{ + if (fsync(tdb->fd) != 0) { + tdb->ecode = TDB_ERR_IO; + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n")); + return -1; + } +#ifdef MS_SYNC + if (tdb->map_ptr) { + tdb_off_t moffset = offset & ~(tdb->page_size-1); + if 
(msync(moffset + (char *)tdb->map_ptr, + length + (offset - moffset), MS_SYNC) != 0) { + tdb->ecode = TDB_ERR_IO; + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: msync failed - %s\n", + strerror(errno))); + return -1; + } + } +#endif + return 0; +} + + +/* + work out how much space the linearised recovery data will consume +*/ +static tdb_len_t tdb_recovery_size(struct tdb_context *tdb) +{ + struct tdb_transaction_el *el; + tdb_len_t recovery_size = 0; + + recovery_size = sizeof(u32); + for (el=tdb->transaction->elements;el;el=el->next) { + if (el->offset >= tdb->transaction->old_map_size) { + continue; + } + recovery_size += 2*sizeof(tdb_off_t) + el->length; + } + + return recovery_size; +} + +/* + allocate the recovery area, or use an existing recovery area if it is + large enough +*/ +static int tdb_recovery_allocate(struct tdb_context *tdb, + tdb_len_t *recovery_size, + tdb_off_t *recovery_offset, + tdb_len_t *recovery_max_size) +{ + struct list_struct rec; + const struct tdb_methods *methods = tdb->transaction->io_methods; + tdb_off_t recovery_head; + + if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery head\n")); + return -1; + } + + rec.rec_len = 0; + + if (recovery_head != 0 && + methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery record\n")); + return -1; + } + + *recovery_size = tdb_recovery_size(tdb); + + if (recovery_head != 0 && *recovery_size <= rec.rec_len) { + /* it fits in the existing area */ + *recovery_max_size = rec.rec_len; + *recovery_offset = recovery_head; + return 0; + } + + /* we need to free up the old recovery area, then allocate a + new one at the end of the file. 
Note that we cannot use + tdb_allocate() to allocate the new one as that might return + us an area that is being currently used (as of the start of + the transaction) */ + if (recovery_head != 0) { + if (tdb_free(tdb, recovery_head, &rec) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to free previous recovery area\n")); + return -1; + } + } + + /* the tdb_free() call might have increased the recovery size */ + *recovery_size = tdb_recovery_size(tdb); + + /* round up to a multiple of page size */ + *recovery_max_size = TDB_ALIGN(sizeof(rec) + *recovery_size, tdb->page_size) - sizeof(rec); + *recovery_offset = tdb->map_size; + recovery_head = *recovery_offset; + + if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size, + (tdb->map_size - tdb->transaction->old_map_size) + + sizeof(rec) + *recovery_max_size) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to create recovery area\n")); + return -1; + } + + /* remap the file (if using mmap) */ + methods->tdb_oob(tdb, tdb->map_size + 1, 1); + + /* we have to reset the old map size so that we don't try to expand the file + again in the transaction commit, which would destroy the recovery area */ + tdb->transaction->old_map_size = tdb->map_size; + + /* write the recovery header offset and sync - we can sync without a race here + as the magic ptr in the recovery record has not been set */ + CONVERT(recovery_head); + if (methods->tdb_write(tdb, TDB_RECOVERY_HEAD, + &recovery_head, sizeof(tdb_off_t)) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n")); + return -1; + } + + return 0; +} + + +/* + setup the recovery data that will be used on a crash during commit +*/ +static int transaction_setup_recovery(struct tdb_context *tdb, + tdb_off_t *magic_offset) +{ + struct tdb_transaction_el *el; + tdb_len_t recovery_size; + unsigned char *data, *p; + const struct tdb_methods *methods = tdb->transaction->io_methods; + struct list_struct *rec; + tdb_off_t recovery_offset, recovery_max_size; + tdb_off_t old_map_size = tdb->transaction->old_map_size; + u32 magic, tailer; + + /* + check that the recovery area has enough space + */ + if (tdb_recovery_allocate(tdb, &recovery_size, + &recovery_offset, &recovery_max_size) == -1) { + return -1; + } + + data = (unsigned char *)malloc(recovery_size + sizeof(*rec)); + if (data == NULL) { + tdb->ecode = TDB_ERR_OOM; + return -1; + } + + rec = (struct list_struct *)data; + memset(rec, 0, sizeof(*rec)); + + rec->magic = 0; + rec->data_len = recovery_size; + rec->rec_len = recovery_max_size; + rec->key_len = old_map_size; + CONVERT(rec); + + /* build the recovery data into a single blob to allow us to do a single + large write, which should be more efficient */ + p = data + sizeof(*rec); + for (el=tdb->transaction->elements;el;el=el->next) { + if (el->offset >= old_map_size) { + continue; + } + if (el->offset + el->length > tdb->transaction->old_map_size) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: transaction data over new region boundary\n")); + free(data); + tdb->ecode = TDB_ERR_CORRUPT; + return -1; + } + memcpy(p, &el->offset, 4); + memcpy(p+4, &el->length, 4); + if (DOCONV()) { + tdb_convert(p, 8); + } + /* the recovery area contains the old data, not the + new data, so we have to call the original tdb_read + method to get it */ + if (methods->tdb_read(tdb, el->offset, p + 8, el->length, 0) != 0) { + free(data); + tdb->ecode = TDB_ERR_IO; + return -1; + } + p += 8 + el->length; + } + + 
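+	/* at this point the blob holds, for every modified region that
+	   lies below the old end of file, an (offset, length) pair
+	   followed by the pre-transaction data for that region; the
+	   tailer written below completes the record layout */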
/* and the tailer */ + tailer = sizeof(*rec) + recovery_max_size; + memcpy(p, &tailer, 4); + CONVERT(p); + + /* write the recovery data to the recovery area */ + if (methods->tdb_write(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery data\n")); + free(data); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + /* as we don't have ordered writes, we have to sync the recovery + data before we update the magic to indicate that the recovery + data is present */ + if (transaction_sync(tdb, recovery_offset, sizeof(*rec) + recovery_size) == -1) { + free(data); + return -1; + } + + free(data); + + magic = TDB_RECOVERY_MAGIC; + CONVERT(magic); + + *magic_offset = recovery_offset + offsetof(struct list_struct, magic); + + if (methods->tdb_write(tdb, *magic_offset, &magic, sizeof(magic)) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery magic\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + /* ensure the recovery magic marker is on disk */ + if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) { + return -1; + } + + return 0; +} + +/* + commit the current transaction +*/ +int tdb_transaction_commit(struct tdb_context *tdb) +{ + const struct tdb_methods *methods; + tdb_off_t magic_offset = 0; + u32 zero = 0; + + if (tdb->transaction == NULL) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n")); + return -1; + } + + if (tdb->transaction->transaction_error) { + tdb->ecode = TDB_ERR_IO; + tdb_transaction_cancel(tdb); + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: transaction error pending\n")); + return -1; + } + + if (tdb->transaction->nesting != 0) { + tdb->transaction->nesting--; + return 0; + } + + /* check for a null transaction */ + if (tdb->transaction->elements == NULL) { + tdb_transaction_cancel(tdb); + return 0; + } + + methods = tdb->transaction->io_methods; + + /* if there are any locks pending then the caller has not + nested their locks properly, so fail the transaction */ + if (tdb->num_locks || tdb->global_lock.count) { + tdb->ecode = TDB_ERR_LOCK; + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: locks pending on commit\n")); + tdb_transaction_cancel(tdb); + return -1; + } + + /* upgrade the main transaction lock region to a write lock */ + if (tdb_brlock_upgrade(tdb, FREELIST_TOP, 0) == -1) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to upgrade hash locks\n")); + tdb->ecode = TDB_ERR_LOCK; + tdb_transaction_cancel(tdb); + return -1; + } + + /* get the global lock - this prevents new users attaching to the database + during the commit */ + if (tdb_brlock(tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) { + TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: failed to get global lock\n")); + tdb->ecode = TDB_ERR_LOCK; + tdb_transaction_cancel(tdb); + return -1; + } + + if (!(tdb->flags & TDB_NOSYNC)) { + /* write the recovery data to the end of the file */ + if (transaction_setup_recovery(tdb, &magic_offset) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: failed to setup recovery data\n")); + tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1); + tdb_transaction_cancel(tdb); + return -1; + } + } + + /* expand the file to the new size if needed */ + if (tdb->map_size != tdb->transaction->old_map_size) { + if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size, + tdb->map_size - + tdb->transaction->old_map_size) == -1) { + 
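+			/* we could not grow the file to its post-transaction
+			   size, so abort before any transaction data is
+			   written back to the database */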
tdb->ecode = TDB_ERR_IO; + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: expansion failed\n")); + tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1); + tdb_transaction_cancel(tdb); + return -1; + } + tdb->map_size = tdb->transaction->old_map_size; + methods->tdb_oob(tdb, tdb->map_size + 1, 1); + } + + /* perform all the writes */ + while (tdb->transaction->elements) { + struct tdb_transaction_el *el = tdb->transaction->elements; + + if (methods->tdb_write(tdb, el->offset, el->data, el->length) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n")); + + /* we've overwritten part of the data and + possibly expanded the file, so we need to + run the crash recovery code */ + tdb->methods = methods; + tdb_transaction_recover(tdb); + + tdb_transaction_cancel(tdb); + tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1); + + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed\n")); + return -1; + } + tdb->transaction->elements = el->next; + free(el->data); + free(el); + } + + if (!(tdb->flags & TDB_NOSYNC)) { + /* ensure the new data is on disk */ + if (transaction_sync(tdb, 0, tdb->map_size) == -1) { + return -1; + } + + /* remove the recovery marker */ + if (methods->tdb_write(tdb, magic_offset, &zero, 4) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: failed to remove recovery magic\n")); + return -1; + } + + /* ensure the recovery marker has been removed on disk */ + if (transaction_sync(tdb, magic_offset, 4) == -1) { + return -1; + } + } + + tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1); + + /* + TODO: maybe write to some dummy hdr field, or write to magic + offset without mmap, before the last sync, instead of the + utime() call + */ + + /* on some systems (like Linux 2.6.x) changes via mmap/msync + don't change the mtime of the file, this means the file may + not be backed up (as tdb rounding to block sizes means that + file size changes are quite rare too). The following forces + mtime changes when a transaction completes */ +#ifdef HAVE_UTIME + utime(tdb->name, NULL); +#endif + + /* use a transaction cancel to free memory and remove the + transaction locks */ + tdb_transaction_cancel(tdb); + return 0; +} + + +/* + recover from an aborted transaction. 
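+  As described in the design notes at the top of this file, this is
+  normally invoked automatically when a tdb is opened and a valid
+  recovery record is found.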
Must be called with exclusive + database write access already established (including the global + lock to prevent new processes attaching) +*/ +int tdb_transaction_recover(struct tdb_context *tdb) +{ + tdb_off_t recovery_head, recovery_eof; + unsigned char *data, *p; + u32 zero = 0; + struct list_struct rec; + + /* find the recovery area */ + if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery head\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + if (recovery_head == 0) { + /* we have never allocated a recovery record */ + return 0; + } + + /* read the recovery record */ + if (tdb->methods->tdb_read(tdb, recovery_head, &rec, + sizeof(rec), DOCONV()) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery record\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + if (rec.magic != TDB_RECOVERY_MAGIC) { + /* there is no valid recovery data */ + return 0; + } + + if (tdb->read_only) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: attempt to recover read only database\n")); + tdb->ecode = TDB_ERR_CORRUPT; + return -1; + } + + recovery_eof = rec.key_len; + + data = (unsigned char *)malloc(rec.data_len); + if (data == NULL) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to allocate recovery data\n")); + tdb->ecode = TDB_ERR_OOM; + return -1; + } + + /* read the full recovery data */ + if (tdb->methods->tdb_read(tdb, recovery_head + sizeof(rec), data, + rec.data_len, 0) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery data\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + /* recover the file data */ + p = data; + while (p+8 < data + rec.data_len) { + u32 ofs, len; + if (DOCONV()) { + tdb_convert(p, 8); + } + memcpy(&ofs, p, 4); + memcpy(&len, p+4, 4); + + if (tdb->methods->tdb_write(tdb, ofs, p+8, len) == -1) { + free(data); + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to recover %d bytes at offset %d\n", len, ofs)); + tdb->ecode = TDB_ERR_IO; + return -1; + } + p += 8 + len; + } + + free(data); + + if (transaction_sync(tdb, 0, tdb->map_size) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync recovery\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + /* if the recovery area is after the recovered eof then remove it */ + if (recovery_eof <= recovery_head) { + if (tdb_ofs_write(tdb, TDB_RECOVERY_HEAD, &zero) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery head\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + } + + /* remove the recovery magic */ + if (tdb_ofs_write(tdb, recovery_head + offsetof(struct list_struct, magic), + &zero) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery magic\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + /* reduce the file size to the old size */ + tdb_munmap(tdb); + if (ftruncate(tdb->fd, recovery_eof) != 0) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to reduce to recovery size\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + tdb->map_size = recovery_eof; + tdb_mmap(tdb); + + if (transaction_sync(tdb, 0, recovery_eof) == -1) { + TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync2 recovery\n")); + tdb->ecode = TDB_ERR_IO; + return -1; + } + + TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_recover: recovered %d byte database\n", + recovery_eof)); + + /* 
all done */ + return 0; +} -- cgit From 6f9f5bd745b594e63c0b982967a03620e91999a5 Mon Sep 17 00:00:00 2001 From: Stefan Metzmacher Date: Tue, 17 Apr 2007 17:03:38 +0000 Subject: r22316: merge from samba4: this function should be static metze (This used to be commit f722fd32da56aabca2fffe243e89177bc23e2be7) --- source3/lib/tdb/common/transaction.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index 640cd5014d..0a609af521 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -358,8 +358,8 @@ static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size, /* brlock during a transaction - ignore them */ -int transaction_brlock(struct tdb_context *tdb, tdb_off_t offset, - int rw_type, int lck_type, int probe, size_t len) +static int transaction_brlock(struct tdb_context *tdb, tdb_off_t offset, + int rw_type, int lck_type, int probe, size_t len) { return 0; } -- cgit From 2c09988e46d4e917b1c53c9bda3f81a48bba4952 Mon Sep 17 00:00:00 2001 From: Andrew Tridgell Date: Tue, 10 Jul 2007 01:44:42 +0000 Subject: r23790: LGPLv3+ conversion for our LGPLv2+ library code (This used to be commit 1b78cace504f60c0f525765fbf59d9cc6506cd4d) --- source3/lib/tdb/common/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index 0a609af521..6442db4534 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -12,7 +12,7 @@ This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. + version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -- cgit From 9fa1c63578733077c0aaaeeb2fc97c3b191089cc Mon Sep 17 00:00:00 2001 From: Andrew Tridgell Date: Tue, 10 Jul 2007 03:42:26 +0000 Subject: r23798: updated old Temple Place FSF addresses to new URL (This used to be commit c676a971142d7176fd5dbf21405fca14515a0a76) --- source3/lib/tdb/common/transaction.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index 6442db4534..d2562906e1 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -20,8 +20,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + License along with this library; if not, see . */ #include "tdb_private.h" -- cgit From 594bbfcdeedc306ce4634dd827df342e86ed8390 Mon Sep 17 00:00:00 2001 From: Jelmer Vernooij Date: Sun, 12 Aug 2007 00:55:03 +0000 Subject: r24340: Use standard data type uint32_t rather than tdb-specific u32. 
(This used to be commit 26d1430283bd4ae8b8a84f3253e33417d509c1a4) --- source3/lib/tdb/common/transaction.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index d2562906e1..8263bd7f38 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -100,7 +100,7 @@ struct tdb_transaction_el { struct tdb_transaction { /* we keep a mirrored copy of the tdb hash heads here so tdb_next_hash_chain() can operate efficiently */ - u32 *hash_heads; + uint32_t *hash_heads; /* the original io methods - used to do IOs to the real db */ const struct tdb_methods *io_methods; @@ -205,7 +205,7 @@ static int transaction_write(struct tdb_context *tdb, tdb_off_t off, hash heads */ if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP && off < FREELIST_TOP+TDB_HASHTABLE_SIZE(tdb)) { - u32 chain = (off-FREELIST_TOP) / sizeof(tdb_off_t); + uint32_t chain = (off-FREELIST_TOP) / sizeof(tdb_off_t); memcpy(&tdb->transaction->hash_heads[chain], buf, len); } @@ -316,9 +316,9 @@ fail: /* accelerated hash chain head search, using the cached hash heads */ -static void transaction_next_hash_chain(struct tdb_context *tdb, u32 *chain) +static void transaction_next_hash_chain(struct tdb_context *tdb, uint32_t *chain) { - u32 h = *chain; + uint32_t h = *chain; for (;h < tdb->header.hash_size;h++) { /* the +1 takes account of the freelist */ if (0 != tdb->transaction->hash_heads[h+1]) { @@ -439,8 +439,8 @@ int tdb_transaction_start(struct tdb_context *tdb) /* setup a copy of the hash table heads so the hash scan in traverse can be fast */ - tdb->transaction->hash_heads = (u32 *) - calloc(tdb->header.hash_size+1, sizeof(u32)); + tdb->transaction->hash_heads = (uint32_t *) + calloc(tdb->header.hash_size+1, sizeof(uint32_t)); if (tdb->transaction->hash_heads == NULL) { tdb->ecode = TDB_ERR_OOM; goto fail; @@ -571,7 +571,7 @@ static tdb_len_t tdb_recovery_size(struct tdb_context *tdb) struct tdb_transaction_el *el; tdb_len_t recovery_size = 0; - recovery_size = sizeof(u32); + recovery_size = sizeof(uint32_t); for (el=tdb->transaction->elements;el;el=el->next) { if (el->offset >= tdb->transaction->old_map_size) { continue; @@ -677,7 +677,7 @@ static int transaction_setup_recovery(struct tdb_context *tdb, struct list_struct *rec; tdb_off_t recovery_offset, recovery_max_size; tdb_off_t old_map_size = tdb->transaction->old_map_size; - u32 magic, tailer; + uint32_t magic, tailer; /* check that the recovery area has enough space @@ -780,7 +780,7 @@ int tdb_transaction_commit(struct tdb_context *tdb) { const struct tdb_methods *methods; tdb_off_t magic_offset = 0; - u32 zero = 0; + uint32_t zero = 0; if (tdb->transaction == NULL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n")); @@ -933,7 +933,7 @@ int tdb_transaction_recover(struct tdb_context *tdb) { tdb_off_t recovery_head, recovery_eof; unsigned char *data, *p; - u32 zero = 0; + uint32_t zero = 0; struct list_struct rec; /* find the recovery area */ @@ -987,7 +987,7 @@ int tdb_transaction_recover(struct tdb_context *tdb) /* recover the file data */ p = data; while (p+8 < data + rec.data_len) { - u32 ofs, len; + uint32_t ofs, len; if (DOCONV()) { tdb_convert(p, 8); } -- cgit From 3809490dd0ceb902c002bfeff09e6c04946e1a0c Mon Sep 17 00:00:00 2001 From: Jeremy Allison Date: Tue, 6 Nov 2007 22:57:35 -0800 Subject: Keep the tdb code in sync with Samba4. 
Mainly this is the svn changes : ------------------------------------------------------------------------ r23238 | tridge | 2007-05-30 01:15:49 -0700 (Wed, 30 May 2007) | 6 lines merged transaction lock changes from ctdb this ensures that having the global lock also implies the transaction lock ------------------------------------------------------------------------ r22832 | tridge | 2007-05-13 18:00:06 -0700 (Sun, 13 May 2007) | 3 lines merged the latest tdb changes from ctdb to Samba4 Jeremy. (This used to be commit ab11c0055761e97ba32a8c3ec4fa6f8763a262cd) --- source3/lib/tdb/common/transaction.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index 8263bd7f38..7eaacf7a16 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -422,9 +422,7 @@ int tdb_transaction_start(struct tdb_context *tdb) /* get the transaction write lock. This is a blocking lock. As discussed with Volker, there are a number of ways we could make this async, which we will probably do in the future */ - if (tdb_brlock(tdb, TRANSACTION_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) { - TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get transaction lock\n")); - tdb->ecode = TDB_ERR_LOCK; + if (tdb_transaction_lock(tdb, F_WRLCK) == -1) { SAFE_FREE(tdb->transaction); return -1; } @@ -468,6 +466,7 @@ int tdb_transaction_start(struct tdb_context *tdb) TDB_HASHTABLE_SIZE(tdb)) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to prime hash table\n")); tdb->ecode = TDB_ERR_IO; + tdb->methods = tdb->transaction->io_methods; goto fail; } @@ -475,7 +474,7 @@ int tdb_transaction_start(struct tdb_context *tdb) fail: tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0); - tdb_brlock(tdb, TRANSACTION_LOCK, F_UNLCK, F_SETLKW, 0, 1); + tdb_transaction_unlock(tdb); SAFE_FREE(tdb->transaction->hash_heads); SAFE_FREE(tdb->transaction); return -1; @@ -530,7 +529,7 @@ int tdb_transaction_cancel(struct tdb_context *tdb) tdb->methods = tdb->transaction->io_methods; tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0); - tdb_brlock(tdb, TRANSACTION_LOCK, F_UNLCK, F_SETLKW, 0, 1); + tdb_transaction_unlock(tdb); SAFE_FREE(tdb->transaction->hash_heads); SAFE_FREE(tdb->transaction); -- cgit From 2ec41571a3efbea254cc3e132280a194c86a2f89 Mon Sep 17 00:00:00 2001 From: Jeremy Allison Date: Fri, 11 Jan 2008 15:08:37 -0800 Subject: Sync tdb with the tdb changes in ctdb. Spoke to tridge about this. Fixes insidious problem with order n^2 freelist merging. Jeremy. (This used to be commit c6609c042b128e7d63eb64cfdfb0f6b65cb59d76) --- source3/lib/tdb/common/transaction.c | 343 +++++++++++++++++------------------ 1 file changed, 170 insertions(+), 173 deletions(-) (limited to 'source3/lib/tdb/common/transaction.c') diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c index 7eaacf7a16..2da09face0 100644 --- a/source3/lib/tdb/common/transaction.c +++ b/source3/lib/tdb/common/transaction.c @@ -87,13 +87,6 @@ */ -struct tdb_transaction_el { - struct tdb_transaction_el *next, *prev; - tdb_off_t offset; - tdb_len_t length; - unsigned char *data; -}; - /* hold the context of any current transaction */ @@ -105,12 +98,12 @@ struct tdb_transaction { /* the original io methods - used to do IOs to the real db */ const struct tdb_methods *io_methods; - /* the list of transaction elements. 
We use a doubly linked - list with a last pointer to allow us to keep the list - ordered, with first element at the front of the list. It - needs to be doubly linked as the read/write traversals need - to be backwards, while the commit needs to be forwards */ - struct tdb_transaction_el *elements, *elements_last; + /* the list of transaction blocks. When a block is first + written to, it gets created in this list */ + uint8_t **blocks; + uint32_t num_blocks; + uint32_t block_size; /* bytes in each block */ + uint32_t last_block_size; /* number of valid bytes in the last block */ /* non-zero when an internal transaction error has occurred. All write operations will then fail until the @@ -134,52 +127,48 @@ struct tdb_transaction { static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf, tdb_len_t len, int cv) { - struct tdb_transaction_el *el; - - /* we need to walk the list backwards to get the most recent data */ - for (el=tdb->transaction->elements_last;el;el=el->prev) { - tdb_len_t partial; + uint32_t blk; - if (off+len <= el->offset) { - continue; - } - if (off >= el->offset + el->length) { - continue; + /* break it down into block sized ops */ + while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) { + tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size); + if (transaction_read(tdb, off, buf, len2, cv) != 0) { + return -1; } + len -= len2; + off += len2; + buf = (void *)(len2 + (char *)buf); + } - /* an overlapping read - needs to be split into up to - 2 reads and a memcpy */ - if (off < el->offset) { - partial = el->offset - off; - if (transaction_read(tdb, off, buf, partial, cv) != 0) { - goto fail; - } - len -= partial; - off += partial; - buf = (void *)(partial + (char *)buf); - } - if (off + len <= el->offset + el->length) { - partial = len; - } else { - partial = el->offset + el->length - off; - } - memcpy(buf, el->data + (off - el->offset), partial); - if (cv) { - tdb_convert(buf, len); - } - len -= partial; - off += partial; - buf = (void *)(partial + (char *)buf); - - if (len != 0 && transaction_read(tdb, off, buf, len, cv) != 0) { + if (len == 0) { + return 0; + } + + blk = off / tdb->transaction->block_size; + + /* see if we have it in the block list */ + if (tdb->transaction->num_blocks <= blk || + tdb->transaction->blocks[blk] == NULL) { + /* nope, do a real read */ + if (tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv) != 0) { goto fail; } - return 0; } - /* its not in the transaction elements - do a real read */ - return tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv); + /* it is in the block list. 
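+	   Only the final block may be partially valid (its valid length
+	   is last_block_size), so a read that would run past that is an
+	   error.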
Now check for the last block */ + if (blk == tdb->transaction->num_blocks-1) { + if (len > tdb->transaction->last_block_size) { + goto fail; + } + } + + /* now copy it out of this block */ + memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len); + if (cv) { + tdb_convert(buf, len); + } + return 0; fail: TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%d len=%d\n", off, len)); @@ -195,12 +184,8 @@ fail: static int transaction_write(struct tdb_context *tdb, tdb_off_t off, const void *buf, tdb_len_t len) { - struct tdb_transaction_el *el, *best_el=NULL; + uint32_t blk; - if (len == 0) { - return 0; - } - /* if the write is to a hash head, then update the transaction hash heads */ if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP && @@ -209,106 +194,88 @@ static int transaction_write(struct tdb_context *tdb, tdb_off_t off, memcpy(&tdb->transaction->hash_heads[chain], buf, len); } - /* first see if we can replace an existing entry */ - for (el=tdb->transaction->elements_last;el;el=el->prev) { - tdb_len_t partial; - - if (best_el == NULL && off == el->offset+el->length) { - best_el = el; + /* break it up into block sized chunks */ + while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) { + tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size); + if (transaction_write(tdb, off, buf, len2) != 0) { + return -1; } - - if (off+len <= el->offset) { - continue; - } - if (off >= el->offset + el->length) { - continue; + len -= len2; + off += len2; + if (buf != NULL) { + buf = (const void *)(len2 + (const char *)buf); } + } - /* an overlapping write - needs to be split into up to - 2 writes and a memcpy */ - if (off < el->offset) { - partial = el->offset - off; - if (transaction_write(tdb, off, buf, partial) != 0) { - goto fail; - } - len -= partial; - off += partial; - buf = (const void *)(partial + (const char *)buf); - } - if (off + len <= el->offset + el->length) { - partial = len; + if (len == 0) { + return 0; + } + + blk = off / tdb->transaction->block_size; + off = off % tdb->transaction->block_size; + + if (tdb->transaction->num_blocks <= blk) { + uint8_t **new_blocks; + /* expand the blocks array */ + if (tdb->transaction->blocks == NULL) { + new_blocks = malloc((blk+1)*sizeof(uint8_t *)); } else { - partial = el->offset + el->length - off; + new_blocks = realloc(tdb->transaction->blocks, (blk+1)*sizeof(uint8_t *)); } - memcpy(el->data + (off - el->offset), buf, partial); - len -= partial; - off += partial; - buf = (const void *)(partial + (const char *)buf); - - if (len != 0 && transaction_write(tdb, off, buf, len) != 0) { + if (new_blocks == NULL) { + tdb->ecode = TDB_ERR_OOM; goto fail; } - - return 0; + memset(&new_blocks[tdb->transaction->num_blocks], 0, + (1+(blk - tdb->transaction->num_blocks))*sizeof(uint8_t *)); + tdb->transaction->blocks = new_blocks; + tdb->transaction->num_blocks = blk+1; + tdb->transaction->last_block_size = 0; } - /* see if we can append the new entry to an existing entry */ - if (best_el && best_el->offset + best_el->length == off && - (off+len < tdb->transaction->old_map_size || - off > tdb->transaction->old_map_size)) { - unsigned char *data = best_el->data; - el = best_el; - el->data = (unsigned char *)realloc(el->data, - el->length + len); - if (el->data == NULL) { + /* allocate and fill a block? 
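+	   If it overlaps data that already existed in the file we seed it
+	   from disk first, so bytes the transaction never writes keep
+	   their pre-transaction contents.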
*/ + if (tdb->transaction->blocks[blk] == NULL) { + tdb->transaction->blocks[blk] = (uint8_t *)calloc(tdb->transaction->block_size, 1); + if (tdb->transaction->blocks[blk] == NULL) { tdb->ecode = TDB_ERR_OOM; tdb->transaction->transaction_error = 1; - el->data = data; - return -1; + return -1; } - if (buf) { - memcpy(el->data + el->length, buf, len); - } else { - memset(el->data + el->length, TDB_PAD_BYTE, len); + if (tdb->transaction->old_map_size > blk * tdb->transaction->block_size) { + tdb_len_t len2 = tdb->transaction->block_size; + if (len2 + (blk * tdb->transaction->block_size) > tdb->transaction->old_map_size) { + len2 = tdb->transaction->old_map_size - (blk * tdb->transaction->block_size); + } + if (tdb->transaction->io_methods->tdb_read(tdb, blk * tdb->transaction->block_size, + tdb->transaction->blocks[blk], + len2, 0) != 0) { + SAFE_FREE(tdb->transaction->blocks[blk]); + tdb->ecode = TDB_ERR_IO; + goto fail; + } + if (blk == tdb->transaction->num_blocks-1) { + tdb->transaction->last_block_size = len2; + } } - el->length += len; - return 0; } - - /* add a new entry at the end of the list */ - el = (struct tdb_transaction_el *)malloc(sizeof(*el)); - if (el == NULL) { - tdb->ecode = TDB_ERR_OOM; - tdb->transaction->transaction_error = 1; - return -1; - } - el->next = NULL; - el->prev = tdb->transaction->elements_last; - el->offset = off; - el->length = len; - el->data = (unsigned char *)malloc(len); - if (el->data == NULL) { - free(el); - tdb->ecode = TDB_ERR_OOM; - tdb->transaction->transaction_error = 1; - return -1; - } - if (buf) { - memcpy(el->data, buf, len); + + /* overwrite part of an existing block */ + if (buf == NULL) { + memset(tdb->transaction->blocks[blk] + off, 0, len); } else { - memset(el->data, TDB_PAD_BYTE, len); + memcpy(tdb->transaction->blocks[blk] + off, buf, len); } - if (el->prev) { - el->prev->next = el; - } else { - tdb->transaction->elements = el; + if (blk == tdb->transaction->num_blocks-1) { + if (len + off > tdb->transaction->last_block_size) { + tdb->transaction->last_block_size = len + off; + } } - tdb->transaction->elements_last = el; + return 0; fail: - TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n", off, len)); - tdb->ecode = TDB_ERR_IO; + TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n", + (blk*tdb->transaction->block_size) + off, len)); tdb->transaction->transaction_error = 1; return -1; } @@ -419,10 +386,14 @@ int tdb_transaction_start(struct tdb_context *tdb) return -1; } + /* a page at a time seems like a reasonable compromise between compactness and efficiency */ + tdb->transaction->block_size = tdb->page_size; + /* get the transaction write lock. This is a blocking lock. 
As discussed with Volker, there are a number of ways we could make this async, which we will probably do in the future */ if (tdb_transaction_lock(tdb, F_WRLCK) == -1) { + SAFE_FREE(tdb->transaction->blocks); SAFE_FREE(tdb->transaction); return -1; } @@ -460,21 +431,12 @@ int tdb_transaction_start(struct tdb_context *tdb) tdb->transaction->io_methods = tdb->methods; tdb->methods = &transaction_methods; - /* by calling this transaction write here, we ensure that we don't grow the - transaction linked list due to hash table updates */ - if (transaction_write(tdb, FREELIST_TOP, tdb->transaction->hash_heads, - TDB_HASHTABLE_SIZE(tdb)) != 0) { - TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to prime hash table\n")); - tdb->ecode = TDB_ERR_IO; - tdb->methods = tdb->transaction->io_methods; - goto fail; - } - return 0; fail: tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0); tdb_transaction_unlock(tdb); + SAFE_FREE(tdb->transaction->blocks); SAFE_FREE(tdb->transaction->hash_heads); SAFE_FREE(tdb->transaction); return -1; @@ -486,6 +448,8 @@ fail: */ int tdb_transaction_cancel(struct tdb_context *tdb) { + int i; + if (tdb->transaction == NULL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n")); return -1; @@ -499,13 +463,13 @@ int tdb_transaction_cancel(struct tdb_context *tdb) tdb->map_size = tdb->transaction->old_map_size; - /* free all the transaction elements */ - while (tdb->transaction->elements) { - struct tdb_transaction_el *el = tdb->transaction->elements; - tdb->transaction->elements = el->next; - free(el->data); - free(el); + /* free all the transaction blocks */ + for (i=0;itransaction->num_blocks;i++) { + if (tdb->transaction->blocks[i] != NULL) { + free(tdb->transaction->blocks[i]); + } } + SAFE_FREE(tdb->transaction->blocks); /* remove any global lock created during the transaction */ if (tdb->global_lock.count != 0) { @@ -515,7 +479,6 @@ int tdb_transaction_cancel(struct tdb_context *tdb) /* remove any locks created during the transaction */ if (tdb->num_locks != 0) { - int i; for (i=0;inum_lockrecs;i++) { tdb_brlock(tdb,FREELIST_TOP+4*tdb->lockrecs[i].list, F_UNLCK,F_SETLKW, 0, 1); @@ -567,16 +530,24 @@ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t */ static tdb_len_t tdb_recovery_size(struct tdb_context *tdb) { - struct tdb_transaction_el *el; tdb_len_t recovery_size = 0; + int i; recovery_size = sizeof(uint32_t); - for (el=tdb->transaction->elements;el;el=el->next) { - if (el->offset >= tdb->transaction->old_map_size) { + for (i=0;itransaction->num_blocks;i++) { + if (i * tdb->transaction->block_size >= tdb->transaction->old_map_size) { + break; + } + if (tdb->transaction->blocks[i] == NULL) { continue; } - recovery_size += 2*sizeof(tdb_off_t) + el->length; - } + recovery_size += 2*sizeof(tdb_off_t); + if (i == tdb->transaction->num_blocks-1) { + recovery_size += tdb->transaction->last_block_size; + } else { + recovery_size += tdb->transaction->block_size; + } + } return recovery_size; } @@ -669,7 +640,6 @@ static int tdb_recovery_allocate(struct tdb_context *tdb, static int transaction_setup_recovery(struct tdb_context *tdb, tdb_off_t *magic_offset) { - struct tdb_transaction_el *el; tdb_len_t recovery_size; unsigned char *data, *p; const struct tdb_methods *methods = tdb->transaction->io_methods; @@ -677,6 +647,7 @@ static int transaction_setup_recovery(struct tdb_context *tdb, tdb_off_t recovery_offset, recovery_max_size; tdb_off_t old_map_size = tdb->transaction->old_map_size; uint32_t magic, 
From a1902700c45935f0a1071100810e91142ceb273e Mon Sep 17 00:00:00 2001
From: Andrew Tridgell
Date: Mon, 14 Jan 2008 15:38:43 +1100
Subject: merged tdb fix from ctdb tree

(This used to be commit c91e9c785b5bf4b0c479edf8eb33da22bf615387)
---
 source3/lib/tdb/common/transaction.c | 75 ++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

(limited to 'source3/lib/tdb/common/transaction.c')

diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c
index 2da09face0..0ecfb9b7ff 100644
--- a/source3/lib/tdb/common/transaction.c
+++ b/source3/lib/tdb/common/transaction.c
@@ -87,6 +87,7 @@
 
 */
 
+
 /*
   hold the context of any current transaction
 */
@@ -280,6 +281,63 @@ fail:
 	return -1;
 }
 
+
+/*
+  write while in a transaction - this varient never expands the transaction blocks, it only
+  updates existing blocks. This means it cannot change the recovery size
+*/
+static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
+				      const void *buf, tdb_len_t len)
+{
+	uint32_t blk;
+
+	/* break it up into block sized chunks */
+	while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+		tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+		if (transaction_write_existing(tdb, off, buf, len2) != 0) {
+			return -1;
+		}
+		len -= len2;
+		off += len2;
+		if (buf != NULL) {
+			buf = (const void *)(len2 + (const char *)buf);
+		}
+	}
+
+	if (len == 0) {
+		return 0;
+	}
+
+	blk = off / tdb->transaction->block_size;
+	off = off % tdb->transaction->block_size;
+
+	if (tdb->transaction->num_blocks <= blk ||
+	    tdb->transaction->blocks[blk] == NULL) {
+		return 0;
+	}
+
+	/* overwrite part of an existing block */
+	if (buf == NULL) {
+		memset(tdb->transaction->blocks[blk] + off, 0, len);
+	} else {
+		memcpy(tdb->transaction->blocks[blk] + off, buf, len);
+	}
+	if (blk == tdb->transaction->num_blocks-1) {
+		if (len + off > tdb->transaction->last_block_size) {
+			tdb->transaction->last_block_size = len + off;
+		}
+	}
+
+	return 0;
+
+fail:
+	TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n",
+		 (blk*tdb->transaction->block_size) + off, len));
+	tdb->transaction->transaction_error = 1;
+	return -1;
+}
+
+
 /*
   accelerated hash chain head search, using the cached hash heads
 */
@@ -629,6 +687,10 @@ static int tdb_recovery_allocate(struct tdb_context *tdb,
 		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
 		return -1;
 	}
+	if (transaction_write_existing(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) {
+		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
+		return -1;
+	}
 
 	return 0;
 }
@@ -726,6 +788,12 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
 		tdb->ecode = TDB_ERR_IO;
 		return -1;
 	}
+	if (transaction_write_existing(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
+		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery data\n"));
+		free(data);
+		tdb->ecode = TDB_ERR_IO;
+		return -1;
+	}
 
 	/* as we don't have ordered writes, we have to sync the recovery
 	   data before we update the magic to indicate that the recovery
@@ -747,6 +815,11 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
 		tdb->ecode = TDB_ERR_IO;
 		return -1;
 	}
+	if (transaction_write_existing(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
+		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery magic\n"));
+		tdb->ecode = TDB_ERR_IO;
+		return -1;
+	}
 
 	/* ensure the recovery magic marker is on disk */
 	if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) {
@@ -778,6 +851,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
 		return -1;
 	}
 
+
 	if (tdb->transaction->nesting != 0) {
 		tdb->transaction->nesting--;
 		return 0;
@@ -916,6 +990,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
 
 	/* use a transaction cancel to free memory and remove the
 	   transaction locks */
 	tdb_transaction_cancel(tdb);
+
 	return 0;
 }
-- cgit
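transaction_write_existing() above deliberately never allocates: the recovery-area writes it mirrors into the cache must not add blocks, otherwise the recovery size computed before the copy would no longer match what is written. A companion sketch in the same spirit follows, again with invented names rather than tdb's API, repeating the invented struct from the earlier sketch so it stands alone.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Same invented layout as the wcache sketch after the previous patch. */
struct wcache {
	uint8_t **blocks;
	size_t block_size;
	size_t num_blocks;
	size_t last_block_size;
};

/* Overwrite data that is already cached, but never allocate, so the
   number of blocks (and hence the recovery size derived from it) cannot
   change while the recovery record is being built. */
static int wcache_overwrite_existing(struct wcache *c, size_t off,
				     const void *buf, size_t len)
{
	while (len > 0) {
		size_t blk = off / c->block_size;
		size_t boff = off % c->block_size;
		size_t n = c->block_size - boff;
		if (n > len) {
			n = len;
		}

		/* silently skip regions that were never cached */
		if (blk < c->num_blocks && c->blocks[blk] != NULL) {
			memcpy(c->blocks[blk] + boff, buf, n);
		}

		off += n;
		len -= n;
		buf = (const uint8_t *)buf + n;
	}
	return 0;
}

Exactly how much of the final cache block such a mirror write may touch is what the two follow-up fixes below adjust.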
From 7e2b8eb0797e7bf1f3c4e27fcbd775983080affc Mon Sep 17 00:00:00 2001
From: Jeremy Allison
Date: Tue, 15 Jan 2008 17:20:50 -0800
Subject: Port from ctdb:

minor fix to transaction_write_existing: tridge.

Jeremy.
(This used to be commit 874425c8f680fb2f737b46a3177b239e69302af5)
---
 source3/lib/tdb/common/transaction.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

(limited to 'source3/lib/tdb/common/transaction.c')

diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c
index 0ecfb9b7ff..ea0e3a93f3 100644
--- a/source3/lib/tdb/common/transaction.c
+++ b/source3/lib/tdb/common/transaction.c
@@ -316,25 +316,15 @@ static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
 		return 0;
 	}
 
-	/* overwrite part of an existing block */
-	if (buf == NULL) {
-		memset(tdb->transaction->blocks[blk] + off, 0, len);
-	} else {
-		memcpy(tdb->transaction->blocks[blk] + off, buf, len);
-	}
-	if (blk == tdb->transaction->num_blocks-1) {
-		if (len + off > tdb->transaction->last_block_size) {
-			tdb->transaction->last_block_size = len + off;
-		}
+	if (blk == tdb->transaction->num_blocks-1 &&
+	    off + len > tdb->transaction->last_block_size) {
+		len = tdb->transaction->last_block_size - off;
 	}
 
-	return 0;
+	/* overwrite part of an existing block */
+	memcpy(tdb->transaction->blocks[blk] + off, buf, len);
 
-fail:
-	TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n",
-		 (blk*tdb->transaction->block_size) + off, len));
-	tdb->transaction->transaction_error = 1;
-	return -1;
+	return 0;
 }
-- cgit
From c308d76305d0a598ce03c516ff0c3b3a8875c89c Mon Sep 17 00:00:00 2001
From: Volker Lendecke
Date: Wed, 16 Jan 2008 20:29:41 +0100
Subject: Fix some C++ warnings

(This used to be commit 625241c4773ae5c80dd0cb0c07a86aff633c1ede)
---
 source3/lib/tdb/common/transaction.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'source3/lib/tdb/common/transaction.c')

diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c
index ea0e3a93f3..c3e7a4e2c0 100644
--- a/source3/lib/tdb/common/transaction.c
+++ b/source3/lib/tdb/common/transaction.c
@@ -219,9 +219,12 @@ static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
 		uint8_t **new_blocks;
 		/* expand the blocks array */
 		if (tdb->transaction->blocks == NULL) {
-			new_blocks = malloc((blk+1)*sizeof(uint8_t *));
+			new_blocks = (uint8_t **)malloc(
+				(blk+1)*sizeof(uint8_t *));
 		} else {
-			new_blocks = realloc(tdb->transaction->blocks, (blk+1)*sizeof(uint8_t *));
+			new_blocks = (uint8_t **)realloc(
+				tdb->transaction->blocks,
+				(blk+1)*sizeof(uint8_t *));
 		}
 		if (new_blocks == NULL) {
 			tdb->ecode = TDB_ERR_OOM;
-- cgit
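The "minor fix" above changes the tail-block handling in transaction_write_existing() from growing last_block_size to clamping the write length, so a mirror write can never touch bytes beyond what the final cache block really holds; the C++-warning change next to it is mechanical, only adding explicit (uint8_t **) casts on the malloc()/realloc() results and keeping the old pointer untouched if reallocation fails. A small worked check of the clamping arithmetic, with invented numbers (a final block holding 40 valid bytes), not tdb code:

#include <assert.h>
#include <stddef.h>

/* Clamping rule from the fix above, isolated for illustration: a write
   into the final cache block may not run past the bytes that are
   actually valid there. */
static size_t clamp_tail_write(size_t off, size_t len, size_t last_block_size)
{
	if (off >= last_block_size) {
		return 0;               /* nothing valid to overwrite */
	}
	if (off + len > last_block_size) {
		return last_block_size - off;
	}
	return len;
}

int main(void)
{
	assert(clamp_tail_write(32, 20, 40) == 8);   /* 32+20 overruns, clipped to the valid tail */
	assert(clamp_tail_write(10, 20, 40) == 20);  /* fits entirely, unchanged */
	assert(clamp_tail_write(48, 4, 40) == 0);    /* starts past the valid data */
	return 0;
}

The guard for an offset that already lies past the valid bytes (the third case, which would otherwise make the unsigned subtraction wrap) is exactly what the follow-up transaction fix below adds.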
From 6b5980e47e86df77e43324e705d8509dea4ac0d4 Mon Sep 17 00:00:00 2001
From: Andrew Tridgell
Date: Thu, 31 Jan 2008 09:48:20 +1100
Subject: merged tdb transaction fix

(This used to be commit 1a1c7954368a7f168a57f86f4f857cf70258e37a)
---
 source3/lib/tdb/common/transaction.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'source3/lib/tdb/common/transaction.c')

diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c
index c3e7a4e2c0..4e2127be64 100644
--- a/source3/lib/tdb/common/transaction.c
+++ b/source3/lib/tdb/common/transaction.c
@@ -321,6 +321,9 @@ static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
 
 	if (blk == tdb->transaction->num_blocks-1 &&
 	    off + len > tdb->transaction->last_block_size) {
+		if (off >= tdb->transaction->last_block_size) {
+			return 0;
+		}
 		len = tdb->transaction->last_block_size - off;
 	}
 
-- cgit
From 04691de379fa692b22fc9f0b58c0bd829ce7cc7a Mon Sep 17 00:00:00 2001
From: Simo Sorce
Date: Wed, 3 Sep 2008 10:44:09 -0400
Subject: The msync manpage reports that msync *must* be called before munmap.

Failure to do so may result in lost data.

Fix an ifdef check, I really think we meant to check HAVE_MMAP here.
(This used to be commit 74c8575b3f3b90ea21ae6aa7ccd95947838af956)
---
 source3/lib/tdb/common/transaction.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'source3/lib/tdb/common/transaction.c')

diff --git a/source3/lib/tdb/common/transaction.c b/source3/lib/tdb/common/transaction.c
index 4e2127be64..7acda640c8 100644
--- a/source3/lib/tdb/common/transaction.c
+++ b/source3/lib/tdb/common/transaction.c
@@ -563,7 +563,7 @@ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t
 		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n"));
 		return -1;
 	}
-#ifdef MS_SYNC
+#ifdef HAVE_MMAP
 	if (tdb->map_ptr) {
 		tdb_off_t moffset = offset & ~(tdb->page_size-1);
 		if (msync(moffset + (char *)tdb->map_ptr,
-- cgit
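The final fix swaps the guard around the msync() call from the MS_SYNC constant to HAVE_MMAP, so the mapped database is flushed whenever mmap is actually in use, in line with the msync-before-munmap rule quoted in the commit message. A minimal sketch of the same discipline follows; the helper name, parameters and the configure-style HAVE_MMAP macro are assumptions for this example, not tdb's interface.

#include <unistd.h>
#include <sys/mman.h>
#include <stddef.h>

/* Illustrative only: flush a byte range of a file that may be mmap'd.
   'map' is NULL when the file is used through read()/write() alone;
   page_size is assumed to be a power of two.  The mapping must be
   msync()'d (certainly before any munmap()) for the bytes written
   through it to be considered safe on disk. */
static int flush_range(int fd, char *map, size_t page_size,
		       size_t offset, size_t length)
{
	if (fsync(fd) != 0) {
		return -1;
	}
#ifdef HAVE_MMAP
	if (map != NULL) {
		/* msync() wants a page-aligned start address */
		size_t moffset = offset & ~(page_size - 1);
		if (msync(map + moffset, length + (offset - moffset), MS_SYNC) != 0) {
			return -1;
		}
	}
#endif
	return 0;
}

Rounding the offset down to a page boundary and widening the length accordingly mirrors the moffset arithmetic in transaction_sync() above.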