Diffstat (limited to 'Master/tlpkg/tlperl/lib/DBM')
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep.pm             630
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep.pod           1096
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/Array.pm       414
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/Cookbook.pod   199
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/Engine.pm     2325
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/File.pm        277
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/Hash.pm        136
-rwxr-xr-x  Master/tlpkg/tlperl/lib/DBM/Deep/Internals.pod  281
8 files changed, 5358 insertions, 0 deletions
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep.pm b/Master/tlpkg/tlperl/lib/DBM/Deep.pm
new file mode 100755
index 00000000000..e4a21faad69
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep.pm
@@ -0,0 +1,630 @@
+package DBM::Deep;
+
+use 5.006_000;
+
+use strict;
+use warnings;
+
+our $VERSION = q(1.0013);
+
+use Data::Dumper ();
+use Fcntl qw( :flock );
+use Scalar::Util ();
+
+use DBM::Deep::Engine;
+use DBM::Deep::File;
+
+use overload
+ '""' => sub { overload::StrVal( $_[0] ) },
+ fallback => 1;
+
+use constant DEBUG => 0;
+
+##
+# Setup constants for users to pass to new()
+##
+sub TYPE_HASH () { DBM::Deep::Engine->SIG_HASH }
+sub TYPE_ARRAY () { DBM::Deep::Engine->SIG_ARRAY }
+
+# This is used in all the children of this class in their TIE<type> methods.
+sub _get_args {
+ my $proto = shift;
+
+ my $args;
+ if (scalar(@_) > 1) {
+ if ( @_ % 2 ) {
+ $proto->_throw_error( "Odd number of parameters to " . (caller(1))[2] );
+ }
+ $args = {@_};
+ }
+ elsif ( ref $_[0] ) {
+ unless ( eval { local $SIG{'__DIE__'}; %{$_[0]} || 1 } ) {
+ $proto->_throw_error( "Not a hashref in args to " . (caller(1))[2] );
+ }
+ $args = $_[0];
+ }
+ else {
+ $args = { file => shift };
+ }
+
+ return $args;
+}
+
+sub new {
+ ##
+ # Class constructor method for Perl OO interface.
+ # Calls tie() and returns blessed reference to tied hash or array,
+ # providing a hybrid OO/tie interface.
+ ##
+ my $class = shift;
+ my $args = $class->_get_args( @_ );
+
+ ##
+ # Check if we want a tied hash or array.
+ ##
+ my $self;
+ if (defined($args->{type}) && $args->{type} eq TYPE_ARRAY) {
+ $class = 'DBM::Deep::Array';
+ require DBM::Deep::Array;
+ tie @$self, $class, %$args;
+ }
+ else {
+ $class = 'DBM::Deep::Hash';
+ require DBM::Deep::Hash;
+ tie %$self, $class, %$args;
+ }
+
+ return bless $self, $class;
+}
+
+# This initializer is called from the various TIE* methods. new() calls tie(),
+# which allows for a single point of entry.
+sub _init {
+ my $class = shift;
+ my ($args) = @_;
+
+ $args->{storage} = DBM::Deep::File->new( $args )
+ unless exists $args->{storage};
+
+ # locking implicitly enables autoflush
+ if ($args->{locking}) { $args->{autoflush} = 1; }
+
+ # These are the defaults to be optionally overridden below
+ my $self = bless {
+ type => TYPE_HASH,
+ base_offset => undef,
+ staleness => undef,
+
+ storage => undef,
+ engine => undef,
+ }, $class;
+
+ $args->{engine} = DBM::Deep::Engine->new( { %{$args}, obj => $self } )
+ unless exists $args->{engine};
+
+ # Grab the parameters we want to use
+ foreach my $param ( keys %$self ) {
+ next unless exists $args->{$param};
+ $self->{$param} = $args->{$param};
+ }
+
+ eval {
+ local $SIG{'__DIE__'};
+
+ $self->lock;
+ $self->_engine->setup_fh( $self );
+ $self->_storage->set_inode;
+ $self->unlock;
+ }; if ( $@ ) {
+ my $e = $@;
+ eval { local $SIG{'__DIE__'}; $self->unlock; };
+ die $e;
+ }
+
+ return $self;
+}
+
+sub TIEHASH {
+ shift;
+ require DBM::Deep::Hash;
+ return DBM::Deep::Hash->TIEHASH( @_ );
+}
+
+sub TIEARRAY {
+ shift;
+ require DBM::Deep::Array;
+ return DBM::Deep::Array->TIEARRAY( @_ );
+}
+
+sub lock {
+ my $self = shift->_get_self;
+ return $self->_storage->lock( $self, @_ );
+}
+
+sub unlock {
+ my $self = shift->_get_self;
+ return $self->_storage->unlock( $self, @_ );
+}
+
+sub _copy_value {
+ my $self = shift->_get_self;
+ my ($spot, $value) = @_;
+
+ if ( !ref $value ) {
+ ${$spot} = $value;
+ }
+ else {
+ # This assumes hash or array only. This is a bad assumption moving forward.
+ # -RobK, 2008-05-27
+ my $r = Scalar::Util::reftype( $value );
+ my $tied;
+ if ( $r eq 'ARRAY' ) {
+ $tied = tied(@$value);
+ }
+ else {
+ $tied = tied(%$value);
+ }
+
+ if ( eval { local $SIG{__DIE__}; $tied->isa( 'DBM::Deep' ) } ) {
+ ${$spot} = $tied->_repr;
+ $tied->_copy_node( ${$spot} );
+ }
+ else {
+ if ( $r eq 'ARRAY' ) {
+ ${$spot} = [ @{$value} ];
+ }
+ else {
+ ${$spot} = { %{$value} };
+ }
+ }
+
+ my $c = Scalar::Util::blessed( $value );
+ if ( defined $c && !$c->isa( 'DBM::Deep') ) {
+ ${$spot} = bless ${$spot}, $c
+ }
+ }
+
+ return 1;
+}
+
+#sub _copy_node {
+# die "Must be implemented in a child class\n";
+#}
+#
+#sub _repr {
+# die "Must be implemented in a child class\n";
+#}
+
+sub export {
+ ##
+ # Recursively export into standard Perl hashes and arrays.
+ ##
+ my $self = shift->_get_self;
+
+ my $temp = $self->_repr;
+
+ $self->lock();
+ $self->_copy_node( $temp );
+ $self->unlock();
+
+ my $classname = $self->_engine->get_classname( $self );
+ if ( defined $classname ) {
+ bless $temp, $classname;
+ }
+
+ return $temp;
+}
+
+sub _check_legality {
+ my $self = shift;
+ my ($val) = @_;
+
+ my $r = Scalar::Util::reftype( $val );
+
+ return $r if !defined $r || '' eq $r;
+ return $r if 'HASH' eq $r;
+ return $r if 'ARRAY' eq $r;
+
+ DBM::Deep->_throw_error(
+ "Storage of references of type '$r' is not supported."
+ );
+}
+
+sub import {
+ # Perl calls import() on use -- ignore
+ return if !ref $_[0];
+
+ my $self = shift->_get_self;
+ my ($struct) = @_;
+
+ my $type = $self->_check_legality( $struct );
+ if ( !$type ) {
+ DBM::Deep->_throw_error( "Cannot import a scalar" );
+ }
+
+ if ( substr( $type, 0, 1 ) ne $self->_type ) {
+ DBM::Deep->_throw_error(
+ "Cannot import " . ('HASH' eq $type ? 'a hash' : 'an array')
+ . " into " . ('HASH' eq $type ? 'an array' : 'a hash')
+ );
+ }
+
+ my %seen;
+ my $recurse;
+ $recurse = sub {
+ my ($db, $val) = @_;
+
+ my $obj = 'HASH' eq Scalar::Util::reftype( $db ) ? tied(%$db) : tied(@$db);
+ $obj ||= $db;
+
+ my $r = $self->_check_legality( $val );
+ if ( 'HASH' eq $r ) {
+ while ( my ($k, $v) = each %$val ) {
+ my $r = $self->_check_legality( $v );
+ if ( $r ) {
+ my $temp = 'HASH' eq $r ? {} : [];
+ if ( my $c = Scalar::Util::blessed( $v ) ) {
+ bless $temp, $c;
+ }
+ $obj->put( $k, $temp );
+ $recurse->( $temp, $v );
+ }
+ else {
+ $obj->put( $k, $v );
+ }
+ }
+ }
+ elsif ( 'ARRAY' eq $r ) {
+ foreach my $k ( 0 .. $#$val ) {
+ my $v = $val->[$k];
+ my $r = $self->_check_legality( $v );
+ if ( $r ) {
+ my $temp = 'HASH' eq $r ? {} : [];
+ if ( my $c = Scalar::Util::blessed( $v ) ) {
+ bless $temp, $c;
+ }
+ $obj->put( $k, $temp );
+ $recurse->( $temp, $v );
+ }
+ else {
+ $obj->put( $k, $v );
+ }
+ }
+ }
+ };
+ $recurse->( $self, $struct );
+
+ return 1;
+}
+
+#XXX Need to keep track of who has a fh to this file in order to
+#XXX close them all prior to optimize on Win32/cygwin
+sub optimize {
+ ##
+ # Rebuild entire database into new file, then move
+ # it back on top of original.
+ ##
+ my $self = shift->_get_self;
+
+#XXX Need to create a new test for this
+# if ($self->_storage->{links} > 1) {
+# $self->_throw_error("Cannot optimize: reference count is greater than 1");
+# }
+
+ #XXX Do we have to lock the tempfile?
+
+ #XXX Should we use tempfile() here instead of a hard-coded name?
+ my $temp_filename = $self->_storage->{file} . '.tmp';
+ my $db_temp = DBM::Deep->new(
+ file => $temp_filename,
+ type => $self->_type,
+
+ # Bring over all the parameters that we need to bring over
+ ( map { $_ => $self->_engine->$_ } qw(
+ byte_size max_buckets data_sector_size num_txns
+ )),
+ );
+
+ $self->lock();
+ $self->_engine->clear_cache;
+ $self->_copy_node( $db_temp );
+ $db_temp->_storage->close;
+ undef $db_temp;
+
+ ##
+ # Attempt to copy user, group and permissions over to new file
+ ##
+ $self->_storage->copy_stats( $temp_filename );
+
+ # q.v. perlport for more information on this variable
+ if ( $^O eq 'MSWin32' || $^O eq 'cygwin' ) {
+ ##
+    # Potential race condition when optimizing on Win32 with locking.
+ # The Windows filesystem requires that the filehandle be closed
+ # before it is overwritten with rename(). This could be redone
+ # with a soft copy.
+ ##
+ $self->unlock();
+ $self->_storage->close;
+ }
+
+ if (!rename $temp_filename, $self->_storage->{file}) {
+ unlink $temp_filename;
+ $self->unlock();
+ $self->_throw_error("Optimize failed: Cannot copy temp file over original: $!");
+ }
+
+ $self->unlock();
+ $self->_storage->close;
+
+ $self->_storage->open;
+ $self->lock();
+ $self->_engine->setup_fh( $self );
+ $self->unlock();
+
+ return 1;
+}
+
+sub clone {
+ ##
+ # Make copy of object and return
+ ##
+ my $self = shift->_get_self;
+
+ return DBM::Deep->new(
+ type => $self->_type,
+ base_offset => $self->_base_offset,
+ staleness => $self->_staleness,
+ storage => $self->_storage,
+ engine => $self->_engine,
+ );
+}
+
+#XXX Migrate this to the engine, where it really belongs, and go through some
+# API - stop poking in the innards of someone else.
+{
+ my %is_legal_filter = map {
+ $_ => ~~1,
+ } qw(
+ store_key store_value
+ fetch_key fetch_value
+ );
+
+ sub set_filter {
+ my $self = shift->_get_self;
+ my $type = lc shift;
+ my $func = shift;
+
+ if ( $is_legal_filter{$type} ) {
+ $self->_storage->{"filter_$type"} = $func;
+ return 1;
+ }
+
+ return;
+ }
+
+ sub filter_store_key { $_[0]->set_filter( store_key => $_[1] ); }
+ sub filter_store_value { $_[0]->set_filter( store_value => $_[1] ); }
+ sub filter_fetch_key { $_[0]->set_filter( fetch_key => $_[1] ); }
+ sub filter_fetch_value { $_[0]->set_filter( fetch_value => $_[1] ); }
+}
+
+sub begin_work {
+ my $self = shift->_get_self;
+ return $self->_engine->begin_work( $self, @_ );
+}
+
+sub rollback {
+ my $self = shift->_get_self;
+ return $self->_engine->rollback( $self, @_ );
+}
+
+sub commit {
+ my $self = shift->_get_self;
+ return $self->_engine->commit( $self, @_ );
+}
+
+##
+# Accessor methods
+##
+
+sub _engine {
+ my $self = $_[0]->_get_self;
+ return $self->{engine};
+}
+
+sub _storage {
+ my $self = $_[0]->_get_self;
+ return $self->{storage};
+}
+
+sub _type {
+ my $self = $_[0]->_get_self;
+ return $self->{type};
+}
+
+sub _base_offset {
+ my $self = $_[0]->_get_self;
+ return $self->{base_offset};
+}
+
+sub _staleness {
+ my $self = $_[0]->_get_self;
+ return $self->{staleness};
+}
+
+##
+# Utility methods
+##
+
+sub _throw_error {
+ my $n = 0;
+ while( 1 ) {
+ my @caller = caller( ++$n );
+ next if $caller[0] =~ m/^DBM::Deep/;
+
+ die "DBM::Deep: $_[1] at $0 line $caller[2]\n";
+ }
+}
+
+sub STORE {
+ ##
+ # Store single hash key/value or array element in database.
+ ##
+ my $self = shift->_get_self;
+ my ($key, $value) = @_;
+ warn "STORE($self, $key, $value)\n" if DEBUG;
+
+ unless ( $self->_storage->is_writable ) {
+ $self->_throw_error( 'Cannot write to a readonly filehandle' );
+ }
+
+ ##
+ # Request exclusive lock for writing
+ ##
+ $self->lock( LOCK_EX );
+
+ # User may be storing a complex value, in which case we do not want it run
+ # through the filtering system.
+ if ( !ref($value) && $self->_storage->{filter_store_value} ) {
+ $value = $self->_storage->{filter_store_value}->( $value );
+ }
+
+ $self->_engine->write_value( $self, $key, $value);
+
+ $self->unlock();
+
+ return 1;
+}
+
+sub FETCH {
+ ##
+ # Fetch single value or element given plain key or array index
+ ##
+ my $self = shift->_get_self;
+ my ($key) = @_;
+ warn "FETCH($self,$key)\n" if DEBUG;
+
+ ##
+ # Request shared lock for reading
+ ##
+ $self->lock( LOCK_SH );
+
+ my $result = $self->_engine->read_value( $self, $key);
+
+ $self->unlock();
+
+ # Filters only apply to scalar values, so the ref check is making
+ # sure the fetched bucket is a scalar, not a child hash or array.
+ return ($result && !ref($result) && $self->_storage->{filter_fetch_value})
+ ? $self->_storage->{filter_fetch_value}->($result)
+ : $result;
+}
+
+sub DELETE {
+ ##
+ # Delete single key/value pair or element given plain key or array index
+ ##
+ my $self = shift->_get_self;
+ my ($key) = @_;
+ warn "DELETE($self,$key)\n" if DEBUG;
+
+ unless ( $self->_storage->is_writable ) {
+ $self->_throw_error( 'Cannot write to a readonly filehandle' );
+ }
+
+ ##
+ # Request exclusive lock for writing
+ ##
+ $self->lock( LOCK_EX );
+
+ ##
+ # Delete bucket
+ ##
+ my $value = $self->_engine->delete_key( $self, $key);
+
+ if (defined $value && !ref($value) && $self->_storage->{filter_fetch_value}) {
+ $value = $self->_storage->{filter_fetch_value}->($value);
+ }
+
+ $self->unlock();
+
+ return $value;
+}
+
+sub EXISTS {
+ ##
+ # Check if a single key or element exists given plain key or array index
+ ##
+ my $self = shift->_get_self;
+ my ($key) = @_;
+ warn "EXISTS($self,$key)\n" if DEBUG;
+
+ ##
+ # Request shared lock for reading
+ ##
+ $self->lock( LOCK_SH );
+
+ my $result = $self->_engine->key_exists( $self, $key );
+
+ $self->unlock();
+
+ return $result;
+}
+
+sub CLEAR {
+ ##
+ # Clear all keys from hash, or all elements from array.
+ ##
+ my $self = shift->_get_self;
+ warn "CLEAR($self)\n" if DEBUG;
+
+ unless ( $self->_storage->is_writable ) {
+ $self->_throw_error( 'Cannot write to a readonly filehandle' );
+ }
+
+ ##
+ # Request exclusive lock for writing
+ ##
+ $self->lock( LOCK_EX );
+
+    #XXX Rewrite this dreck to do it in the engine as a tight loop vs.
+    # iterating over keys - such a WASTE - is this required for transactional
+    # clearing?! Surely that can be detected in the engine ...
+ if ( $self->_type eq TYPE_HASH ) {
+ my $key = $self->first_key;
+ while ( $key ) {
+            # Retrieve the next key before deleting, because next_key depends
+            # on the current key still existing.
+ my $next_key = $self->next_key( $key );
+ $self->_engine->delete_key( $self, $key, $key );
+ $key = $next_key;
+ }
+ }
+ else {
+ my $size = $self->FETCHSIZE;
+ for my $key ( 0 .. $size - 1 ) {
+ $self->_engine->delete_key( $self, $key, $key );
+ }
+ $self->STORESIZE( 0 );
+ }
+
+ $self->unlock();
+
+ return 1;
+}
+
+##
+# Public method aliases
+##
+sub put { (shift)->STORE( @_ ) }
+sub store { (shift)->STORE( @_ ) }
+sub get { (shift)->FETCH( @_ ) }
+sub fetch { (shift)->FETCH( @_ ) }
+sub delete { (shift)->DELETE( @_ ) }
+sub exists { (shift)->EXISTS( @_ ) }
+sub clear { (shift)->CLEAR( @_ ) }
+
+sub _dump_file {shift->_get_self->_engine->_dump_file;}
+
+1;
+__END__
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep.pod b/Master/tlpkg/tlperl/lib/DBM/Deep.pod
new file mode 100755
index 00000000000..50aea5fd438
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep.pod
@@ -0,0 +1,1096 @@
+=head1 NAME
+
+DBM::Deep - A pure perl multi-level hash/array DBM that supports transactions
+
+=head1 SYNOPSIS
+
+ use DBM::Deep;
+ my $db = DBM::Deep->new( "foo.db" );
+
+ $db->{key} = 'value';
+ print $db->{key};
+
+ $db->put('key' => 'value');
+ print $db->get('key');
+
+ # true multi-level support
+ $db->{my_complex} = [
+ 'hello', { perl => 'rules' },
+ 42, 99,
+ ];
+
+ $db->begin_work;
+
+ # Do stuff here
+
+ $db->rollback;
+ $db->commit;
+
+ tie my %db, 'DBM::Deep', 'foo.db';
+ $db{key} = 'value';
+ print $db{key};
+
+ tied(%db)->put('key' => 'value');
+ print tied(%db)->get('key');
+
+=head1 DESCRIPTION
+
+A unique flat-file database module, written in pure perl. It offers true
+multi-level hash/array support (unlike MLDBM, which fakes it), a hybrid
+OO / tie() interface, cross-platform FTPable files, and ACID transactions,
+and it is quite fast. It can handle millions of keys and unlimited levels
+without significant slow-down. Written from the ground up in pure perl -- this
+is NOT a wrapper around a C-based DBM. Out-of-the-box compatibility with Unix,
+Mac OS X and Windows.
+
+=head1 VERSION DIFFERENCES
+
+B<NOTE>: 1.0000 has significant file format differences from prior versions.
+There is a backwards-compatibility layer at C<utils/upgrade_db.pl>. Files
+created by 1.0000 or higher are B<NOT> compatible with scripts using prior
+versions.
+
+=head1 SETUP
+
+Construction can be done OO-style (which is the recommended way), or using
+Perl's tie() function. Both are examined here.
+
+=head2 OO Construction
+
+The recommended way to construct a DBM::Deep object is to use the new()
+method, which gets you a blessed I<and> tied hash (or array) reference.
+
+ my $db = DBM::Deep->new( "foo.db" );
+
+This opens a new database handle, mapped to the file "foo.db". If this
+file does not exist, it will automatically be created. DB files are
+opened in "r+" (read/write) mode, and the type of object returned is a
+hash, unless otherwise specified (see L<OPTIONS> below).
+
+You can pass a number of options to the constructor to specify things like
+locking, autoflush, etc. This is done by passing an inline hash (or hashref):
+
+ my $db = DBM::Deep->new(
+ file => "foo.db",
+ locking => 1,
+ autoflush => 1
+ );
+
+Notice that the filename is now specified I<inside> the hash with
+the "file" parameter, as opposed to being the sole argument to the
+constructor. This is required if any options are specified.
+See L<OPTIONS> below for the complete list.
+
+You can also start with an array instead of a hash. For this, you must
+specify the C<type> parameter:
+
+ my $db = DBM::Deep->new(
+ file => "foo.db",
+ type => DBM::Deep->TYPE_ARRAY
+ );
+
+B<Note:> Specifying the C<type> parameter only takes effect when beginning
+a new DB file. If you create a DBM::Deep object with an existing file, the
+C<type> will be loaded from the file header, and an error will be thrown if
+the wrong type is passed in.
+
+=head2 Tie Construction
+
+Alternately, you can create a DBM::Deep handle by using Perl's built-in
+tie() function. The object returned from tie() can be used to call methods,
+such as lock() and unlock(). (That object can be retrieved from the tied
+variable at any time using tied() - please see L<perltie> for more info.)
+
+ my %hash;
+ my $db = tie %hash, "DBM::Deep", "foo.db";
+
+ my @array;
+ my $db = tie @array, "DBM::Deep", "bar.db";
+
+As with the OO constructor, you can replace the DB filename parameter with
+a hash containing one or more options (see L<OPTIONS> just below for the
+complete list).
+
+ tie %hash, "DBM::Deep", {
+ file => "foo.db",
+ locking => 1,
+ autoflush => 1
+ };
+
+=head2 Options
+
+There are a number of options that can be passed in when constructing your
+DBM::Deep objects. These apply to both the OO- and tie()-based approaches.
+
+=over
+
+=item * file
+
+Filename of the DB file to link the handle to. You can pass a full absolute
+filesystem path, partial path, or a plain filename if the file is in the
+current working directory. This is a required parameter (though q.v. fh).
+
+=item * fh
+
+If you want, you can pass in the fh instead of the file. This is most useful for doing
+something like:
+
+ my $db = DBM::Deep->new( { fh => \*DATA } );
+
+You are responsible for making sure that the fh has been opened appropriately for your
+needs. If you open it read-only and attempt to write, an exception will be thrown. If you
+open it write-only or append-only, an exception will be thrown immediately as DBM::Deep
+needs to read from the fh.
+
+=item * file_offset
+
+This is the offset within the file at which the DBM::Deep database starts.
+Most of the time, you will not need to set this. However, it's there if you
+want it.
+
+If you pass in C<fh> and do not set this, it will be set appropriately (see
+the sketch after this list).
+
+=item * type
+
+This parameter specifies what type of object to create, a hash or array. Use
+one of these two constants:
+
+=over 4
+
+=item * C<DBM::Deep-E<gt>TYPE_HASH>
+
+=item * C<DBM::Deep-E<gt>TYPE_ARRAY>.
+
+=back
+
+This only takes effect when beginning a new file. This is an optional
+parameter, and defaults to C<DBM::Deep-E<gt>TYPE_HASH>.
+
+=item * locking
+
+Specifies whether locking is to be enabled. DBM::Deep uses Perl's flock()
+function to lock the database in exclusive mode for writes, and shared mode
+for reads. Pass any true value to enable. This affects the base DB handle
+I<and any child hashes or arrays> that use the same DB file. This is an
+optional parameter, and defaults to 1 (enabled). See L<LOCKING> below for
+more.
+
+=item * autoflush
+
+Specifies whether autoflush is to be enabled on the underlying filehandle.
+This obviously slows down write operations, but is required if you may have
+multiple processes accessing the same DB file (also consider enabling I<locking>).
+Pass any true value to enable. This is an optional parameter, and defaults to 1
+(enabled).
+
+=item * filter_*
+
+See L</FILTERS> below.
+
+=back
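+
+As a sketch of using C<fh> together with an explicit C<file_offset> (the
+filename and the offset here are illustrative, not part of the API): skip a
+1024-byte application header at the front of a container file.
+
+    # Assumes container.bin already exists and reserves its first
+    # 1024 bytes for your own application header.
+    open my $fh, '+<', 'container.bin' or die "Cannot open: $!";
+    my $db = DBM::Deep->new(
+        fh          => $fh,
+        file_offset => 1024,
+    );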
+
+The following parameters may be specified in the constructor the first time
+the datafile is created. They are then stored in the file's header and cannot
+be overridden by subsequent openings of the file - the values stored in the
+datafile's header take precedence.
+
+=over 4
+
+=item * num_txns
+
+This is the number of transactions that can be running at one time. The
+default is one - the HEAD. The minimum is one and the maximum is 255. The more
+transactions, the larger and quicker the datafile grows.
+
+See L</TRANSACTIONS> below.
+
+=item * max_buckets
+
+This is the number of entries that can be added before a reindexing. The larger
+this number is made, the larger a file gets, but the better performance you will
+have. The default and minimum value is 16. The maximum is 256, but more than 64
+isn't recommended.
+
+=item * data_sector_size
+
+This is the size in bytes of a given data sector. Data sectors will chain, so
+a value of any size can be stored. However, chaining is expensive in terms of
+time. Setting this value to something close to the expected common length of
+your scalars will improve your performance. If it is too small, your file will
+have a lot of chaining. If it is too large, your file will have a lot of dead
+space in it.
+
+The default for this is 64 bytes. The minimum value is 32 and the maximum is
+256 bytes.
+
+B<Note:> There are between 6 and 10 bytes taken up in each data sector for
+bookkeeping. (It's 4 + the number of bytes in your L</pack_size>.) This is
+included within the data_sector_size, thus the effective value is 6-10 bytes
+less than what you specified.
+
+=item * pack_size
+
+This is the size of the file pointer used throughout the file. The valid values
+are:
+
+=over 4
+
+=item * small
+
+This uses 2-byte offsets, allowing for a maximum file size of 65 KB.
+
+=item * medium (default)
+
+This uses 4-byte offsets, allowing for a maximum file size of 4 GB.
+
+=item * large
+
+This uses 8-byte offsets, allowing for a maximum file size of 16 EB
+(exabytes). This can only be enabled if your Perl is compiled for 64-bit.
+
+=back
+
+See L</LARGEFILE SUPPORT> for more information.
+
+=back
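+
+As a combined sketch (the values shown are illustrative, within the documented
+ranges), a first-time constructor call might fix several of these header
+parameters at once:
+
+    my $db = DBM::Deep->new(
+        file             => "foo.db",
+        num_txns         => 4,        # allow up to 4 concurrent transactions
+        max_buckets      => 32,       # more entries before a reindex
+        data_sector_size => 128,      # suits scalars of roughly 120 bytes
+        pack_size        => 'medium', # 4-byte offsets (the default)
+    );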
+
+=head1 TIE INTERFACE
+
+With DBM::Deep you can access your databases using Perl's standard hash/array
+syntax. Because all DBM::Deep objects are I<tied> to hashes or arrays, you can
+treat them as such. DBM::Deep will intercept all reads/writes and direct them
+to the right place -- the DB file. This has nothing to do with the
+L</Tie Construction> section above. This simply tells you how to use DBM::Deep
+using regular hashes and arrays, rather than calling functions like C<get()>
+and C<put()> (although those work too). It is entirely up to you how you want
+to access your databases.
+
+=head2 Hashes
+
+You can treat any DBM::Deep object like a normal Perl hash reference. Add keys,
+or even nested hashes (or arrays) using standard Perl syntax:
+
+ my $db = DBM::Deep->new( "foo.db" );
+
+ $db->{mykey} = "myvalue";
+ $db->{myhash} = {};
+ $db->{myhash}->{subkey} = "subvalue";
+
+ print $db->{myhash}->{subkey} . "\n";
+
+You can even step through hash keys using the normal Perl C<keys()> function:
+
+ foreach my $key (keys %$db) {
+ print "$key: " . $db->{$key} . "\n";
+ }
+
+Remember that Perl's C<keys()> function extracts I<every> key from the hash and
+pushes them onto an array, all before the loop even begins. If you have an
+extremely large hash, this may exhaust Perl's memory. Instead, consider using
+Perl's C<each()> function, which pulls keys/values one at a time, using very
+little memory:
+
+ while (my ($key, $value) = each %$db) {
+ print "$key: $value\n";
+ }
+
+Please note that when using C<each()>, you should always pass a direct
+hash reference, not a lookup. Meaning, you should B<never> do this:
+
+ # NEVER DO THIS
+ while (my ($key, $value) = each %{$db->{foo}}) { # BAD
+
+This causes an infinite loop because, on each iteration, Perl calls FETCH()
+on the $db handle, returning a "new" hash for foo every time, so it
+effectively keeps returning the first key over and over again. Instead,
+assign C<< $db->{foo} >> to a temporary variable, then pass that to each().
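+
+For example, the safe pattern assigns the nested hash to a temporary variable
+first:
+
+    my $foo = $db->{foo};
+    while (my ($key, $value) = each %$foo) {
+        print "$key: $value\n";
+    }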
+
+=head2 Arrays
+
+As with hashes, you can treat any DBM::Deep object like a normal Perl array
+reference. This includes inserting, removing and manipulating elements,
+and the C<push()>, C<pop()>, C<shift()>, C<unshift()> and C<splice()> functions.
+The object must have first been created using type C<DBM::Deep-E<gt>TYPE_ARRAY>,
+or simply be a nested array reference inside a hash. Example:
+
+ my $db = DBM::Deep->new(
+ file => "foo-array.db",
+ type => DBM::Deep->TYPE_ARRAY
+ );
+
+ $db->[0] = "foo";
+ push @$db, "bar", "baz";
+ unshift @$db, "bah";
+
+ my $last_elem = pop @$db; # baz
+ my $first_elem = shift @$db; # bah
+ my $second_elem = $db->[1]; # bar
+
+ my $num_elements = scalar @$db;
+
+=head1 OO INTERFACE
+
+In addition to the I<tie()> interface, you can also use a standard OO interface
+to manipulate all aspects of DBM::Deep databases. Each type of object (hash or
+array) has its own methods, but both types share the following common methods:
+C<put()>, C<get()>, C<exists()>, C<delete()> and C<clear()>. C<fetch()> and
+C<store()> are aliases to C<get()> and C<put()>, respectively.
+
+=over
+
+=item * new() / clone()
+
+These are the constructor and copy-functions.
+
+=item * put() / store()
+
+Stores a new hash key/value pair, or sets an array element value. Takes two
+arguments, the hash key or array index, and the new value. The value can be
+a scalar, hash ref or array ref. Returns true on success, false on failure.
+
+ $db->put("foo", "bar"); # for hashes
+ $db->put(1, "bar"); # for arrays
+
+=item * get() / fetch()
+
+Fetches the value of a hash key or array element. Takes one argument: the hash
+key or array index. Returns a scalar, hash ref or array ref, depending on the
+data type stored.
+
+ my $value = $db->get("foo"); # for hashes
+ my $value = $db->get(1); # for arrays
+
+=item * exists()
+
+Checks if a hash key or array index exists. Takes one argument: the hash key
+or array index. Returns true if it exists, false if not.
+
+ if ($db->exists("foo")) { print "yay!\n"; } # for hashes
+ if ($db->exists(1)) { print "yay!\n"; } # for arrays
+
+=item * delete()
+
+Deletes one hash key/value pair or array element. Takes one argument: the hash
+key or array index. Returns true on success, false if not found. For arrays,
+the remaining elements located after the deleted element are NOT moved over.
+The deleted element is essentially just undefined, which is exactly how Perl's
+internal arrays work.
+
+ $db->delete("foo"); # for hashes
+ $db->delete(1); # for arrays
+
+=item * clear()
+
+Deletes B<all> hash keys or array elements. Takes no arguments. No return
+value.
+
+ $db->clear(); # hashes or arrays
+
+=item * lock() / unlock()
+
+q.v. L</LOCKING> for more info.
+
+=item * optimize()
+
+This will compress the datafile so that it takes up as little space as possible.
+There is a freespace manager so that when space is freed up, it is used before
+extending the size of the datafile. But, that freespace just sits in the datafile
+unless C<optimize()> is called.
+
+=item * import()
+
+Unlike simple assignment, C<import()> does not tie the right-hand side. Instead,
+a copy of your data is put into the DB. C<import()> takes either an arrayref (if
+your DB is an array) or a hashref (if your DB is a hash). C<import()> will die
+if anything else is passed in.
+
+=item * export()
+
+This returns a complete copy of the data structure at the point you do the export.
+This copy is in RAM, not on disk like the DB is.
+
+=item * begin_work() / commit() / rollback()
+
+These are the transactional functions. See L</TRANSACTIONS> for more information.
+
+=back
+
+=head2 Hashes
+
+For hashes, DBM::Deep supports all the common methods described above, and the
+following additional methods: C<first_key()> and C<next_key()>.
+
+=over
+
+=item * first_key()
+
+Returns the "first" key in the hash. As with built-in Perl hashes, keys are
+fetched in an undefined order (which appears random). Takes no arguments,
+returns the key as a scalar value.
+
+ my $key = $db->first_key();
+
+=item * next_key()
+
+Returns the "next" key in the hash, given the previous one as the sole argument.
+Returns undef if there are no more keys to be fetched.
+
+ $key = $db->next_key($key);
+
+=back
+
+Here are some examples of using hashes:
+
+ my $db = DBM::Deep->new( "foo.db" );
+
+ $db->put("foo", "bar");
+ print "foo: " . $db->get("foo") . "\n";
+
+ $db->put("baz", {}); # new child hash ref
+ $db->get("baz")->put("buz", "biz");
+ print "buz: " . $db->get("baz")->get("buz") . "\n";
+
+ my $key = $db->first_key();
+ while ($key) {
+ print "$key: " . $db->get($key) . "\n";
+ $key = $db->next_key($key);
+ }
+
+ if ($db->exists("foo")) { $db->delete("foo"); }
+
+=head2 Arrays
+
+For arrays, DBM::Deep supports all the common methods described above, and the
+following additional methods: C<length()>, C<push()>, C<pop()>, C<shift()>,
+C<unshift()> and C<splice()>.
+
+=over
+
+=item * length()
+
+Returns the number of elements in the array. Takes no arguments.
+
+ my $len = $db->length();
+
+=item * push()
+
+Adds one or more elements onto the end of the array. Accepts scalars, hash
+refs or array refs. No return value.
+
+ $db->push("foo", "bar", {});
+
+=item * pop()
+
+Fetches the last element in the array, and deletes it. Takes no arguments.
+Returns the element value, or undef if the array is empty.
+
+ my $elem = $db->pop();
+
+=item * shift()
+
+Fetches the first element in the array, deletes it, then shifts all the
+remaining elements over to take up the space. Returns the element value. This
+method is not recommended with large arrays -- see L</Large Arrays> below for
+details.
+
+ my $elem = $db->shift();
+
+=item * unshift()
+
+Inserts one or more elements onto the beginning of the array, shifting all
+existing elements over to make room. Accepts scalars, hash refs or array refs.
+No return value. This method is not recommended with large arrays -- see
+L</Large Arrays> below for details.
+
+ $db->unshift("foo", "bar", {});
+
+=item * splice()
+
+Performs exactly like Perl's built-in function of the same name. See
+L<perlfunc/splice> for usage -- it is too complicated to document here. This
+method is not recommended with large arrays -- see L</Large Arrays> below for
+details.
+
+=back
+
+Here are some examples of using arrays:
+
+ my $db = DBM::Deep->new(
+ file => "foo.db",
+ type => DBM::Deep->TYPE_ARRAY
+ );
+
+ $db->push("bar", "baz");
+ $db->unshift("foo");
+ $db->put(3, "buz");
+
+ my $len = $db->length();
+ print "length: $len\n"; # 4
+
+ for (my $k=0; $k<$len; $k++) {
+ print "$k: " . $db->get($k) . "\n";
+ }
+
+ $db->splice(1, 2, "biz", "baf");
+
+ while (my $elem = shift @$db) {
+ print "shifted: $elem\n";
+ }
+
+=head1 LOCKING
+
+Enable or disable automatic file locking by passing a boolean value to the
+C<locking> parameter when constructing your DBM::Deep object (see L<SETUP>
+above).
+
+ my $db = DBM::Deep->new(
+ file => "foo.db",
+ locking => 1
+ );
+
+This causes DBM::Deep to C<flock()> the underlying filehandle with exclusive
+mode for writes, and shared mode for reads. This is required if you have
+multiple processes accessing the same database file, to avoid file corruption.
+Please note that C<flock()> does NOT work for files over NFS. See L</DB over
+NFS> below for more.
+
+=head2 Explicit Locking
+
+You can explicitly lock a database, so it remains locked for multiple
+actions. This is done by calling the C<lock()> method, and passing an
+optional lock mode argument (defaults to exclusive mode). This is particularly
+useful for things like counters, where the current value needs to be fetched,
+then incremented, then stored again.
+
+ $db->lock();
+ my $counter = $db->get("counter");
+ $counter++;
+ $db->put("counter", $counter);
+ $db->unlock();
+
+ # or...
+
+ $db->lock();
+ $db->{counter}++;
+ $db->unlock();
+
+You can pass C<lock()> an optional argument, which specifies which mode to use
+(exclusive or shared). Use one of these two constants:
+C<DBM::Deep-E<gt>LOCK_EX> or C<DBM::Deep-E<gt>LOCK_SH>. These are passed
+directly to C<flock()>, and are the same as the constants defined in Perl's
+L<Fcntl> module.
+
+ $db->lock( $db->LOCK_SH );
+ # something here
+ $db->unlock();
+
+=head2 Win32/Cygwin
+
+Due to Win32 actually enforcing the read-only status of a shared lock, all
+locks on Win32 and cygwin are exclusive. This is because of how autovivification
+currently works. Hopefully, this will go away in a future release.
+
+=head1 IMPORTING/EXPORTING
+
+You can import existing complex structures by calling the C<import()> method,
+and export an entire database into an in-memory structure using the C<export()>
+method. Both are examined here.
+
+=head2 Importing
+
+Say you have an existing hash with nested hashes/arrays inside it. Instead of
+walking the structure and adding keys/elements to the database as you go,
+simply pass a reference to the C<import()> method. This recursively adds
+everything to an existing DBM::Deep object for you. Here is an example:
+
+ my $struct = {
+ key1 => "value1",
+ key2 => "value2",
+ array1 => [ "elem0", "elem1", "elem2" ],
+ hash1 => {
+ subkey1 => "subvalue1",
+ subkey2 => "subvalue2"
+ }
+ };
+
+ my $db = DBM::Deep->new( "foo.db" );
+ $db->import( $struct );
+
+ print $db->{key1} . "\n"; # prints "value1"
+
+This recursively imports the entire C<$struct> object into C<$db>, including
+all nested hashes and arrays. If the DBM::Deep object contains existing data,
+keys are merged with the existing ones, replacing if they already exist.
+The C<import()> method can be called on any database level (not just the base
+level), and works with both hash and array DB types.
+
+B<Note:> Make sure your existing structure has no circular references in it.
+These will cause an infinite loop when importing. There are plans to fix this
+in a later release.
+
+=head2 Exporting
+
+Calling the C<export()> method on an existing DBM::Deep object will return
+a reference to a new in-memory copy of the database. The export is done
+recursively, so all nested hashes/arrays are all exported to standard Perl
+objects. Here is an example:
+
+ my $db = DBM::Deep->new( "foo.db" );
+
+ $db->{key1} = "value1";
+ $db->{key2} = "value2";
+ $db->{hash1} = {};
+ $db->{hash1}->{subkey1} = "subvalue1";
+ $db->{hash1}->{subkey2} = "subvalue2";
+
+ my $struct = $db->export();
+
+ print $struct->{key1} . "\n"; # prints "value1"
+
+This makes a complete copy of the database in memory, and returns a reference
+to it. The C<export()> method can be called on any database level (not just
+the base level), and works with both hash and array DB types. Be careful of
+large databases -- you can store a lot more data in a DBM::Deep object than an
+in-memory Perl structure.
+
+B<Note:> Make sure your database has no circular references in it.
+These will cause an infinite loop when exporting. There are plans to fix this
+in a later release.
+
+=head1 FILTERS
+
+DBM::Deep has a number of hooks where you can specify your own Perl function
+to perform filtering on incoming or outgoing data. This is a perfect
+way to extend the engine, and implement things like real-time compression or
+encryption. Filtering applies to the base DB level, and all child hashes /
+arrays. Filter hooks can be specified when your DBM::Deep object is first
+constructed, or by calling the C<set_filter()> method at any time. There are
+four available filter hooks.
+
+=head2 set_filter()
+
+This method takes two parameters - the filter type and the filter subreference.
+The four types are:
+
+=over
+
+=item * filter_store_key
+
+This filter is called whenever a hash key is stored. It
+is passed the incoming key, and expected to return a transformed key.
+
+=item * filter_store_value
+
+This filter is called whenever a hash key or array element is stored. It
+is passed the incoming value, and expected to return a transformed value.
+
+=item * filter_fetch_key
+
+This filter is called whenever a hash key is fetched (i.e. via
+C<first_key()> or C<next_key()>). It is passed the transformed key,
+and expected to return the plain key.
+
+=item * filter_fetch_value
+
+This filter is called whenever a hash key or array element is fetched.
+It is passed the transformed value, and expected to return the plain value.
+
+=back
+
+Here are the two ways to set up a filter hook:
+
+ my $db = DBM::Deep->new(
+ file => "foo.db",
+ filter_store_value => \&my_filter_store,
+ filter_fetch_value => \&my_filter_fetch
+ );
+
+ # or...
+
+ $db->set_filter( "filter_store_value", \&my_filter_store );
+ $db->set_filter( "filter_fetch_value", \&my_filter_fetch );
+
+Your filter function will be called only when dealing with SCALAR keys or
+values. When nested hashes and arrays are being stored/fetched, filtering
+is bypassed. Filters are called as static functions, passed a single SCALAR
+argument, and expected to return a single SCALAR value. If you want to
+remove a filter, set the function reference to C<undef>:
+
+ $db->set_filter( "filter_store_value", undef );
+
+=head2 Examples
+
+Please read L<DBM::Deep::Manual> for examples of filters.
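+
+As a minimal illustration (a sketch only - any symmetric pair of transforms
+will do, and this one merely reverses the stored bytes):
+
+    sub my_filter_store { my ($value) = @_; return scalar reverse $value; }
+    sub my_filter_fetch { my ($value) = @_; return scalar reverse $value; }
+
+    $db->set_filter( "filter_store_value", \&my_filter_store );
+    $db->set_filter( "filter_fetch_value", \&my_filter_fetch );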
+
+=head1 ERROR HANDLING
+
+Most DBM::Deep methods return a true value for success, and call die() on
+failure. You can wrap calls in an eval block to catch the die.
+
+ my $db = DBM::Deep->new( "foo.db" ); # create hash
+ eval { $db->push("foo"); }; # ILLEGAL -- push is array-only call
+
+ print $@; # prints error message
+
+=head1 LARGEFILE SUPPORT
+
+If you have a 64-bit system, and your Perl is compiled with both LARGEFILE
+and 64-bit support, you I<may> be able to create databases larger than 4 GB.
+DBM::Deep by default uses 32-bit file offset tags, but these can be changed
+by specifying the 'pack_size' parameter when constructing the file.
+
+    DBM::Deep->new(
+        file      => $filename,
+        pack_size => 'large',
+    );
+
+This tells DBM::Deep to pack all file offsets with 8-byte (64-bit) quad words
+instead of 32-bit longs. After setting these values, your DB files have a
+theoretical maximum size of 16 EB (exabytes).
+
+You can also use C<pack_size =E<gt> 'small'> in order to use 16-bit file
+offsets.
+
+B<Note:> Changing these values will B<NOT> work for existing database files.
+Only change this for new files. Once the value has been set, it is stored in
+the file's header and cannot be changed for the life of the file. These
+parameters are per-file, meaning you can access 32-bit and 64-bit files, as
+you choose.
+
+B<Note:> We have not personally tested files larger than 4 GB -- all our
+systems have only a 32-bit Perl. However, we have received user reports that
+this does indeed work.
+
+=head1 LOW-LEVEL ACCESS
+
+If you require low-level access to the underlying filehandle that DBM::Deep uses,
+you can call the C<_fh()> method, which returns the handle:
+
+ my $fh = $db->_fh();
+
+This method can be called on the root level of the database, or any child
+hashes or arrays. All levels share a I<root> structure, which contains things
+like the filehandle, a reference counter, and all the options specified
+when you created the object. You can get access to this file object by
+calling the C<_storage()> method.
+
+ my $file_obj = $db->_storage();
+
+This is useful for changing options after the object has already been created,
+such as enabling/disabling locking. You can also store your own temporary user
+data in this structure (be wary of name collision), which is then accessible from
+any child hash or array.
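+
+For instance (a sketch; it assumes the storage object keeps its options in
+plain hash slots, and the C<my_app_data> key is purely hypothetical):
+
+    my $file_obj = $db->_storage();
+    $file_obj->{locking}     = 0;      # toggle an option after creation
+    $file_obj->{my_app_data} = "tmp";  # your own temporary user data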
+
+=head1 CIRCULAR REFERENCES
+
+DBM::Deep has full support for circular references. This means you can have
+a nested hash key or array element that points to a parent object. This
+relationship is stored in the DB file, and is preserved between sessions.
+Here is an example:
+
+ my $db = DBM::Deep->new( "foo.db" );
+
+ $db->{foo} = "bar";
+ $db->{circle} = $db; # ref to self
+
+ print $db->{foo} . "\n"; # prints "bar"
+ print $db->{circle}->{foo} . "\n"; # prints "bar" again
+
+This also works as expected with array and hash references. So, the following
+works as expected:
+
+ $db->{foo} = [ 1 .. 3 ];
+ $db->{bar} = $db->{foo};
+
+ push @{$db->{foo}}, 42;
+ is( $db->{bar}[-1], 42 ); # Passes
+
+This, however, does I<not> extend to assignments from one DB file to another.
+So, the following will throw an error:
+
+ my $db1 = DBM::Deep->new( "foo.db" );
+ my $db2 = DBM::Deep->new( "bar.db" );
+
+ $db1->{foo} = [];
+ $db2->{foo} = $db1->{foo}; # dies
+
+B<Note>: Passing the object to a function that recursively walks the
+object tree (such as I<Data::Dumper> or even the built-in C<optimize()> or
+C<export()> methods) will result in an infinite loop. This will be fixed in
+a future release by adding singleton support.
+
+=head1 TRANSACTIONS
+
+As of 1.0000, DBM::Deep has ACID transactions. Every DBM::Deep object is
+completely transaction-ready - it is not an option you have to turn on. You
+do have to specify how many transactions may run simultaneously (q.v.
+L</num_txns>).
+
+Three new methods have been added to support them. They are:
+
+=over 4
+
+=item * begin_work()
+
+This starts a transaction.
+
+=item * commit()
+
+This applies the changes done within the transaction to the mainline and ends
+the transaction.
+
+=item * rollback()
+
+This discards the changes done within the transaction to the mainline and ends
+the transaction.
+
+=back
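+
+A typical usage pattern wraps the work in an C<eval> and rolls back on
+failure (a sketch; the key names are illustrative):
+
+    $db->begin_work;
+
+    eval {
+        $db->{balance} -= 100;
+        $db->{audit}   = "withdrew 100";
+        $db->commit;
+    };
+    if ( $@ ) {
+        $db->rollback;
+    }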
+
+Transactions in DBM::Deep are done using a variant of the MVCC method, the
+same method used by the InnoDB MySQL engine.
+
+=head1 MIGRATION
+
+As of 1.0000, the file format has changed. Furthermore, DBM::Deep is now
+designed to potentially change file format between point-releases, if needed to
+support a requested feature. To aid in this, a migration script is provided
+within the CPAN distribution called C<utils/upgrade_db.pl>.
+
+B<NOTE:> This script is not installed onto your system because it carries a copy
+of every version prior to the current version.
+
+=head1 TODO
+
+The following are items that are planned to be added in future releases. These
+are separate from the L<CAVEATS, ISSUES & BUGS> below.
+
+=head2 Sub-Transactions
+
+Right now, you cannot run a transaction within a transaction. Removing this
+restriction is technically straightforward, but the combinatorial explosion of
+possible usecases hurts my head. If this is something you want to see
+immediately, please submit many testcases.
+
+=head2 Caching
+
+If a client is willing to assert upon opening the file that this process will be
+the only consumer of that datafile, then there are a number of caching
+possibilities that can be taken advantage of. This does, however, mean that
+DBM::Deep is more vulnerable to losing data due to unflushed changes. It also
+means a much larger in-memory footprint. As such, it's not clear exactly how
+this should be done. Suggestions are welcome.
+
+=head2 Ram-only
+
+The techniques used in DBM::Deep simply require a seekable contiguous
+datastore. This could just as easily be a large string as a file. By using
+substr, the STM capabilities of DBM::Deep could be used within a
+single-process. I have no idea how I'd specify this, though. Suggestions are
+welcome.
+
+=head2 Different contention resolution mechanisms
+
+Currently, the only contention resolution mechanism is last-write-wins. This
+is the mechanism used by most RDBMSes and should be good enough for most uses.
+For advanced uses of STM, other contention mechanisms will be needed. If you
+have an idea of how you'd like to see contention resolution in DBM::Deep,
+please let me know.
+
+=head1 CAVEATS, ISSUES & BUGS
+
+This section describes all the known issues with DBM::Deep. These are issues
+that are either intractable or depend on some feature within Perl working
+exactly right. If you have found something that is not listed below, please
+send an e-mail to L<rkinyon@cpan.org>. Likewise, if you think you know of a
+way around one of these issues, please let me know.
+
+=head2 References
+
+(The following assumes a high level of Perl understanding, specifically of
+references. Most users can safely skip this section.)
+
+Currently, the only references supported are HASH and ARRAY. The other reference
+types (SCALAR, CODE, GLOB, and REF) cannot be supported for various reasons.
+
+=over 4
+
+=item * GLOB
+
+These are things like filehandles and other sockets. They can't be supported
+because it's completely unclear how DBM::Deep should serialize them.
+
+=item * SCALAR / REF
+
+The discussion here refers to the following type of example:
+
+ my $x = 25;
+ $db->{key1} = \$x;
+
+ $x = 50;
+
+ # In some other process ...
+
+ my $val = ${ $db->{key1} };
+
+ is( $val, 50, "What actually gets stored in the DB file?" );
+
+The problem is one of synchronization. When the variable being referred to
+changes value, the reference isn't notified, which is kind of the point of
+references. This means that the new value won't be stored in the datafile for
+other processes to read. There is no TIEREF.
+
+It is theoretically possible to store references to values already within a
+DBM::Deep object because everything already is synchronized, but the change to
+the internals would be quite large. Specifically, DBM::Deep would have to tie
+every single value that is stored. This would bloat the RAM footprint of
+DBM::Deep at least twofold (if not more) and be a significant performance drain,
+all to support a feature that has never been requested.
+
+=item * CODE
+
+L<Data::Dump::Streamer> provides a mechanism for serializing coderefs,
+including saving off all closure state. This would allow for DBM::Deep to
+store the code for a subroutine. Then, whenever the subroutine is read, the
+code could be C<eval()>'ed into being. However, just as for SCALAR and REF,
+that closure state may change without notifying the DBM::Deep object storing
+the reference. Again, this would generally be considered a feature.
+
+=back
+
+=head2 External references and transactions
+
+If you do C<my $x = $db-E<gt>{foo};>, then start a transaction, $x will be
+referencing the database from outside the transaction. A fix for this (and
+other issues with how external references into the database are handled) is
+being looked into. This is the skipped set of tests in t/39_singletons.t, and
+a related issue is the focus of t/37_delete_edge_cases.t.
+
+=head2 File corruption
+
+The current level of error handling in DBM::Deep is minimal. Files I<are> checked
+for a 32-bit signature when opened, but any other form of corruption in the
+datafile can cause segmentation faults. DBM::Deep may try to C<seek()> past
+the end of a file, or get stuck in an infinite loop depending on the level and
+type of corruption. File write operations are not checked for failure (for
+speed), so if you happen to run out of disk space, DBM::Deep will probably fail in
+a bad way. These things will be addressed in a later version of DBM::Deep.
+
+=head2 DB over NFS
+
+Beware of using DBM::Deep files over NFS. DBM::Deep uses flock(), which works
+well on local filesystems, but will NOT protect you from file corruption over
+NFS. I've heard about setting up your NFS server with a locking daemon, then
+using C<lockf()> to lock your files, but your mileage may vary there as well.
+From what I understand, there is no real way to do it. However, if you need
+access to the underlying filehandle in DBM::Deep for using some other kind of
+locking scheme like C<lockf()>, see the L<LOW-LEVEL ACCESS> section above.
+
+=head2 Copying Objects
+
+Beware of copying tied objects in Perl. Very strange things can happen.
+Instead, use DBM::Deep's C<clone()> method which safely copies the object and
+returns a new, blessed and tied hash or array to the same level in the DB.
+
+ my $copy = $db->clone();
+
+B<Note>: Since clone() here is cloning the object, not the database location, any
+modifications to either $db or $copy will be visible to both.
+
+=head2 Large Arrays
+
+Beware of using C<shift()>, C<unshift()> or C<splice()> with large arrays.
+These functions cause every element in the array to move, which can be murder
+on DBM::Deep, as every element has to be fetched from disk, then stored again in
+a different location. This will be addressed in a future version.
+
+This has been somewhat addressed so that the cost is constant, regardless of
+what is stored at those locations. So, small arrays with huge data structures in
+them are faster. But, large arrays are still large.
+
+=head2 Writeonly Files
+
+If you pass in a filehandle to new(), you may have opened it in either a
+readonly or writeonly mode. STORE will verify that the filehandle is writable.
+However, there doesn't seem to be a good way to determine if a filehandle is
+readable. And, if the filehandle isn't readable, it's not clear what will
+happen. So, don't do that.
+
+=head2 Assignments Within Transactions
+
+The following will I<not> work as one might expect:
+
+ my $x = { a => 1 };
+
+ $db->begin_work;
+ $db->{foo} = $x;
+ $db->rollback;
+
+ is( $x->{a}, 1 ); # This will fail!
+
+The problem is that the moment a reference is used as the rvalue to a
+DBM::Deep object's lvalue, it becomes tied itself. This is so that future
+changes to C<$x> can be tracked within the DBM::Deep file and is considered
+to be a feature. By the time the rollback occurs, there is no knowledge that
+there had been an C<$x> or what memory location to assign an C<export()> to.
+
+B<NOTE:> This does not affect importing because imports do a walk over the
+reference to be imported in order to explicitly leave it untied.
+
+=head1 CODE COVERAGE
+
+L<Devel::Cover> is used to test the code coverage of the tests. Below is the
+L<Devel::Cover> report on this distribution's test suite.
+
+ ------------------------------------------ ------ ------ ------ ------ ------
+ File stmt bran cond sub total
+ ------------------------------------------ ------ ------ ------ ------ ------
+ blib/lib/DBM/Deep.pm 97.2 90.9 83.3 100.0 95.4
+ blib/lib/DBM/Deep/Array.pm 100.0 95.7 100.0 100.0 99.0
+ blib/lib/DBM/Deep/Engine.pm 95.6 84.7 81.6 98.4 92.5
+ blib/lib/DBM/Deep/File.pm 97.2 81.6 66.7 100.0 91.9
+ blib/lib/DBM/Deep/Hash.pm 100.0 100.0 100.0 100.0 100.0
+ Total 96.7 87.5 82.2 99.2 94.1
+ ------------------------------------------ ------ ------ ------ ------ ------
+
+=head1 MORE INFORMATION
+
+Check out the DBM::Deep Google Group at L<http://groups.google.com/group/DBM-Deep>
+or send email to L<DBM-Deep@googlegroups.com>. You can also visit #dbm-deep
+on irc.perl.org.
+
+The source code repository is at L<http://svn.perl.org/modules/DBM-Deep>
+
+=head1 MAINTAINERS
+
+Rob Kinyon, L<rkinyon@cpan.org>
+
+Originally written by Joseph Huckaby, L<jhuckaby@cpan.org>
+
+=head1 SPONSORS
+
+Stonehenge Consulting (L<http://www.stonehenge.com/>) sponsored the
+development of transactions and freespace management, leading to the 1.0000
+release. A great debt of gratitude goes out to them for their continuing
+leadership in and support of the Perl community.
+
+=head1 CONTRIBUTORS
+
+The following have contributed greatly to make DBM::Deep what it is today:
+
+=over 4
+
+=item * Adam Sah and Rich Gaushell for innumerable contributions early on.
+
+=item * Dan Golden and others at YAPC::NA 2006 for helping me design through transactions.
+
+=back
+
+=head1 SEE ALSO
+
+perltie(1), Tie::Hash(3), Digest::MD5(3), Fcntl(3), flock(2), lockf(3), nfs(5),
+Digest::SHA256(3), Crypt::Blowfish(3), Compress::Zlib(3)
+
+=head1 LICENSE
+
+Copyright (c) 2007 Rob Kinyon. All Rights Reserved.
+This is free software, you may use it and distribute it under the same terms
+as Perl itself.
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/Array.pm b/Master/tlpkg/tlperl/lib/DBM/Deep/Array.pm
new file mode 100755
index 00000000000..38c51869fda
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/Array.pm
@@ -0,0 +1,414 @@
+package DBM::Deep::Array;
+
+use 5.006_000;
+
+use strict;
+use warnings;
+
+our $VERSION = q(1.0013);
+
+# This is to allow DBM::Deep::Array to handle negative indices on
+# its own. Otherwise, Perl would intercept the call to negative
+# indices for us. This was causing bugs for negative index handling.
+our $NEGATIVE_INDICES = 1;
+
+use base 'DBM::Deep';
+
+use Scalar::Util ();
+
+sub _get_self {
+ eval { local $SIG{'__DIE__'}; tied( @{$_[0]} ) } || $_[0]
+}
+
+sub _repr { [] }
+
+sub TIEARRAY {
+ my $class = shift;
+ my $args = $class->_get_args( @_ );
+
+ $args->{type} = $class->TYPE_ARRAY;
+
+ return $class->_init($args);
+}
+
+sub FETCH {
+ my $self = shift->_get_self;
+ my ($key) = @_;
+
+ $self->lock( $self->LOCK_SH );
+
+ if ( !defined $key ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use an undefined array index." );
+ }
+ elsif ( $key =~ /^-?\d+$/ ) {
+ if ( $key < 0 ) {
+ $key += $self->FETCHSIZE;
+ unless ( $key >= 0 ) {
+ $self->unlock;
+ return;
+ }
+ }
+ }
+ elsif ( $key ne 'length' ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use '$key' as an array index." );
+ }
+
+ my $rv = $self->SUPER::FETCH( $key );
+
+ $self->unlock;
+
+ return $rv;
+}
+
+sub STORE {
+ my $self = shift->_get_self;
+ my ($key, $value) = @_;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $size;
+ my $idx_is_numeric;
+ if ( !defined $key ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use an undefined array index." );
+ }
+ elsif ( $key =~ /^-?\d+$/ ) {
+ $idx_is_numeric = 1;
+ if ( $key < 0 ) {
+ $size = $self->FETCHSIZE;
+ if ( $key + $size < 0 ) {
+ die( "Modification of non-creatable array value attempted, subscript $key" );
+ }
+ $key += $size
+ }
+ }
+ elsif ( $key ne 'length' ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use '$key' as an array index." );
+ }
+
+ my $rv = $self->SUPER::STORE( $key, $value );
+
+ if ( $idx_is_numeric ) {
+ $size = $self->FETCHSIZE unless defined $size;
+ if ( $key >= $size ) {
+ $self->STORESIZE( $key + 1 );
+ }
+ }
+
+ $self->unlock;
+
+ return $rv;
+}
+
+sub EXISTS {
+ my $self = shift->_get_self;
+ my ($key) = @_;
+
+ $self->lock( $self->LOCK_SH );
+
+ if ( !defined $key ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use an undefined array index." );
+ }
+ elsif ( $key =~ /^-?\d+$/ ) {
+ if ( $key < 0 ) {
+ $key += $self->FETCHSIZE;
+ unless ( $key >= 0 ) {
+ $self->unlock;
+ return;
+ }
+ }
+ }
+ elsif ( $key ne 'length' ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use '$key' as an array index." );
+ }
+
+ my $rv = $self->SUPER::EXISTS( $key );
+
+ $self->unlock;
+
+ return $rv;
+}
+
+sub DELETE {
+ my $self = shift->_get_self;
+ my ($key) = @_;
+ warn "ARRAY::DELETE($self,$key)\n" if DBM::Deep::DEBUG;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $size = $self->FETCHSIZE;
+ if ( !defined $key ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use an undefined array index." );
+ }
+ elsif ( $key =~ /^-?\d+$/ ) {
+ if ( $key < 0 ) {
+ $key += $size;
+ unless ( $key >= 0 ) {
+ $self->unlock;
+ return;
+ }
+ }
+ }
+ elsif ( $key ne 'length' ) {
+ $self->unlock;
+ DBM::Deep->_throw_error( "Cannot use '$key' as an array index." );
+ }
+
+ my $rv = $self->SUPER::DELETE( $key );
+
+ if ($rv && $key == $size - 1) {
+ $self->STORESIZE( $key );
+ }
+
+ $self->unlock;
+
+ return $rv;
+}
+
+# Now that we have a real Reference sector, we should store arraysize there.
+# However, arraysize needs to be transactionally-aware, so a simple location
+# to store it isn't going to work.
+sub FETCHSIZE {
+ my $self = shift->_get_self;
+
+ $self->lock( $self->LOCK_SH );
+
+ my $SAVE_FILTER = $self->_storage->{filter_fetch_value};
+ $self->_storage->{filter_fetch_value} = undef;
+
+ my $size = $self->FETCH('length') || 0;
+
+ $self->_storage->{filter_fetch_value} = $SAVE_FILTER;
+
+ $self->unlock;
+
+ return $size;
+}
+
+sub STORESIZE {
+ my $self = shift->_get_self;
+ my ($new_length) = @_;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $SAVE_FILTER = $self->_storage->{filter_store_value};
+ $self->_storage->{filter_store_value} = undef;
+
+ my $result = $self->STORE('length', $new_length, 'length');
+
+ $self->_storage->{filter_store_value} = $SAVE_FILTER;
+
+ $self->unlock;
+
+ return $result;
+}
+
+sub POP {
+ my $self = shift->_get_self;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $length = $self->FETCHSIZE();
+
+ if ($length) {
+ my $content = $self->FETCH( $length - 1 );
+ $self->DELETE( $length - 1 );
+
+ $self->unlock;
+
+ return $content;
+ }
+ else {
+ $self->unlock;
+ return;
+ }
+}
+
+sub PUSH {
+ my $self = shift->_get_self;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $length = $self->FETCHSIZE();
+
+ # Check @_ explicitly so that false-but-defined values (0, '', undef)
+ # are still stored rather than terminating the loop early.
+ while ( scalar @_ ) {
+ my $content = shift @_;
+ $self->STORE( $length, $content );
+ $length++;
+ }
+
+ $self->unlock;
+
+ return $length;
+}
+
+# XXX This really needs to be something more direct within the file, not a
+# fetch and re-store. -RobK, 2007-09-20
+sub _move_value {
+ my $self = shift;
+ my ($old_key, $new_key) = @_;
+
+ return $self->_engine->make_reference( $self, $old_key, $new_key );
+}
+
+sub SHIFT {
+ my $self = shift->_get_self;
+ warn "SHIFT($self)\n" if DBM::Deep::DEBUG;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $length = $self->FETCHSIZE();
+
+ if ( !$length ) {
+ $self->unlock;
+ return;
+ }
+
+ my $content = $self->DELETE( 0 );
+
+ # Unless the deletion above has cleared the array ...
+ if ( $length > 1 ) {
+ for (my $i = 0; $i < $length - 1; $i++) {
+ $self->_move_value( $i+1, $i );
+ }
+
+ $self->DELETE( $length - 1 );
+ }
+
+ $self->unlock;
+
+ return $content;
+}
+
+sub UNSHIFT {
+ my $self = shift->_get_self;
+ my @new_elements = @_;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $length = $self->FETCHSIZE();
+ my $new_size = scalar @new_elements;
+
+ if ($length) {
+ for (my $i = $length - 1; $i >= 0; $i--) {
+ $self->_move_value( $i, $i+$new_size );
+ }
+
+ $self->STORESIZE( $length + $new_size );
+ }
+
+ for (my $i = 0; $i < $new_size; $i++) {
+ $self->STORE( $i, $new_elements[$i] );
+ }
+
+ $self->unlock;
+
+ return $length + $new_size;
+}
+
+sub SPLICE {
+ my $self = shift->_get_self;
+
+ $self->lock( $self->LOCK_EX );
+
+ my $length = $self->FETCHSIZE();
+
+ ##
+ # Calculate offset and length of splice
+ ##
+ my $offset = shift;
+ $offset = 0 unless defined $offset;
+ if ($offset < 0) { $offset += $length; }
+
+ my $splice_length;
+ if (scalar @_) { $splice_length = shift; }
+ else { $splice_length = $length - $offset; }
+ if ($splice_length < 0) { $splice_length += ($length - $offset); }
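+ # For example, on a 5-element array, splice(@a, -2, 1) resolves to
+ # offset 3 with splice_length 1, while splice(@a, 1) resolves to
+ # offset 1 with splice_length 4 (everything from the offset onward).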
+
+ ##
+ # Setup array with new elements, and copy out old elements for return
+ ##
+ my @new_elements = @_;
+ my $new_size = scalar @new_elements;
+
+ my @old_elements = map {
+ $self->FETCH( $_ )
+ } $offset .. ($offset + $splice_length - 1);
+
+ ##
+ # Adjust array length, and shift elements to accommodate new section.
+ ##
+ if ( $new_size != $splice_length ) {
+ if ($new_size > $splice_length) {
+ for (my $i = $length - 1; $i >= $offset + $splice_length; $i--) {
+ $self->_move_value( $i, $i + ($new_size - $splice_length) );
+ }
+ $self->STORESIZE( $length + $new_size - $splice_length );
+ }
+ else {
+ for (my $i = $offset + $splice_length; $i < $length; $i++) {
+ $self->_move_value( $i, $i + ($new_size - $splice_length) );
+ }
+ for (my $i = 0; $i < $splice_length - $new_size; $i++) {
+ $self->DELETE( $length - 1 );
+ $length--;
+ }
+ }
+ }
+
+ ##
+ # Insert new elements into array
+ ##
+ for (my $i = $offset; $i < $offset + $new_size; $i++) {
+ $self->STORE( $i, shift @new_elements );
+ }
+
+ $self->unlock;
+
+ ##
+ # Return deleted section, or last element in scalar context.
+ ##
+ return wantarray ? @old_elements : $old_elements[-1];
+}
+
+# We don't need to populate it, yet.
+# It will be useful, though, when we split out HASH and ARRAY
+sub EXTEND {
+ ##
+ # Perl will call EXTEND() when the array is likely to grow.
+ # We don't care, but include it because it gets called at times.
+ ##
+}
+
+sub _copy_node {
+ my $self = shift;
+ my ($db_temp) = @_;
+
+ my $length = $self->length();
+ for (my $index = 0; $index < $length; $index++) {
+ $self->_copy_value( \$db_temp->[$index], $self->get($index) );
+ }
+
+ return 1;
+}
+
+##
+# Public method aliases
+##
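+# For example, $array_obj->push( 'item' ) is equivalent to
+# push @$array_obj, 'item' on the tied array ($array_obj being any
+# DBM::Deep array object).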
+sub length { (shift)->FETCHSIZE(@_) }
+sub pop { (shift)->POP(@_) }
+sub push { (shift)->PUSH(@_) }
+sub unshift { (shift)->UNSHIFT(@_) }
+sub splice { (shift)->SPLICE(@_) }
+
+# This must be last otherwise we have to qualify all other calls to shift
+# as calls to CORE::shift
+sub shift { (CORE::shift)->SHIFT(@_) }
+
+1;
+__END__
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/Cookbook.pod b/Master/tlpkg/tlperl/lib/DBM/Deep/Cookbook.pod
new file mode 100755
index 00000000000..4ca50b97a39
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/Cookbook.pod
@@ -0,0 +1,199 @@
+=head1 NAME
+
+DBM::Deep::Cookbook
+
+=head1 DESCRIPTION
+
+This is the Cookbook for L<DBM::Deep>. It contains useful tips and tricks,
+plus some examples of how to do common tasks.
+
+=head1 RECIPES
+
+=head2 UTF8 data
+
+When you're using UTF8 data, you may run into the "Wide character in print"
+warning. To fix that in 5.8+, do the following:
+
+ my $db = DBM::Deep->new( ... );
+ binmode $db->_fh, ":utf8";
+
+In 5.6, you will have to do the following:
+
+ my $db = DBM::Deep->new( ... );
+ $db->set_filter( 'store_value' => sub { pack "U0C*", unpack "C*", $_[0] } );
+ $db->set_filter( 'retrieve_value' => sub { pack "C*", unpack "U0C*", $_[0] } );
+
+In a future version, you will be able to specify C<utf8 =E<gt> 1> and
+L<DBM::Deep> will do these things for you.
+
+=head2 Real-time Encryption Example
+
+B<NOTE>: This is just an example of how to write a filter. This most
+definitely should B<NOT> be taken as a proper way to write a filter that does
+encryption.
+
+Here is a working example that uses the I<Crypt::Blowfish> module to
+do real-time encryption / decryption of keys & values with DBM::Deep Filters.
+Please visit L<http://search.cpan.org/search?module=Crypt::Blowfish> for more
+on I<Crypt::Blowfish>. You'll also need the I<Crypt::CBC> module.
+
+ use DBM::Deep;
+ use Crypt::Blowfish;
+ use Crypt::CBC;
+
+ my $cipher = Crypt::CBC->new({
+ 'key' => 'my secret key',
+ 'cipher' => 'Blowfish',
+ 'iv' => '$KJh#(}q',
+ 'regenerate_key' => 0,
+ 'padding' => 'space',
+ 'prepend_iv' => 0
+ });
+
+ my $db = DBM::Deep->new(
+ file => "foo-encrypt.db",
+ filter_store_key => \&my_encrypt,
+ filter_store_value => \&my_encrypt,
+ filter_fetch_key => \&my_decrypt,
+ filter_fetch_value => \&my_decrypt,
+ );
+
+ $db->{key1} = "value1";
+ $db->{key2} = "value2";
+ print "key1: " . $db->{key1} . "\n";
+ print "key2: " . $db->{key2} . "\n";
+
+ undef $db;
+ exit;
+
+ sub my_encrypt {
+ return $cipher->encrypt( $_[0] );
+ }
+ sub my_decrypt {
+ return $cipher->decrypt( $_[0] );
+ }
+
+=head2 Real-time Compression Example
+
+Here is a working example that uses the I<Compress::Zlib> module to do real-time
+compression / decompression of keys & values with DBM::Deep Filters.
+Please visit L<http://search.cpan.org/search?module=Compress::Zlib> for
+more on I<Compress::Zlib>.
+
+ use DBM::Deep;
+ use Compress::Zlib;
+
+ my $db = DBM::Deep->new(
+ file => "foo-compress.db",
+ filter_store_key => \&my_compress,
+ filter_store_value => \&my_compress,
+ filter_fetch_key => \&my_decompress,
+ filter_fetch_value => \&my_decompress,
+ );
+
+ $db->{key1} = "value1";
+ $db->{key2} = "value2";
+ print "key1: " . $db->{key1} . "\n";
+ print "key2: " . $db->{key2} . "\n";
+
+ undef $db;
+ exit;
+
+ sub my_compress {
+ return Compress::Zlib::memGzip( $_[0] ) ;
+ }
+ sub my_decompress {
+ return Compress::Zlib::memGunzip( $_[0] ) ;
+ }
+
+B<Note:> Filtering of keys only applies to hashes. Array "keys" are
+actually numeric indices, and are not filtered.
+
+=head1 Custom Digest Algorithm
+
+DBM::Deep by default uses the I<Message Digest 5> (MD5) algorithm for hashing
+keys. However, you can override this and use another algorithm (such as
+SHA-256), or even write your own. Please note that DBM::Deep currently
+expects zero collisions, so your algorithm has to be I<perfect>, so to
+speak. Collision detection may be introduced in a later version.
+
+You can specify a custom digest algorithm by passing it into the parameter
+list for new(), passing a reference to a subroutine as the 'digest' parameter,
+and the length of the algorithm's hashes (in bytes) as the 'hash_size'
+parameter. Here is a working example that uses a 256-bit hash from the
+I<Digest::SHA256> module. Please see
+L<http://search.cpan.org/search?module=Digest::SHA256> for more information.
+
+ use DBM::Deep;
+ use Digest::SHA256;
+
+ my $context = Digest::SHA256::new(256);
+
+ my $db = DBM::Deep->new(
+ file => "foo-sha.db",
+ digest => \&my_digest,
+ hash_size => 32,
+ );
+
+ $db->{key1} = "value1";
+ $db->{key2} = "value2";
+ print "key1: " . $db->{key1} . "\n";
+ print "key2: " . $db->{key2} . "\n";
+
+ undef $db;
+ exit;
+
+ sub my_digest {
+ return substr( $context->hash($_[0]), 0, 32 );
+ }
+
+B<Note:> Your returned digest strings must be B<EXACTLY> the number
+of bytes you specify in the hash_size parameter (in this case 32). Undefined
+behavior will occur otherwise.
+
+B<Note:> If you do choose to use a custom digest algorithm, you must set it
+every time you access this file. Otherwise, the default (MD5) will be used.
+
+=head1 PERFORMANCE
+
+Because DBM::Deep is a concurrent datastore, every change is flushed to disk
+immediately and every read goes to disk. This means that DBM::Deep functions
+at the speed of disk (generally 10-20ms) vs. the speed of RAM (generally
+50-70ns), or at least 150-200x slower than the comparable in-memory
+datastructure in Perl.
+
+There are several techniques you can use to speed up how DBM::Deep functions.
+
+=over 4
+
+=item * Put it on a ramdisk
+
+The easiest and quickest way to make DBM::Deep run faster is to create a
+ramdisk and locate the DBM::Deep file there. Doing this as an option may
+become a feature of DBM::Deep, assuming there is a good ramdisk wrapper on CPAN.
+
+=item * Work at the tightest level possible
+
+It is much faster to assign the level of your db that you are working with to
+an intermediate variable than to look it up again on every access. Thus:
+
+ # BAD
+ while ( my ($k, $v) = each %{$db->{foo}{bar}{baz}} ) {
+ ...
+ }
+
+ # GOOD
+ my $x = $db->{foo}{bar}{baz};
+ while ( my ($k, $v) = each %$x ) {
+ ...
+ }
+
+=item * Make your file as tight as possible
+
+If you know that you are not going to use more than 65K in your database,
+consider using the C<pack_size =E<gt> 'small'> option. This will instruct
+DBM::Deep to use 16-bit addresses, meaning that the file will be smaller and
+seek times shorter.
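+
+For example (a minimal sketch; the file name is arbitrary):
+
+ my $db = DBM::Deep->new(
+ file => "foo-small.db",
+ pack_size => 'small',
+ );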
+
+=back
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/Engine.pm b/Master/tlpkg/tlperl/lib/DBM/Deep/Engine.pm
new file mode 100755
index 00000000000..fa04b4f3239
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/Engine.pm
@@ -0,0 +1,2325 @@
+package DBM::Deep::Engine;
+
+use 5.006_000;
+
+use strict;
+use warnings;
+
+our $VERSION = q(1.0013);
+
+# Never import symbols into our namespace. We are a class, not a library.
+# -RobK, 2008-05-27
+use Scalar::Util ();
+
+#use Data::Dumper ();
+
+# File-wide notes:
+# * Every method in here assumes that the storage has been appropriately
+# safeguarded. This can be anything from flock() to some sort of manual
+# mutex. But, it's the caller's responsibility to make sure that this has
+# been done.
+
+# Setup file and tag signatures. These should never change.
+sub SIG_FILE () { 'DPDB' }
+sub SIG_HEADER () { 'h' }
+sub SIG_HASH () { 'H' }
+sub SIG_ARRAY () { 'A' }
+sub SIG_NULL () { 'N' }
+sub SIG_DATA () { 'D' }
+sub SIG_INDEX () { 'I' }
+sub SIG_BLIST () { 'B' }
+sub SIG_FREE () { 'F' }
+sub SIG_SIZE () { 1 }
+
+my $STALE_SIZE = 2;
+
+# Please refer to the pack() documentation for further information
+my %StP = (
+ 1 => 'C', # Unsigned char value (no order needed as it's just one byte)
+ 2 => 'n', # Unsigned short in "network" (big-endian) order
+ 4 => 'N', # Unsigned long in "network" (big-endian) order
+ 8 => 'Q', # Unsigned quad (no order specified, presumably machine-dependent)
+);
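+
+# For example, a file offset stored in byte_size bytes is written with
+# pack( $StP{$self->byte_size}, $offset ) and read back with the matching
+# unpack() call.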
+
+################################################################################
+
+sub new {
+ my $class = shift;
+ my ($args) = @_;
+
+ my $self = bless {
+ byte_size => 4,
+
+ digest => undef,
+ hash_size => 16, # In bytes
+ hash_chars => 256, # Number of chars the algorithm uses per byte
+ max_buckets => 16,
+ num_txns => 1, # The HEAD
+ trans_id => 0, # Default to the HEAD
+
+ data_sector_size => 64, # Size in bytes of each data sector
+
+ entries => {}, # This is the list of entries for transactions
+ storage => undef,
+ }, $class;
+
+ # Never allow byte_size to be set directly.
+ delete $args->{byte_size};
+ if ( defined $args->{pack_size} ) {
+ if ( lc $args->{pack_size} eq 'small' ) {
+ $args->{byte_size} = 2;
+ }
+ elsif ( lc $args->{pack_size} eq 'medium' ) {
+ $args->{byte_size} = 4;
+ }
+ elsif ( lc $args->{pack_size} eq 'large' ) {
+ $args->{byte_size} = 8;
+ }
+ else {
+ DBM::Deep->_throw_error( "Unknown pack_size value: '$args->{pack_size}'" );
+ }
+ }
+
+ # Grab the parameters we want to use
+ foreach my $param ( keys %$self ) {
+ next unless exists $args->{$param};
+ $self->{$param} = $args->{$param};
+ }
+
+ my %validations = (
+ max_buckets => { floor => 16, ceil => 256 },
+ num_txns => { floor => 1, ceil => 255 },
+ data_sector_size => { floor => 32, ceil => 256 },
+ );
+
+ while ( my ($attr, $c) = each %validations ) {
+ if ( !defined $self->{$attr}
+ || !length $self->{$attr}
+ || $self->{$attr} =~ /\D/
+ || $self->{$attr} < $c->{floor}
+ ) {
+ $self->{$attr} = '(undef)' if !defined $self->{$attr};
+ warn "Floor of $attr is $c->{floor}. Setting it to $c->{floor} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{floor};
+ }
+ elsif ( $self->{$attr} > $c->{ceil} ) {
+ warn "Ceiling of $attr is $c->{ceil}. Setting it to $c->{ceil} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{ceil};
+ }
+ }
+
+ if ( !$self->{digest} ) {
+ require Digest::MD5;
+ $self->{digest} = \&Digest::MD5::md5;
+ }
+
+ return $self;
+}
+
+################################################################################
+
+sub read_value {
+ my $self = shift;
+ my ($obj, $key) = @_;
+
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return;
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
+
+ my $key_md5 = $self->_apply_digest( $key );
+
+ my $value_sector = $sector->get_data_for({
+ key_md5 => $key_md5,
+ allow_head => 1,
+ });
+
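+ # Note: fetching a missing key immediately writes a Null sector for it,
+ # so that autovivifying accesses (e.g. $db->{a}{b} = 1) have a real
+ # location to attach to.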
+ unless ( $value_sector ) {
+ $value_sector = DBM::Deep::Engine::Sector::Null->new({
+ engine => $self,
+ data => undef,
+ });
+
+ $sector->write_data({
+ key_md5 => $key_md5,
+ key => $key,
+ value => $value_sector,
+ });
+ }
+
+ return $value_sector->data;
+}
+
+sub get_classname {
+ my $self = shift;
+ my ($obj) = @_;
+
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "How did get_classname fail (no sector for '$obj')?!" );
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
+
+ return $sector->get_classname;
+}
+
+sub make_reference {
+ my $self = shift;
+ my ($obj, $old_key, $new_key) = @_;
+
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "How did get_classname fail (no sector for '$obj')?!" );
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
+
+ my $old_md5 = $self->_apply_digest( $old_key );
+
+ my $value_sector = $sector->get_data_for({
+ key_md5 => $old_md5,
+ allow_head => 1,
+ });
+
+ unless ( $value_sector ) {
+ $value_sector = DBM::Deep::Engine::Sector::Null->new({
+ engine => $self,
+ data => undef,
+ });
+
+ $sector->write_data({
+ key_md5 => $old_md5,
+ key => $old_key,
+ value => $value_sector,
+ });
+ }
+
+ if ( $value_sector->isa( 'DBM::Deep::Engine::Sector::Reference' ) ) {
+ $sector->write_data({
+ key => $new_key,
+ key_md5 => $self->_apply_digest( $new_key ),
+ value => $value_sector,
+ });
+ $value_sector->increment_refcount;
+ }
+ else {
+ $sector->write_data({
+ key => $new_key,
+ key_md5 => $self->_apply_digest( $new_key ),
+ value => $value_sector->clone,
+ });
+ }
+}
+
+sub key_exists {
+ my $self = shift;
+ my ($obj, $key) = @_;
+
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return '';
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return '';
+ }
+
+ my $data = $sector->get_data_for({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 1,
+ });
+
+ # exists() returns 1 or '' for true/false.
+ return $data ? 1 : '';
+}
+
+sub delete_key {
+ my $self = shift;
+ my ($obj, $key) = @_;
+
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return;
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
+
+ return $sector->delete_key({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 0,
+ });
+}
+
+sub write_value {
+ my $self = shift;
+ my ($obj, $key, $value) = @_;
+
+ my $r = Scalar::Util::reftype( $value ) || '';
+ {
+ last if $r eq '';
+ last if $r eq 'HASH';
+ last if $r eq 'ARRAY';
+
+ DBM::Deep->_throw_error(
+ "Storage of references of type '$r' is not supported."
+ );
+ }
+
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep." );
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep." );
+ }
+
+ my ($class, $type);
+ if ( !defined $value ) {
+ $class = 'DBM::Deep::Engine::Sector::Null';
+ }
+ elsif ( $r eq 'ARRAY' || $r eq 'HASH' ) {
+ my $tmpvar;
+ if ( $r eq 'ARRAY' ) {
+ $tmpvar = tied @$value;
+ } elsif ( $r eq 'HASH' ) {
+ $tmpvar = tied %$value;
+ }
+
+ if ( $tmpvar ) {
+ my $is_dbm_deep = eval { local $SIG{'__DIE__'}; $tmpvar->isa( 'DBM::Deep' ); };
+
+ unless ( $is_dbm_deep ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ }
+
+ unless ( $tmpvar->_engine->storage == $self->storage ) {
+ DBM::Deep->_throw_error( "Cannot store values across DBM::Deep files. Please use export() instead." );
+ }
+
+ # First, verify if we're storing the same thing to this spot. If we are, then
+ # this should be a no-op. -EJS, 2008-05-19
+ my $loc = $sector->get_data_location_for({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 1,
+ });
+
+ if ( defined($loc) && $loc == $tmpvar->_base_offset ) {
+ return 1;
+ }
+
+ #XXX Can this use $loc?
+ my $value_sector = $self->_load_sector( $tmpvar->_base_offset );
+ $sector->write_data({
+ key => $key,
+ key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+ $value_sector->increment_refcount;
+
+ return 1;
+ }
+
+ $class = 'DBM::Deep::Engine::Sector::Reference';
+ $type = substr( $r, 0, 1 );
+ }
+ else {
+ if ( tied($value) ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ }
+ $class = 'DBM::Deep::Engine::Sector::Scalar';
+ }
+
+ # Create this after loading the reference sector in case something bad happens.
+ # This way, we won't allocate value sector(s) needlessly.
+ my $value_sector = $class->new({
+ engine => $self,
+ data => $value,
+ type => $type,
+ });
+
+ $sector->write_data({
+ key => $key,
+ key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+
+ # This code is to make sure we write all the values in the $value to the disk
+ # and to make sure all changes to $value after the assignment are reflected
+ # on disk. This may be counter-intuitive at first, but it is correct dwimmery.
+ # NOTE - simply tying $value won't perform a STORE on each value. Hence, the
+ # copy to a temp value.
+ if ( $r eq 'ARRAY' ) {
+ my @temp = @$value;
+ tie @$value, 'DBM::Deep', {
+ base_offset => $value_sector->offset,
+ staleness => $value_sector->staleness,
+ storage => $self->storage,
+ engine => $self,
+ };
+ @$value = @temp;
+ bless $value, 'DBM::Deep::Array' unless Scalar::Util::blessed( $value );
+ }
+ elsif ( $r eq 'HASH' ) {
+ my %temp = %$value;
+ tie %$value, 'DBM::Deep', {
+ base_offset => $value_sector->offset,
+ staleness => $value_sector->staleness,
+ storage => $self->storage,
+ engine => $self,
+ };
+
+ %$value = %temp;
+ bless $value, 'DBM::Deep::Hash' unless Scalar::Util::blessed( $value );
+ }
+
+ return 1;
+}
+
+# XXX Add staleness here
+sub get_next_key {
+ my $self = shift;
+ my ($obj, $prev_key) = @_;
+
+ # XXX Need to add logic about resetting the iterator if any key in the reference has changed
+ unless ( $prev_key ) {
+ $obj->{iterator} = DBM::Deep::Iterator->new({
+ base_offset => $obj->_base_offset,
+ engine => $self,
+ });
+ }
+
+ return $obj->{iterator}->get_next_key( $obj );
+}
+
+################################################################################
+
+sub setup_fh {
+ my $self = shift;
+ my ($obj) = @_;
+
+ # We're opening the file.
+ unless ( $obj->_base_offset ) {
+ my $bytes_read = $self->_read_file_header;
+
+ # Creating a new file
+ unless ( $bytes_read ) {
+ $self->_write_file_header;
+
+ # 1) Create Array/Hash entry
+ my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ type => $obj->_type,
+ });
+ $obj->{base_offset} = $initial_reference->offset;
+ $obj->{staleness} = $initial_reference->staleness;
+
+ $self->storage->flush;
+ }
+ # Reading from an existing file
+ else {
+ $obj->{base_offset} = $bytes_read;
+ my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ offset => $obj->_base_offset,
+ });
+ unless ( $initial_reference ) {
+ DBM::Deep->_throw_error("Corrupted file, no master index record");
+ }
+
+ unless ($obj->_type eq $initial_reference->type) {
+ DBM::Deep->_throw_error("File type mismatch");
+ }
+
+ $obj->{staleness} = $initial_reference->staleness;
+ }
+ }
+
+ return 1;
+}
+
+sub begin_work {
+ my $self = shift;
+ my ($obj) = @_;
+
+ if ( $self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot begin_work within an active transaction" );
+ }
+
+ my @slots = $self->read_txn_slots;
+ my $found;
+ for my $i ( 0 .. $#slots ) {
+ next if $slots[$i];
+
+ $slots[$i] = 1;
+ $self->set_trans_id( $i + 1 );
+ $found = 1;
+ last;
+ }
+ unless ( $found ) {
+ DBM::Deep->_throw_error( "Cannot allocate transaction ID" );
+ }
+ $self->write_txn_slots( @slots );
+
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot begin_work - no available transactions" );
+ }
+
+ return;
+}
+
+sub rollback {
+ my $self = shift;
+ my ($obj) = @_;
+
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot rollback without an active transaction" );
+ }
+
+ # Each entry is the file location for a bucket that has a modification for
+ # this transaction. The entries need to be expunged.
+ foreach my $entry (@{ $self->get_entries } ) {
+ # Remove the entry here
+ my $read_loc = $entry
+ + $self->hash_size
+ + $self->byte_size
+ + $self->byte_size
+ + ($self->trans_id - 1) * ( $self->byte_size + $STALE_SIZE );
+
+ my $data_loc = $self->storage->read_at( $read_loc, $self->byte_size );
+ $data_loc = unpack( $StP{$self->byte_size}, $data_loc );
+ $self->storage->print_at( $read_loc, pack( $StP{$self->byte_size}, 0 ) );
+
+ if ( $data_loc > 1 ) {
+ $self->_load_sector( $data_loc )->free;
+ }
+ }
+
+ $self->clear_entries;
+
+ my @slots = $self->read_txn_slots;
+ $slots[$self->trans_id-1] = 0;
+ $self->write_txn_slots( @slots );
+ $self->inc_txn_staleness_counter( $self->trans_id );
+ $self->set_trans_id( 0 );
+
+ return 1;
+}
+
+sub commit {
+ my $self = shift;
+ my ($obj) = @_;
+
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot commit without an active transaction" );
+ }
+
+ foreach my $entry (@{ $self->get_entries } ) {
+ # Overwrite the entry in head with the entry in trans_id
+ my $base = $entry
+ + $self->hash_size
+ + $self->byte_size;
+
+ my $head_loc = $self->storage->read_at( $base, $self->byte_size );
+ $head_loc = unpack( $StP{$self->byte_size}, $head_loc );
+
+ my $spot = $base + $self->byte_size + ($self->trans_id - 1) * ( $self->byte_size + $STALE_SIZE );
+ my $trans_loc = $self->storage->read_at(
+ $spot, $self->byte_size,
+ );
+
+ $self->storage->print_at( $base, $trans_loc );
+ $self->storage->print_at(
+ $spot,
+ pack( $StP{$self->byte_size} . ' ' . $StP{$STALE_SIZE}, (0) x 2 ),
+ );
+
+ if ( $head_loc > 1 ) {
+ $self->_load_sector( $head_loc )->free;
+ }
+ }
+
+ $self->clear_entries;
+
+ my @slots = $self->read_txn_slots;
+ $slots[$self->trans_id-1] = 0;
+ $self->write_txn_slots( @slots );
+ $self->inc_txn_staleness_counter( $self->trans_id );
+ $self->set_trans_id( 0 );
+
+ return 1;
+}
+
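+# The transaction bitfield holds one bit per transaction slot; a set bit
+# means that slot is claimed by a running transaction (see begin_work).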
+sub read_txn_slots {
+ my $self = shift;
+ my $bl = $self->txn_bitfield_len;
+ my $num_bits = $bl * 8;
+ return split '', unpack( 'b'.$num_bits,
+ $self->storage->read_at(
+ $self->trans_loc, $bl,
+ )
+ );
+}
+
+sub write_txn_slots {
+ my $self = shift;
+ my $num_bits = $self->txn_bitfield_len * 8;
+ $self->storage->print_at( $self->trans_loc,
+ pack( 'b'.$num_bits, join('', @_) ),
+ );
+}
+
+sub get_running_txn_ids {
+ my $self = shift;
+ my @transactions = $self->read_txn_slots;
+ return map { $_ + 1 } grep { $transactions[$_] } 0 .. $#transactions;
+}
+
+sub get_txn_staleness_counter {
+ my $self = shift;
+ my ($trans_id) = @_;
+
+ # Hardcode staleness of 0 for the HEAD
+ return 0 unless $trans_id;
+
+ return unpack( $StP{$STALE_SIZE},
+ $self->storage->read_at(
+ $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
+ $STALE_SIZE,
+ )
+ );
+}
+
+sub inc_txn_staleness_counter {
+ my $self = shift;
+ my ($trans_id) = @_;
+
+ # Hardcode staleness of 0 for the HEAD
+ return 0 unless $trans_id;
+
+ $self->storage->print_at(
+ $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
+ pack( $StP{$STALE_SIZE}, $self->get_txn_staleness_counter( $trans_id ) + 1 ),
+ );
+}
+
+sub get_entries {
+ my $self = shift;
+ return [ keys %{ $self->{entries}{$self->trans_id} ||= {} } ];
+}
+
+sub add_entry {
+ my $self = shift;
+ my ($trans_id, $loc) = @_;
+
+ $self->{entries}{$trans_id} ||= {};
+ $self->{entries}{$trans_id}{$loc} = undef;
+}
+
+# If the buckets are being relocated because of a reindexing, the entries
+# mechanism needs to be made aware of it.
+sub reindex_entry {
+ my $self = shift;
+ my ($old_loc, $new_loc) = @_;
+
+ TRANS:
+ while ( my ($trans_id, $locs) = each %{ $self->{entries} } ) {
+ if ( exists $locs->{$old_loc} ) {
+ delete $locs->{$old_loc};
+ $locs->{$new_loc} = undef;
+ next TRANS;
+ }
+ }
+}
+
+sub clear_entries {
+ my $self = shift;
+ delete $self->{entries}{$self->trans_id};
+}
+
+################################################################################
+
+{
+ my $header_fixed = length( SIG_FILE ) + 1 + 4 + 4;
+ my $this_file_version = 3;
+
+ sub _write_file_header {
+ my $self = shift;
+
+ my $nt = $self->num_txns;
+ my $bl = $self->txn_bitfield_len;
+
+ my $header_var = 1 + 1 + 1 + 1 + $bl + $STALE_SIZE * ($nt - 1) + 3 * $self->byte_size;
+
+ my $loc = $self->storage->request_space( $header_fixed + $header_var );
+
+ $self->storage->print_at( $loc,
+ SIG_FILE,
+ SIG_HEADER,
+ pack('N', $this_file_version), # At this point, we're at 9 bytes
+ pack('N', $header_var), # header size
+ # --- Above is $header_fixed. Below is $header_var
+ pack('C', $self->byte_size),
+
+ # These shenanigans allow a value of up to 256 to fit in a single
+ # unsigned char ('C') by storing value-1.
+ pack('C', $self->max_buckets - 1),
+ pack('C', $self->data_sector_size - 1),
+
+ pack('C', $nt),
+ pack('C' . $bl, 0 ), # Transaction activeness bitfield
+ pack($StP{$STALE_SIZE}.($nt-1), (0) x ($nt-1) ), # Transaction staleness counters
+ pack($StP{$self->byte_size}, 0), # Start of free chain (blist size)
+ pack($StP{$self->byte_size}, 0), # Start of free chain (data size)
+ pack($StP{$self->byte_size}, 0), # Start of free chain (index size)
+ );
+
+ #XXX Set these less fragilely
+ $self->set_trans_loc( $header_fixed + 4 );
+ $self->set_chains_loc( $header_fixed + 4 + $bl + $STALE_SIZE * ($nt-1) );
+
+ return;
+ }
+
+ sub _read_file_header {
+ my $self = shift;
+
+ my $buffer = $self->storage->read_at( 0, $header_fixed );
+ return unless length($buffer);
+
+ my ($file_signature, $sig_header, $file_version, $size) = unpack(
+ 'A4 A N N', $buffer
+ );
+
+ unless ( $file_signature eq SIG_FILE ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Signature not found -- file is not a Deep DB" );
+ }
+
+ unless ( $sig_header eq SIG_HEADER ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Pre-1.00 file version found" );
+ }
+
+ unless ( $file_version == $this_file_version ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error(
+ "Wrong file version found - " . $file_version .
+ " - expected " . $this_file_version
+ );
+ }
+
+ my $buffer2 = $self->storage->read_at( undef, $size );
+ my @values = unpack( 'C C C C', $buffer2 );
+
+ if ( @values != 4 || grep { !defined } @values ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error("Corrupted file - bad header");
+ }
+
+ #XXX Add warnings if values weren't set right
+ @{$self}{qw(byte_size max_buckets data_sector_size num_txns)} = @values;
+
+ # These shenanigans allow a value of up to 256 within a 'C' by storing value-1.
+ $self->{max_buckets} += 1;
+ $self->{data_sector_size} += 1;
+
+ my $bl = $self->txn_bitfield_len;
+
+ my $header_var = scalar(@values) + $bl + $STALE_SIZE * ($self->num_txns - 1) + 3 * $self->byte_size;
+ unless ( $size == $header_var ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Unexpected size found ($size <-> $header_var)." );
+ }
+
+ $self->set_trans_loc( $header_fixed + scalar(@values) );
+ $self->set_chains_loc( $header_fixed + scalar(@values) + $bl + $STALE_SIZE * ($self->num_txns - 1) );
+
+ return length($buffer) + length($buffer2);
+ }
+}
+
+sub _load_sector {
+ my $self = shift;
+ my ($offset) = @_;
+
+ # Add a catch for offset of 0 or 1
+ return if !$offset || $offset <= 1;
+
+ my $type = $self->storage->read_at( $offset, 1 );
+ return if $type eq chr(0);
+
+ if ( $type eq $self->SIG_ARRAY || $type eq $self->SIG_HASH ) {
+ return DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ # XXX Don't we need key_md5 here?
+ elsif ( $type eq $self->SIG_BLIST ) {
+ return DBM::Deep::Engine::Sector::BucketList->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ elsif ( $type eq $self->SIG_INDEX ) {
+ return DBM::Deep::Engine::Sector::Index->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ elsif ( $type eq $self->SIG_NULL ) {
+ return DBM::Deep::Engine::Sector::Null->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ elsif ( $type eq $self->SIG_DATA ) {
+ return DBM::Deep::Engine::Sector::Scalar->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ # This was deleted from under us, so just return and let the caller figure it out.
+ elsif ( $type eq $self->SIG_FREE ) {
+ return;
+ }
+
+ DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );
+}
+
+sub _apply_digest {
+ my $self = shift;
+ return $self->{digest}->(@_);
+}
+
+sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
+sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
+sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
+
+sub _add_free_sector {
+ my $self = shift;
+ my ($multiple, $offset, $size) = @_;
+
+ my $chains_offset = $multiple * $self->byte_size;
+
+ my $storage = $self->storage;
+
+ # Increment staleness.
+ # XXX Can this increment+modulo be done by "&= 0x1" ?
+ my $staleness = unpack( $StP{$STALE_SIZE}, $storage->read_at( $offset + SIG_SIZE, $STALE_SIZE ) );
+ $staleness = ($staleness + 1 ) % ( 2 ** ( 8 * $STALE_SIZE ) );
+ $storage->print_at( $offset + SIG_SIZE, pack( $StP{$STALE_SIZE}, $staleness ) );
+
+ my $old_head = $storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
+
+ $storage->print_at( $self->chains_loc + $chains_offset,
+ pack( $StP{$self->byte_size}, $offset ),
+ );
+
+ # Record the old head in the new sector after the signature and staleness counter
+ $storage->print_at( $offset + SIG_SIZE + $STALE_SIZE, $old_head );
+}
+
+sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
+sub _request_data_sector { shift->_request_sector( 1, @_ ) }
+sub _request_index_sector { shift->_request_sector( 2, @_ ) }
+
+sub _request_sector {
+ my $self = shift;
+ my ($multiple, $size) = @_;
+
+ my $chains_offset = $multiple * $self->byte_size;
+
+ my $old_head = $self->storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
+ my $loc = unpack( $StP{$self->byte_size}, $old_head );
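+ # $loc is the head of a singly-linked free chain threaded through the
+ # freed sectors themselves; 0 means there is no free sector of this kind.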
+
+ # We don't have any free sectors of the right size, so allocate a new one.
+ unless ( $loc ) {
+ my $offset = $self->storage->request_space( $size );
+
+ # Zero out the new sector. This also guarantees correct increases
+ # in the filesize.
+ $self->storage->print_at( $offset, chr(0) x $size );
+
+ return $offset;
+ }
+
+ # Read the new head after the signature and the staleness counter
+ my $new_head = $self->storage->read_at( $loc + SIG_SIZE + $STALE_SIZE, $self->byte_size );
+ $self->storage->print_at( $self->chains_loc + $chains_offset, $new_head );
+ $self->storage->print_at(
+ $loc + SIG_SIZE + $STALE_SIZE,
+ pack( $StP{$self->byte_size}, 0 ),
+ );
+
+ return $loc;
+}
+
+################################################################################
+
+sub storage { $_[0]{storage} }
+sub byte_size { $_[0]{byte_size} }
+sub hash_size { $_[0]{hash_size} }
+sub hash_chars { $_[0]{hash_chars} }
+sub num_txns { $_[0]{num_txns} }
+sub max_buckets { $_[0]{max_buckets} }
+sub blank_md5 { chr(0) x $_[0]->hash_size }
+sub data_sector_size { $_[0]{data_sector_size} }
+
+# This is a calculated value: the number of bytes needed to hold one bit per
+# transaction slot, i.e. ceil( num_txns / 8 ).
+sub txn_bitfield_len {
+ my $self = shift;
+ unless ( exists $self->{txn_bitfield_len} ) {
+ my $temp = ($self->num_txns) / 8;
+ if ( $temp > int( $temp ) ) {
+ $temp = int( $temp ) + 1;
+ }
+ $self->{txn_bitfield_len} = $temp;
+ }
+ return $self->{txn_bitfield_len};
+}
+
+sub trans_id { $_[0]{trans_id} }
+sub set_trans_id { $_[0]{trans_id} = $_[1] }
+
+sub trans_loc { $_[0]{trans_loc} }
+sub set_trans_loc { $_[0]{trans_loc} = $_[1] }
+
+sub chains_loc { $_[0]{chains_loc} }
+sub set_chains_loc { $_[0]{chains_loc} = $_[1] }
+
+sub cache { $_[0]{cache} ||= {} }
+sub clear_cache { %{$_[0]->cache} = () }
+
+sub _dump_file {
+ my $self = shift;
+
+ # Read the header
+ my $spot = $self->_read_file_header();
+
+ my %types = (
+ 0 => 'B',
+ 1 => 'D',
+ 2 => 'I',
+ );
+
+ my %sizes = (
+ 'D' => $self->data_sector_size,
+ 'B' => DBM::Deep::Engine::Sector::BucketList->new({engine=>$self,offset=>1})->size,
+ 'I' => DBM::Deep::Engine::Sector::Index->new({engine=>$self,offset=>1})->size,
+ );
+
+ my $return = "";
+
+ # Header values
+ $return .= "NumTxns: " . $self->num_txns . $/;
+
+ # Read the free sector chains
+ my %sectors;
+ foreach my $multiple ( 0 .. 2 ) {
+ $return .= "Chains($types{$multiple}):";
+ my $old_loc = $self->chains_loc + $multiple * $self->byte_size;
+ while ( 1 ) {
+ my $loc = unpack(
+ $StP{$self->byte_size},
+ $self->storage->read_at( $old_loc, $self->byte_size ),
+ );
+
+ # We're now out of free sectors of this kind.
+ unless ( $loc ) {
+ last;
+ }
+
+ $sectors{ $types{$multiple} }{ $loc } = undef;
+ $old_loc = $loc + SIG_SIZE + $STALE_SIZE;
+ $return .= " $loc";
+ }
+ $return .= $/;
+ }
+
+ SECTOR:
+ while ( $spot < $self->storage->{end} ) {
+ # Read each sector in order.
+ my $sector = $self->_load_sector( $spot );
+ if ( !$sector ) {
+ # Find it in the free-sectors that were found already
+ foreach my $type ( keys %sectors ) {
+ if ( exists $sectors{$type}{$spot} ) {
+ my $size = $sizes{$type};
+ $return .= sprintf "%08d: %s %04d\n", $spot, 'F' . $type, $size;
+ $spot += $size;
+ next SECTOR;
+ }
+ }
+
+ die "********\n$return\nDidn't find free sector for $spot in chains\n********\n";
+ }
+ else {
+ $return .= sprintf "%08d: %s %04d", $spot, $sector->type, $sector->size;
+ if ( $sector->type eq 'D' ) {
+ $return .= ' ' . $sector->data;
+ }
+ elsif ( $sector->type eq 'A' || $sector->type eq 'H' ) {
+ $return .= ' REF: ' . $sector->get_refcount;
+ }
+ elsif ( $sector->type eq 'B' ) {
+ foreach my $bucket ( $sector->chopped_up ) {
+ $return .= "\n ";
+ $return .= sprintf "%08d", unpack($StP{$self->byte_size},
+ substr( $bucket->[-1], $self->hash_size, $self->byte_size),
+ );
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + $self->byte_size,
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ foreach my $txn ( 0 .. $self->num_txns - 2 ) {
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + 2 * $self->byte_size + $txn * ($self->byte_size + $STALE_SIZE),
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ }
+ }
+ }
+ $return .= $/;
+
+ $spot += $sector->size;
+ }
+ }
+
+ return $return;
+}
+
+################################################################################
+
+package DBM::Deep::Iterator;
+
+sub new {
+ my $class = shift;
+ my ($args) = @_;
+
+ my $self = bless {
+ breadcrumbs => [],
+ engine => $args->{engine},
+ base_offset => $args->{base_offset},
+ }, $class;
+
+ Scalar::Util::weaken( $self->{engine} );
+
+ return $self;
+}
+
+sub reset { $_[0]{breadcrumbs} = [] }
+
+sub get_sector_iterator {
+ my $self = shift;
+ my ($loc) = @_;
+
+ my $sector = $self->{engine}->_load_sector( $loc )
+ or return;
+
+ if ( $sector->isa( 'DBM::Deep::Engine::Sector::Index' ) ) {
+ return DBM::Deep::Iterator::Index->new({
+ iterator => $self,
+ sector => $sector,
+ });
+ }
+ elsif ( $sector->isa( 'DBM::Deep::Engine::Sector::BucketList' ) ) {
+ return DBM::Deep::Iterator::BucketList->new({
+ iterator => $self,
+ sector => $sector,
+ });
+ }
+
+ DBM::Deep->_throw_error( "get_sector_iterator(): Why did $loc make a $sector?" );
+}
+
+sub get_next_key {
+ my $self = shift;
+ my ($obj) = @_;
+
+ my $crumbs = $self->{breadcrumbs};
+ my $e = $self->{engine};
+
+ unless ( @$crumbs ) {
+ # This will be a Reference sector
+ my $sector = $e->_load_sector( $self->{base_offset} )
+ # If no sector is found, this must have been deleted from under us.
+ or return;
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
+
+ my $loc = $sector->get_blist_loc
+ or return;
+
+ push @$crumbs, $self->get_sector_iterator( $loc );
+ }
+
+ FIND_NEXT_KEY: {
+ # We're at the end.
+ unless ( @$crumbs ) {
+ $self->reset;
+ return;
+ }
+
+ my $iterator = $crumbs->[-1];
+
+ # This level is done.
+ if ( $iterator->at_end ) {
+ pop @$crumbs;
+ redo FIND_NEXT_KEY;
+ }
+
+ if ( $iterator->isa( 'DBM::Deep::Iterator::Index' ) ) {
+ # If we don't have any more, it will be caught at the
+ # prior check.
+ if ( my $next = $iterator->get_next_iterator ) {
+ push @$crumbs, $next;
+ }
+ redo FIND_NEXT_KEY;
+ }
+
+ unless ( $iterator->isa( 'DBM::Deep::Iterator::BucketList' ) ) {
+ DBM::Deep->_throw_error(
+ "Should have a bucketlist iterator here - instead have $iterator"
+ );
+ }
+
+ # At this point, we have a BucketList iterator
+ my $key = $iterator->get_next_key;
+ if ( defined $key ) {
+ return $key;
+ }
+ #XXX else { $iterator->set_to_end() } ?
+
+ # We hit the end of the bucketlist iterator, so redo
+ redo FIND_NEXT_KEY;
+ }
+
+ DBM::Deep->_throw_error( "get_next_key(): How did we get here?" );
+}
+
+package DBM::Deep::Iterator::Index;
+
+sub new {
+ my $self = bless $_[1] => $_[0];
+ $self->{curr_index} = 0;
+ return $self;
+}
+
+sub at_end {
+ my $self = shift;
+ return $self->{curr_index} >= $self->{iterator}{engine}->hash_chars;
+}
+
+sub get_next_iterator {
+ my $self = shift;
+
+ my $loc;
+ while ( !$loc ) {
+ return if $self->at_end;
+ $loc = $self->{sector}->get_entry( $self->{curr_index}++ );
+ }
+
+ return $self->{iterator}->get_sector_iterator( $loc );
+}
+
+package DBM::Deep::Iterator::BucketList;
+
+sub new {
+ my $self = bless $_[1] => $_[0];
+ $self->{curr_index} = 0;
+ return $self;
+}
+
+sub at_end {
+ my $self = shift;
+ return $self->{curr_index} >= $self->{iterator}{engine}->max_buckets;
+}
+
+sub get_next_key {
+ my $self = shift;
+
+ return if $self->at_end;
+
+ my $idx = $self->{curr_index}++;
+
+ my $data_loc = $self->{sector}->get_data_location_for({
+ allow_head => 1,
+ idx => $idx,
+ }) or return;
+
+ #XXX Do we want to add corruption checks here?
+ return $self->{sector}->get_key_for( $idx )->data;
+}
+
+package DBM::Deep::Engine::Sector;
+
+sub new {
+ my $self = bless $_[1], $_[0];
+ Scalar::Util::weaken( $self->{engine} );
+ $self->_init;
+ return $self;
+}
+
+#sub _init {}
+#sub clone { DBM::Deep->_throw_error( "Must be implemented in the child class" ); }
+
+sub engine { $_[0]{engine} }
+sub offset { $_[0]{offset} }
+sub type { $_[0]{type} }
+
+sub base_size {
+ my $self = shift;
+ return $self->engine->SIG_SIZE + $STALE_SIZE;
+}
+
+sub free {
+ my $self = shift;
+
+ my $e = $self->engine;
+
+ $e->storage->print_at( $self->offset, $e->SIG_FREE );
+ # Skip staleness counter
+ $e->storage->print_at( $self->offset + $self->base_size,
+ chr(0) x ($self->size - $self->base_size),
+ );
+
+ my $free_meth = $self->free_meth;
+ $e->$free_meth( $self->offset, $self->size );
+
+ return;
+}
+
+package DBM::Deep::Engine::Sector::Data;
+
+our @ISA = qw( DBM::Deep::Engine::Sector );
+
+# This is in bytes
+sub size { $_[0]{engine}->data_sector_size }
+sub free_meth { return '_add_free_data_sector' }
+
+sub clone {
+ my $self = shift;
+ return ref($self)->new({
+ engine => $self->engine,
+ type => $self->type,
+ data => $self->data,
+ });
+}
+
+package DBM::Deep::Engine::Sector::Scalar;
+
+our @ISA = qw( DBM::Deep::Engine::Sector::Data );
+
+sub free {
+ my $self = shift;
+
+ my $chain_loc = $self->chain_loc;
+
+ $self->SUPER::free();
+
+ if ( $chain_loc ) {
+ $self->engine->_load_sector( $chain_loc )->free;
+ }
+
+ return;
+}
+
+sub type { $_[0]{engine}->SIG_DATA }
+sub _init {
+ my $self = shift;
+
+ my $engine = $self->engine;
+
+ unless ( $self->offset ) {
+ my $data_section = $self->size - $self->base_size - $engine->byte_size - 1;
+
+ $self->{offset} = $engine->_request_data_sector( $self->size );
+
+ my $data = delete $self->{data};
+ my $dlen = length $data;
+ my $continue = 1;
+ my $curr_offset = $self->offset;
+ while ( $continue ) {
+
+ my $next_offset = 0;
+
+ my ($leftover, $this_len, $chunk);
+ if ( $dlen > $data_section ) {
+ $leftover = 0;
+ $this_len = $data_section;
+ $chunk = substr( $data, 0, $this_len );
+
+ $dlen -= $data_section;
+ $next_offset = $engine->_request_data_sector( $self->size );
+ $data = substr( $data, $this_len );
+ }
+ else {
+ $leftover = $data_section - $dlen;
+ $this_len = $dlen;
+ $chunk = $data;
+
+ $continue = 0;
+ }
+
+ $engine->storage->print_at( $curr_offset, $self->type ); # Sector type
+ # Skip staleness
+ $engine->storage->print_at( $curr_offset + $self->base_size,
+ pack( $StP{$engine->byte_size}, $next_offset ), # Chain loc
+ pack( $StP{1}, $this_len ), # Data length
+ $chunk, # Data to be stored in this sector
+ chr(0) x $leftover, # Zero-fill the rest
+ );
+
+ $curr_offset = $next_offset;
+ }
+
+ return;
+ }
+}
+
+sub data_length {
+ my $self = shift;
+
+ my $buffer = $self->engine->storage->read_at(
+ $self->offset + $self->base_size + $self->engine->byte_size, 1
+ );
+
+ return unpack( $StP{1}, $buffer );
+}
+
+sub chain_loc {
+ my $self = shift;
+ return unpack(
+ $StP{$self->engine->byte_size},
+ $self->engine->storage->read_at(
+ $self->offset + $self->base_size,
+ $self->engine->byte_size,
+ ),
+ );
+}
+
+sub data {
+ my $self = shift;
+# my ($args) = @_;
+# $args ||= {};
+
+ my $data;
+ while ( 1 ) {
+ my $chain_loc = $self->chain_loc;
+
+ $data .= $self->engine->storage->read_at(
+ $self->offset + $self->base_size + $self->engine->byte_size + 1, $self->data_length,
+ );
+
+ last unless $chain_loc;
+
+ $self = $self->engine->_load_sector( $chain_loc );
+ }
+
+ return $data;
+}
+
+package DBM::Deep::Engine::Sector::Null;
+
+our @ISA = qw( DBM::Deep::Engine::Sector::Data );
+
+sub type { $_[0]{engine}->SIG_NULL }
+sub data_length { 0 }
+sub data { return }
+
+sub _init {
+ my $self = shift;
+
+ my $engine = $self->engine;
+
+ unless ( $self->offset ) {
+ my $leftover = $self->size - $self->base_size - 1 * $engine->byte_size - 1;
+
+ $self->{offset} = $engine->_request_data_sector( $self->size );
+ $engine->storage->print_at( $self->offset, $self->type ); # Sector type
+ # Skip staleness counter
+ $engine->storage->print_at( $self->offset + $self->base_size,
+ pack( $StP{$engine->byte_size}, 0 ), # Chain loc
+ pack( $StP{1}, $self->data_length ), # Data length
+ chr(0) x $leftover, # Zero-fill the rest
+ );
+
+ return;
+ }
+}
+
+package DBM::Deep::Engine::Sector::Reference;
+
+our @ISA = qw( DBM::Deep::Engine::Sector::Data );
+
+sub _init {
+ my $self = shift;
+
+ my $e = $self->engine;
+
+ unless ( $self->offset ) {
+ my $classname = Scalar::Util::blessed( delete $self->{data} );
+ my $leftover = $self->size - $self->base_size - 3 * $e->byte_size;
+
+ my $class_offset = 0;
+ if ( defined $classname ) {
+ my $class_sector = DBM::Deep::Engine::Sector::Scalar->new({
+ engine => $e,
+ data => $classname,
+ });
+ $class_offset = $class_sector->offset;
+ }
+
+ $self->{offset} = $e->_request_data_sector( $self->size );
+ $e->storage->print_at( $self->offset, $self->type ); # Sector type
+ # Skip staleness counter
+ $e->storage->print_at( $self->offset + $self->base_size,
+ pack( $StP{$e->byte_size}, 0 ), # Index/BList loc
+ pack( $StP{$e->byte_size}, $class_offset ), # Classname loc
+ pack( $StP{$e->byte_size}, 1 ), # Initial refcount
+ chr(0) x $leftover, # Zero-fill the rest
+ );
+ }
+ else {
+ $self->{type} = $e->storage->read_at( $self->offset, 1 );
+ }
+
+ $self->{staleness} = unpack(
+ $StP{$STALE_SIZE},
+ $e->storage->read_at( $self->offset + $e->SIG_SIZE, $STALE_SIZE ),
+ );
+
+ return;
+}
+
+sub staleness { $_[0]{staleness} }
+
+sub get_data_location_for {
+ my $self = shift;
+ my ($args) = @_;
+
+ # Assume that the head is not allowed unless otherwise specified.
+ $args->{allow_head} = 0 unless exists $args->{allow_head};
+
+ # Assume we don't create a new blist location unless otherwise specified.
+ $args->{create} = 0 unless exists $args->{create};
+
+ my $blist = $self->get_bucket_list({
+ key_md5 => $args->{key_md5},
+ key => $args->{key},
+ create => $args->{create},
+ });
+ return unless $blist && $blist->{found};
+
+ # At this point, $blist knows where the md5 is. What it -doesn't- know yet
+ # is whether or not this transaction has this key. That's part of the next
+ # function call.
+ my $location = $blist->get_data_location_for({
+ allow_head => $args->{allow_head},
+ }) or return;
+
+ return $location;
+}
+
+sub get_data_for {
+ my $self = shift;
+ my ($args) = @_;
+
+ my $location = $self->get_data_location_for( $args )
+ or return;
+
+ return $self->engine->_load_sector( $location );
+}
+
+sub write_data {
+ my $self = shift;
+ my ($args) = @_;
+
+ my $blist = $self->get_bucket_list({
+ key_md5 => $args->{key_md5},
+ key => $args->{key},
+ create => 1,
+ }) or DBM::Deep->_throw_error( "How did write_data fail (no blist)?!" );
+
+ # Handle any transactional bookkeeping.
+ if ( $self->engine->trans_id ) {
+ if ( ! $blist->has_md5 ) {
+ $blist->mark_deleted({
+ trans_id => 0,
+ });
+ }
+ }
+ else {
+ my @trans_ids = $self->engine->get_running_txn_ids;
+ if ( $blist->has_md5 ) {
+ if ( @trans_ids ) {
+ my $old_value = $blist->get_data_for;
+ foreach my $other_trans_id ( @trans_ids ) {
+ next if $blist->get_data_location_for({
+ trans_id => $other_trans_id,
+ allow_head => 0,
+ });
+ $blist->write_md5({
+ trans_id => $other_trans_id,
+ key => $args->{key},
+ key_md5 => $args->{key_md5},
+ value => $old_value->clone,
+ });
+ }
+ }
+ }
+ else {
+ if ( @trans_ids ) {
+ foreach my $other_trans_id ( @trans_ids ) {
+ #XXX This doesn't seem possible to ever happen . . .
+ next if $blist->get_data_location_for({ trans_id => $other_trans_id, allow_head => 0 });
+ $blist->mark_deleted({
+ trans_id => $other_trans_id,
+ });
+ }
+ }
+ }
+ }
+
+ #XXX Is this safe to do transactionally?
+ # Free the place we're about to write to.
+ if ( $blist->get_data_location_for({ allow_head => 0 }) ) {
+ $blist->get_data_for({ allow_head => 0 })->free;
+ }
+
+ $blist->write_md5({
+ key => $args->{key},
+ key_md5 => $args->{key_md5},
+ value => $args->{value},
+ });
+}
+
+sub delete_key {
+ my $self = shift;
+ my ($args) = @_;
+
+ # XXX What should happen if this fails?
+ my $blist = $self->get_bucket_list({
+ key_md5 => $args->{key_md5},
+ }) or DBM::Deep->_throw_error( "How did delete_key fail (no blist)?!" );
+
+ # Save the location so that we can free the data
+ my $location = $blist->get_data_location_for({
+ allow_head => 0,
+ });
+ my $old_value = $location && $self->engine->_load_sector( $location );
+
+ my @trans_ids = $self->engine->get_running_txn_ids;
+
+ # If we're the HEAD and there are running txns, then we need to clone this value to the other
+ # transactions to preserve Isolation.
+ if ( $self->engine->trans_id == 0 ) {
+ if ( @trans_ids ) {
+ foreach my $other_trans_id ( @trans_ids ) {
+ next if $blist->get_data_location_for({ trans_id => $other_trans_id, allow_head => 0 });
+ $blist->write_md5({
+ trans_id => $other_trans_id,
+ key => $args->{key},
+ key_md5 => $args->{key_md5},
+ value => $old_value->clone,
+ });
+ }
+ }
+ }
+
+ my $data;
+ if ( @trans_ids ) {
+ $blist->mark_deleted( $args );
+
+ if ( $old_value ) {
+ $data = $old_value->data({ export => 1 });
+ $old_value->free;
+ }
+ }
+ else {
+ $data = $blist->delete_md5( $args );
+ }
+
+ return $data;
+}
+
+sub get_blist_loc {
+ my $self = shift;
+
+ my $e = $self->engine;
+ my $blist_loc = $e->storage->read_at( $self->offset + $self->base_size, $e->byte_size );
+ return unpack( $StP{$e->byte_size}, $blist_loc );
+}
+
+sub get_bucket_list {
+ my $self = shift;
+ my ($args) = @_;
+ $args ||= {};
+
+ # XXX Add in check here for recycling?
+
+ my $engine = $self->engine;
+
+ my $blist_loc = $self->get_blist_loc;
+
+ # There's no index or blist yet
+ unless ( $blist_loc ) {
+ return unless $args->{create};
+
+ my $blist = DBM::Deep::Engine::Sector::BucketList->new({
+ engine => $engine,
+ key_md5 => $args->{key_md5},
+ });
+
+ $engine->storage->print_at( $self->offset + $self->base_size,
+ pack( $StP{$engine->byte_size}, $blist->offset ),
+ );
+
+ return $blist;
+ }
+
+ my $sector = $engine->_load_sector( $blist_loc )
+ or DBM::Deep->_throw_error( "Cannot read sector at $blist_loc in get_bucket_list()" );
+ my $i = 0;
+ my $last_sector = undef;
+ while ( $sector->isa( 'DBM::Deep::Engine::Sector::Index' ) ) {
+ $blist_loc = $sector->get_entry( ord( substr( $args->{key_md5}, $i++, 1 ) ) );
+ $last_sector = $sector;
+ if ( $blist_loc ) {
+ $sector = $engine->_load_sector( $blist_loc )
+ or DBM::Deep->_throw_error( "Cannot read sector at $blist_loc in get_bucket_list()" );
+ }
+ else {
+ $sector = undef;
+ last;
+ }
+ }
+
+ # This means we went through the Index sector(s) and found an empty slot
+ unless ( $sector ) {
+ return unless $args->{create};
+
+ DBM::Deep->_throw_error( "No last_sector when attempting to build a new entry" )
+ unless $last_sector;
+
+ my $blist = DBM::Deep::Engine::Sector::BucketList->new({
+ engine => $engine,
+ key_md5 => $args->{key_md5},
+ });
+
+ $last_sector->set_entry( ord( substr( $args->{key_md5}, $i - 1, 1 ) ) => $blist->offset );
+
+ return $blist;
+ }
+
+ $sector->find_md5( $args->{key_md5} );
+
+ # See whether or not we need to reindex the bucketlist
+ # Yes, the double-braces are there for a reason. if() doesn't create a redo-able block,
+ # so we have to create a bare block within the if() for redo-purposes. Patch and idea
+ # submitted by sprout@cpan.org. -RobK, 2008-01-09
+ if ( !$sector->has_md5 && $args->{create} && $sector->{idx} == -1 ) {{
+ my $redo;
+
+ my $new_index = DBM::Deep::Engine::Sector::Index->new({
+ engine => $engine,
+ });
+
+ my %blist_cache;
+ #XXX q.v. the comments for this function.
+ foreach my $entry ( $sector->chopped_up ) {
+ my ($spot, $md5) = @{$entry};
+ my $idx = ord( substr( $md5, $i, 1 ) );
+
+ # XXX This is inefficient
+ my $blist = $blist_cache{$idx}
+ ||= DBM::Deep::Engine::Sector::BucketList->new({
+ engine => $engine,
+ });
+
+ $new_index->set_entry( $idx => $blist->offset );
+
+ my $new_spot = $blist->write_at_next_open( $md5 );
+ $engine->reindex_entry( $spot => $new_spot );
+ }
+
+ # Handle the new item separately.
+ {
+ my $idx = ord( substr( $args->{key_md5}, $i, 1 ) );
+
+ # If all the previous blist's items have been thrown into one
+ # blist and the new item belongs in there too, we need
+ # another index.
+ if ( keys %blist_cache == 1 and each %blist_cache == $idx ) {
+ ++$i, ++$redo;
+ } else {
+ my $blist = $blist_cache{$idx}
+ ||= DBM::Deep::Engine::Sector::BucketList->new({
+ engine => $engine,
+ });
+
+ $new_index->set_entry( $idx => $blist->offset );
+
+ #XXX THIS IS HACKY!
+ $blist->find_md5( $args->{key_md5} );
+ $blist->write_md5({
+ key => $args->{key},
+ key_md5 => $args->{key_md5},
+ value => DBM::Deep::Engine::Sector::Null->new({
+ engine => $engine,
+ data => undef,
+ }),
+ });
+ }
+ }
+
+ if ( $last_sector ) {
+ $last_sector->set_entry(
+ ord( substr( $args->{key_md5}, $i - 1, 1 ) ),
+ $new_index->offset,
+ );
+ } else {
+ $engine->storage->print_at( $self->offset + $self->base_size,
+ pack( $StP{$engine->byte_size}, $new_index->offset ),
+ );
+ }
+
+ $sector->clear;
+ $sector->free;
+
+ if ( $redo ) {
+ (undef, $sector) = %blist_cache;
+ $last_sector = $new_index;
+ redo;
+ }
+
+ $sector = $blist_cache{ ord( substr( $args->{key_md5}, $i, 1 ) ) };
+ $sector->find_md5( $args->{key_md5} );
+ }}
+
+ return $sector;
+}
+
+sub get_class_offset {
+ my $self = shift;
+
+ my $e = $self->engine;
+ return unpack(
+ $StP{$e->byte_size},
+ $e->storage->read_at(
+ $self->offset + $self->base_size + 1 * $e->byte_size, $e->byte_size,
+ ),
+ );
+}
+
+sub get_classname {
+ my $self = shift;
+
+ my $class_offset = $self->get_class_offset;
+
+ return unless $class_offset;
+
+ return $self->engine->_load_sector( $class_offset )->data;
+}
+
+sub data {
+ my $self = shift;
+ my ($args) = @_;
+ $args ||= {};
+
+ my $obj;
+ unless ( $obj = $self->engine->cache->{ $self->offset } ) {
+ $obj = DBM::Deep->new({
+ type => $self->type,
+ base_offset => $self->offset,
+ staleness => $self->staleness,
+ storage => $self->engine->storage,
+ engine => $self->engine,
+ });
+
+ if ( $self->engine->storage->{autobless} ) {
+ my $classname = $self->get_classname;
+ if ( defined $classname ) {
+ bless $obj, $classname;
+ }
+ }
+
+ $self->engine->cache->{$self->offset} = $obj;
+ }
+
+ # We're not exporting, so just return.
+ unless ( $args->{export} ) {
+ return $obj;
+ }
+
+ # We shouldn't export if this is still referred to.
+ if ( $self->get_refcount > 1 ) {
+ return $obj;
+ }
+
+ return $obj->export;
+}
+
+sub free {
+ my $self = shift;
+
+ # We're not ready to be removed yet.
+ if ( $self->decrement_refcount > 0 ) {
+ return;
+ }
+
+ # Rebless the object into DBM::Deep::Null.
+ eval { %{ $self->engine->cache->{ $self->offset } } = (); };
+ eval { @{ $self->engine->cache->{ $self->offset } } = (); };
+ bless $self->engine->cache->{ $self->offset }, 'DBM::Deep::Null';
+ delete $self->engine->cache->{ $self->offset };
+
+ my $blist_loc = $self->get_blist_loc;
+ $self->engine->_load_sector( $blist_loc )->free if $blist_loc;
+
+ my $class_loc = $self->get_class_offset;
+ $self->engine->_load_sector( $class_loc )->free if $class_loc;
+
+ $self->SUPER::free();
+}
+
+sub increment_refcount {
+ my $self = shift;
+
+ my $refcount = $self->get_refcount;
+
+ $refcount++;
+
+ $self->write_refcount( $refcount );
+
+ return $refcount;
+}
+
+sub decrement_refcount {
+ my $self = shift;
+
+ my $refcount = $self->get_refcount;
+
+ $refcount--;
+
+ $self->write_refcount( $refcount );
+
+ return $refcount;
+}
+
+sub get_refcount {
+ my $self = shift;
+
+ my $e = $self->engine;
+ return unpack(
+ $StP{$e->byte_size},
+ $e->storage->read_at(
+ $self->offset + $self->base_size + 2 * $e->byte_size, $e->byte_size,
+ ),
+ );
+}
+
+sub write_refcount {
+ my $self = shift;
+ my ($num) = @_;
+
+ my $e = $self->engine;
+ $e->storage->print_at(
+ $self->offset + $self->base_size + 2 * $e->byte_size,
+ pack( $StP{$e->byte_size}, $num ),
+ );
+}
+
+package DBM::Deep::Engine::Sector::BucketList;
+
+our @ISA = qw( DBM::Deep::Engine::Sector );
+
+sub _init {
+ my $self = shift;
+
+ my $engine = $self->engine;
+
+ unless ( $self->offset ) {
+ my $leftover = $self->size - $self->base_size;
+
+ $self->{offset} = $engine->_request_blist_sector( $self->size );
+ $engine->storage->print_at( $self->offset, $engine->SIG_BLIST ); # Sector type
+ # Skip staleness counter
+ $engine->storage->print_at( $self->offset + $self->base_size,
+ chr(0) x $leftover, # Zero-fill the data
+ );
+ }
+
+ if ( $self->{key_md5} ) {
+ $self->find_md5;
+ }
+
+ return $self;
+}
+
+sub clear {
+ my $self = shift;
+ $self->engine->storage->print_at( $self->offset + $self->base_size,
+ chr(0) x ($self->size - $self->base_size), # Zero-fill the data
+ );
+}
+
+sub size {
+ my $self = shift;
+ unless ( $self->{size} ) {
+ my $e = $self->engine;
+ # Base + numbuckets * bucketsize
+ $self->{size} = $self->base_size + $e->max_buckets * $self->bucket_size;
+ }
+ return $self->{size};
+}
+
+sub free_meth { return '_add_free_blist_sector' }
+
+sub free {
+ my $self = shift;
+
+ my $e = $self->engine;
+ foreach my $bucket ( $self->chopped_up ) {
+ my $rest = $bucket->[-1];
+
+ # Delete the keysector
+ my $l = unpack( $StP{$e->byte_size}, substr( $rest, $e->hash_size, $e->byte_size ) );
+ my $s = $e->_load_sector( $l ); $s->free if $s;
+
+ # Delete the HEAD sector
+ $l = unpack( $StP{$e->byte_size},
+ substr( $rest,
+ $e->hash_size + $e->byte_size,
+ $e->byte_size,
+ ),
+ );
+ $s = $e->_load_sector( $l ); $s->free if $s;
+
+ foreach my $txn ( 0 .. $e->num_txns - 2 ) {
+ my $l = unpack( $StP{$e->byte_size},
+ substr( $rest,
+ $e->hash_size + 2 * $e->byte_size + $txn * ($e->byte_size + $STALE_SIZE),
+ $e->byte_size,
+ ),
+ );
+ my $s = $e->_load_sector( $l ); $s->free if $s;
+ }
+ }
+
+ $self->SUPER::free();
+}
+
+sub bucket_size {
+ my $self = shift;
+ unless ( $self->{bucket_size} ) {
+ my $e = $self->engine;
+ # Key + head (location) + transactions (location + staleness-counter)
+ my $location_size = $e->byte_size + $e->byte_size + ($e->num_txns - 1) * ($e->byte_size + $STALE_SIZE);
+ $self->{bucket_size} = $e->hash_size + $location_size;
+ }
+ return $self->{bucket_size};
+}
+
+# XXX This is such a poor hack. I need to rethink this code.
+sub chopped_up {
+ my $self = shift;
+
+ my $e = $self->engine;
+
+ my @buckets;
+ foreach my $idx ( 0 .. $e->max_buckets - 1 ) {
+ my $spot = $self->offset + $self->base_size + $idx * $self->bucket_size;
+ my $md5 = $e->storage->read_at( $spot, $e->hash_size );
+
+ #XXX If we're chopping, why would we ever have the blank_md5?
+ last if $md5 eq $e->blank_md5;
+
+ my $rest = $e->storage->read_at( undef, $self->bucket_size - $e->hash_size );
+ push @buckets, [ $spot, $md5 . $rest ];
+ }
+
+ return @buckets;
+}
+
+sub write_at_next_open {
+ my $self = shift;
+ my ($entry) = @_;
+
+ #XXX This is such a hack!
+ $self->{_next_open} = 0 unless exists $self->{_next_open};
+
+ my $spot = $self->offset + $self->base_size + $self->{_next_open}++ * $self->bucket_size;
+ $self->engine->storage->print_at( $spot, $entry );
+
+ return $spot;
+}
+
+sub has_md5 {
+ my $self = shift;
+ unless ( exists $self->{found} ) {
+ $self->find_md5;
+ }
+ return $self->{found};
+}
+
+sub find_md5 {
+ my $self = shift;
+
+ $self->{found} = undef;
+ $self->{idx} = -1;
+
+ if ( @_ ) {
+ $self->{key_md5} = shift;
+ }
+
+ # If we don't have an MD5, then what are we supposed to do?
+ unless ( exists $self->{key_md5} ) {
+ DBM::Deep->_throw_error( "Cannot find_md5 without a key_md5 set" );
+ }
+
+ my $e = $self->engine;
+ foreach my $idx ( 0 .. $e->max_buckets - 1 ) {
+ my $potential = $e->storage->read_at(
+ $self->offset + $self->base_size + $idx * $self->bucket_size, $e->hash_size,
+ );
+
+ if ( $potential eq $e->blank_md5 ) {
+ $self->{idx} = $idx;
+ return;
+ }
+
+ if ( $potential eq $self->{key_md5} ) {
+ $self->{found} = 1;
+ $self->{idx} = $idx;
+ return;
+ }
+ }
+
+ return;
+}
+
+sub write_md5 {
+ my $self = shift;
+ my ($args) = @_;
+
+ DBM::Deep->_throw_error( "write_md5: no key" ) unless exists $args->{key};
+ DBM::Deep->_throw_error( "write_md5: no key_md5" ) unless exists $args->{key_md5};
+ DBM::Deep->_throw_error( "write_md5: no value" ) unless exists $args->{value};
+
+ my $engine = $self->engine;
+
+ $args->{trans_id} = $engine->trans_id unless exists $args->{trans_id};
+
+ my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
+ $engine->add_entry( $args->{trans_id}, $spot );
+
+ unless ($self->{found}) {
+ my $key_sector = DBM::Deep::Engine::Sector::Scalar->new({
+ engine => $engine,
+ data => $args->{key},
+ });
+
+ $engine->storage->print_at( $spot,
+ $args->{key_md5},
+ pack( $StP{$engine->byte_size}, $key_sector->offset ),
+ );
+ }
+
+ my $loc = $spot
+ + $engine->hash_size
+ + $engine->byte_size;
+
+ if ( $args->{trans_id} ) {
+ $loc += $engine->byte_size + ($args->{trans_id} - 1) * ( $engine->byte_size + $STALE_SIZE );
+
+ $engine->storage->print_at( $loc,
+ pack( $StP{$engine->byte_size}, $args->{value}->offset ),
+ pack( $StP{$STALE_SIZE}, $engine->get_txn_staleness_counter( $args->{trans_id} ) ),
+ );
+ }
+ else {
+ $engine->storage->print_at( $loc,
+ pack( $StP{$engine->byte_size}, $args->{value}->offset ),
+ );
+ }
+}
+
+sub mark_deleted {
+ my $self = shift;
+ my ($args) = @_;
+ $args ||= {};
+
+ my $engine = $self->engine;
+
+ $args->{trans_id} = $engine->trans_id unless exists $args->{trans_id};
+
+ my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
+ $engine->add_entry( $args->{trans_id}, $spot );
+
+ my $loc = $spot
+ + $engine->hash_size
+ + $engine->byte_size;
+
+ if ( $args->{trans_id} ) {
+ $loc += $engine->byte_size + ($args->{trans_id} - 1) * ( $engine->byte_size + $STALE_SIZE );
+
+ $engine->storage->print_at( $loc,
+ pack( $StP{$engine->byte_size}, 1 ), # 1 is the marker for deleted
+ pack( $StP{$STALE_SIZE}, $engine->get_txn_staleness_counter( $args->{trans_id} ) ),
+ );
+ }
+ else {
+ $engine->storage->print_at( $loc,
+ pack( $StP{$engine->byte_size}, 1 ), # 1 is the marker for deleted
+ );
+ }
+
+}
+
+sub delete_md5 {
+ my $self = shift;
+ my ($args) = @_;
+
+ my $engine = $self->engine;
+ return undef unless $self->{found};
+
+ # Save the location so that we can free the data
+ my $location = $self->get_data_location_for({
+ allow_head => 0,
+ });
+ my $key_sector = $self->get_key_for;
+
+ my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
+ $engine->storage->print_at( $spot,
+ $engine->storage->read_at(
+ $spot + $self->bucket_size,
+ $self->bucket_size * ( $engine->max_buckets - $self->{idx} - 1 ),
+ ),
+ chr(0) x $self->bucket_size,
+ );
+
+ $key_sector->free;
+
+ my $data_sector = $self->engine->_load_sector( $location );
+ my $data = $data_sector->data({ export => 1 });
+ $data_sector->free;
+
+ return $data;
+}
+
+sub get_data_location_for {
+ my $self = shift;
+ my ($args) = @_;
+ $args ||= {};
+
+ $args->{allow_head} = 0 unless exists $args->{allow_head};
+ $args->{trans_id} = $self->engine->trans_id unless exists $args->{trans_id};
+ $args->{idx} = $self->{idx} unless exists $args->{idx};
+
+ my $e = $self->engine;
+
+ my $spot = $self->offset + $self->base_size
+ + $args->{idx} * $self->bucket_size
+ + $e->hash_size
+ + $e->byte_size;
+
+ if ( $args->{trans_id} ) {
+ $spot += $e->byte_size + ($args->{trans_id} - 1) * ( $e->byte_size + $STALE_SIZE );
+ }
+
+ my $buffer = $e->storage->read_at(
+ $spot,
+ $e->byte_size + $STALE_SIZE,
+ );
+ my ($loc, $staleness) = unpack( $StP{$e->byte_size} . ' ' . $StP{$STALE_SIZE}, $buffer );
+
+ # XXX Merge the two if-clauses below
+ if ( $args->{trans_id} ) {
+ # We have found an entry that is old, so get rid of it
+ if ( $staleness != (my $s = $e->get_txn_staleness_counter( $args->{trans_id} ) ) ) {
+ $e->storage->print_at(
+ $spot,
+ pack( $StP{$e->byte_size} . ' ' . $StP{$STALE_SIZE}, (0) x 2 ),
+ );
+ $loc = 0;
+ }
+ }
+
+ # If we're in a transaction and we never wrote to this location, try the
+ # HEAD instead.
+ if ( $args->{trans_id} && !$loc && $args->{allow_head} ) {
+ return $self->get_data_location_for({
+ trans_id => 0,
+ allow_head => 1,
+ idx => $args->{idx},
+ });
+ }
+
+ return $loc <= 1 ? 0 : $loc;
+}
+
+sub get_data_for {
+ my $self = shift;
+ my ($args) = @_;
+ $args ||= {};
+
+ return unless $self->{found};
+ my $location = $self->get_data_location_for({
+ allow_head => $args->{allow_head},
+ });
+ return $self->engine->_load_sector( $location );
+}
+
+sub get_key_for {
+ my $self = shift;
+ my ($idx) = @_;
+ $idx = $self->{idx} unless defined $idx;
+
+ if ( $idx >= $self->engine->max_buckets ) {
+ DBM::Deep->_throw_error( "get_key_for(): Attempting to retrieve $idx" );
+ }
+
+ my $location = $self->engine->storage->read_at(
+ $self->offset + $self->base_size + $idx * $self->bucket_size + $self->engine->hash_size,
+ $self->engine->byte_size,
+ );
+ $location = unpack( $StP{$self->engine->byte_size}, $location );
+ DBM::Deep->_throw_error( "get_key_for: No location?" ) unless $location;
+
+ return $self->engine->_load_sector( $location );
+}
+
+package DBM::Deep::Engine::Sector::Index;
+
+our @ISA = qw( DBM::Deep::Engine::Sector );
+
+sub _init {
+ my $self = shift;
+
+ my $engine = $self->engine;
+
+ unless ( $self->offset ) {
+ my $leftover = $self->size - $self->base_size;
+
+ $self->{offset} = $engine->_request_index_sector( $self->size );
+ $engine->storage->print_at( $self->offset, $engine->SIG_INDEX ); # Sector type
+ # Skip staleness counter
+ $engine->storage->print_at( $self->offset + $self->base_size,
+ chr(0) x $leftover, # Zero-fill the rest
+ );
+ }
+
+ return $self;
+}
+
+#XXX Change here
+sub size {
+ my $self = shift;
+ unless ( $self->{size} ) {
+ my $e = $self->engine;
+ $self->{size} = $self->base_size + $e->byte_size * $e->hash_chars;
+ }
+ return $self->{size};
+}
+
+sub free_meth { return '_add_free_index_sector' }
+
+sub free {
+ my $self = shift;
+ my $e = $self->engine;
+
+ for my $i ( 0 .. $e->hash_chars - 1 ) {
+ my $l = $self->get_entry( $i ) or next;
+ $e->_load_sector( $l )->free;
+ }
+
+ $self->SUPER::free();
+}
+
+sub _loc_for {
+ my $self = shift;
+ my ($idx) = @_;
+ return $self->offset + $self->base_size + $idx * $self->engine->byte_size;
+}
+
+sub get_entry {
+ my $self = shift;
+ my ($idx) = @_;
+
+ my $e = $self->engine;
+
+ DBM::Deep->_throw_error( "get_entry: Out of range ($idx)" )
+ if $idx < 0 || $idx >= $e->hash_chars;
+
+ return unpack(
+ $StP{$e->byte_size},
+ $e->storage->read_at( $self->_loc_for( $idx ), $e->byte_size ),
+ );
+}
+
+sub set_entry {
+ my $self = shift;
+ my ($idx, $loc) = @_;
+
+ my $e = $self->engine;
+
+ DBM::Deep->_throw_error( "set_entry: Out of range ($idx)" )
+ if $idx < 0 || $idx >= $e->hash_chars;
+
+ $self->engine->storage->print_at(
+ $self->_loc_for( $idx ),
+ pack( $StP{$e->byte_size}, $loc ),
+ );
+}
+
+# This was copied from MARCEL's Class::Null. However, I couldn't use it because
+# I need an undef value, not an implementation of the Null Class pattern.
+package DBM::Deep::Null;
+
+use overload
+ 'bool' => sub { undef },
+ '""' => sub { undef },
+ '0+' => sub { undef },
+ fallback => 1,
+ nomethod => 'AUTOLOAD';
+
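+# Unknown overloaded operations (routed via nomethod) and any method call
+# fall through to AUTOLOAD and return nothing, so a freed object quietly
+# behaves like undef.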
+sub AUTOLOAD { return; }
+
+1;
+__END__
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/File.pm b/Master/tlpkg/tlperl/lib/DBM/Deep/File.pm
new file mode 100755
index 00000000000..13342d82465
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/File.pm
@@ -0,0 +1,277 @@
+package DBM::Deep::File;
+
+use 5.006_000;
+
+use strict;
+use warnings;
+
+our $VERSION = q(1.0013);
+
+use Fcntl qw( :DEFAULT :flock :seek );
+
+use constant DEBUG => 0;
+
+sub new {
+ my $class = shift;
+ my ($args) = @_;
+
+ my $self = bless {
+ autobless => 1,
+ autoflush => 1,
+ end => 0,
+ fh => undef,
+ file => undef,
+ file_offset => 0,
+ locking => 1,
+ locked => 0,
+#XXX Migrate this to the engine, where it really belongs.
+ filter_store_key => undef,
+ filter_store_value => undef,
+ filter_fetch_key => undef,
+ filter_fetch_value => undef,
+ }, $class;
+
+ # Grab the parameters we want to use
+ foreach my $param ( keys %$self ) {
+ next unless exists $args->{$param};
+ $self->{$param} = $args->{$param};
+ }
+
+ if ( $self->{fh} && !$self->{file_offset} ) {
+ $self->{file_offset} = tell( $self->{fh} );
+ }
+
+ $self->open unless $self->{fh};
+
+ return $self;
+}
+
+sub open {
+ my $self = shift;
+
+ # Adding O_BINARY should remove the need for the binmode below. However,
+ # I'm not going to remove it because I don't have the Win32 chops to be
+ # absolutely certain everything will be ok.
+ my $flags = O_CREAT | O_BINARY;
+
+ if ( !-e $self->{file} || -w _ ) {
+ $flags |= O_RDWR;
+ }
+ else {
+ $flags |= O_RDONLY;
+ }
+
+ my $fh;
+ sysopen( $fh, $self->{file}, $flags )
+ or die "DBM::Deep: Cannot sysopen file '$self->{file}': $!\n";
+ $self->{fh} = $fh;
+
+ # Even though we use O_BINARY, better be safe than sorry.
+ binmode $fh;
+
+ if ($self->{autoflush}) {
+ my $old = select $fh;
+ $|=1;
+ select $old;
+ }
+
+ return 1;
+}
+
+sub close {
+ my $self = shift;
+
+ if ( $self->{fh} ) {
+ close $self->{fh};
+ $self->{fh} = undef;
+ }
+
+ return 1;
+}
+
+sub set_inode {
+ my $self = shift;
+
+ unless ( defined $self->{inode} ) {
+ my @stats = stat($self->{fh});
+ $self->{inode} = $stats[1];
+ $self->{end} = $stats[7];
+ }
+
+ return 1;
+}
+
+sub print_at {
+ my $self = shift;
+ my $loc = shift;
+
+ local ($/,$\);
+
+ my $fh = $self->{fh};
+ if ( defined $loc ) {
+ seek( $fh, $loc + $self->{file_offset}, SEEK_SET );
+ }
+
+ if ( DEBUG ) {
+ my $caller = join ':', (caller)[0,2];
+ my $len = length( join '', @_ );
+ warn "($caller) print_at( " . (defined $loc ? $loc : '<undef>') . ", $len )\n";
+ }
+
+ print( $fh @_ ) or die "Internal Error (print_at($loc)): $!\n";
+
+ return 1;
+}
+
+sub read_at {
+ my $self = shift;
+ my ($loc, $size) = @_;
+
+ local ($/,$\);
+
+ my $fh = $self->{fh};
+ if ( defined $loc ) {
+ seek( $fh, $loc + $self->{file_offset}, SEEK_SET );
+ }
+
+ if ( DEBUG ) {
+ my $caller = join ':', (caller)[0,2];
+ warn "($caller) read_at( " . (defined $loc ? $loc : '<undef>') . ", $size )\n";
+ }
+
+ my $buffer;
+ read( $fh, $buffer, $size);
+
+ return $buffer;
+}
+
+sub DESTROY {
+ my $self = shift;
+ return unless $self;
+
+ $self->close;
+
+ return;
+}
+
+sub request_space {
+ my $self = shift;
+ my ($size) = @_;
+
+ #XXX Do I need to reset $self->{end} here? I need a testcase
+ my $loc = $self->{end};
+ $self->{end} += $size;
+
+ return $loc;
+}
+
+##
+# If db locking is set, flock() the db file. If called multiple
+# times before unlock(), then the same number of unlocks() must
+# be called before the lock is released.
+##
+sub lock {
+ my $self = shift;
+ my ($obj, $type) = @_;
+
+ $type = LOCK_EX unless defined $type;
+
+ #XXX This is a temporary fix for Win32 and autovivification. It
+ # needs to improve somehow. -RobK, 2008-03-09
+ if ( $^O eq 'MSWin32' || $^O eq 'cygwin' ) {
+ $type = LOCK_EX;
+ }
+
+ if (!defined($self->{fh})) { return; }
+
+ #XXX This either needs to allow for upgrading a shared lock to an
+ # exclusive lock or something else with autovivification.
+ # -RobK, 2008-03-09
+ if ($self->{locking}) {
+ if (!$self->{locked}) {
+ flock($self->{fh}, $type);
+
+ # refresh end counter in case file has changed size
+ my @stats = stat($self->{fh});
+ $self->{end} = $stats[7];
+
+ # double-check file inode, in case another process
+ # has optimize()d our file while we were waiting.
+ if (defined($self->{inode}) && $stats[1] != $self->{inode}) {
+ $self->close;
+ $self->open;
+
+ #XXX This needs work
+ $obj->{engine}->setup_fh( $obj );
+
+ flock($self->{fh}, $type); # re-lock
+
+ # This may not be necessary after re-opening
+ $self->{end} = (stat($self->{fh}))[7]; # re-end
+ }
+ }
+ $self->{locked}++;
+
+ return 1;
+ }
+
+ return;
+}
+
+##
+# If db locking is set, unlock the db file. See note in lock()
+# regarding calling lock() multiple times.
+##
+sub unlock {
+ my $self = shift;
+
+ if (!defined($self->{fh})) { return; }
+
+ if ($self->{locking} && $self->{locked} > 0) {
+ $self->{locked}--;
+ if (!$self->{locked}) { flock($self->{fh}, LOCK_UN); }
+
+ return 1;
+ }
+
+ return;
+}
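+
+# How the re-entrant counting described above plays out (illustrative):
+#
+#   $storage->lock( $obj );   # depth 1: flock() taken
+#   $storage->lock( $obj );   # depth 2: no new flock()
+#   $storage->unlock;         # depth 1: still locked
+#   $storage->unlock;         # depth 0: flock( $fh, LOCK_UN )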
+
+sub flush {
+ my $self = shift;
+
+ # Flush the filehandle
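+    # Assigning a true value to $| flushes the currently selected handle
+    # right away (see perlvar), so we select our fh, pulse $| on, and then
+    # restore both the flag and the previously selected handle.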
+ my $old_fh = select $self->{fh};
+ my $old_af = $|; $| = 1; $| = $old_af;
+ select $old_fh;
+
+ return 1;
+}
+
+# Taken from http://www.perlmonks.org/?node_id=691054
+sub is_writable {
+ my $self = shift;
+
+ my $fh = $self->{fh};
+ return unless defined $fh;
+ return unless defined fileno $fh;
+ local $\ = ''; # just in case
+ no warnings; # temporarily disable warnings
+ local $^W; # temporarily disable warnings
+ return print $fh '';
+}
+
+sub copy_stats {
+ my $self = shift;
+ my ($temp_filename) = @_;
+
+ my @stats = stat( $self->{fh} );
+ my $perms = $stats[2] & 07777;
+ my $uid = $stats[4];
+ my $gid = $stats[5];
+ chown( $uid, $gid, $temp_filename );
+ chmod( $perms, $temp_filename );
+}
+
+1;
+__END__
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/Hash.pm b/Master/tlpkg/tlperl/lib/DBM/Deep/Hash.pm
new file mode 100755
index 00000000000..6db4e218f3f
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/Hash.pm
@@ -0,0 +1,136 @@
+package DBM::Deep::Hash;
+
+use 5.006_000;
+
+use strict;
+use warnings;
+
+our $VERSION = q(1.0013);
+
+use base 'DBM::Deep';
+
+sub _get_self {
+ eval { local $SIG{'__DIE__'}; tied( %{$_[0]} ) } || $_[0]
+}
+
+sub _repr { return {} }
+
+sub TIEHASH {
+ ##
+ # Tied hash constructor method, called by Perl's tie() function.
+ ##
+ my $class = shift;
+ my $args = $class->_get_args( @_ );
+
+ $args->{type} = $class->TYPE_HASH;
+
+ return $class->_init($args);
+}
+
+sub FETCH {
+ my $self = shift->_get_self;
+ DBM::Deep->_throw_error( "Cannot use an undefined hash key." ) unless defined $_[0];
+ my $key = ($self->_storage->{filter_store_key})
+ ? $self->_storage->{filter_store_key}->($_[0])
+ : $_[0];
+
+ return $self->SUPER::FETCH( $key, $_[0] );
+}
+
+sub STORE {
+ my $self = shift->_get_self;
+ DBM::Deep->_throw_error( "Cannot use an undefined hash key." ) unless defined $_[0];
+ my $key = ($self->_storage->{filter_store_key})
+ ? $self->_storage->{filter_store_key}->($_[0])
+ : $_[0];
+ my $value = $_[1];
+
+ return $self->SUPER::STORE( $key, $value, $_[0] );
+}
+
+sub EXISTS {
+ my $self = shift->_get_self;
+ DBM::Deep->_throw_error( "Cannot use an undefined hash key." ) unless defined $_[0];
+ my $key = ($self->_storage->{filter_store_key})
+ ? $self->_storage->{filter_store_key}->($_[0])
+ : $_[0];
+
+ return $self->SUPER::EXISTS( $key );
+}
+
+sub DELETE {
+ my $self = shift->_get_self;
+ DBM::Deep->_throw_error( "Cannot use an undefined hash key." ) unless defined $_[0];
+ my $key = ($self->_storage->{filter_store_key})
+ ? $self->_storage->{filter_store_key}->($_[0])
+ : $_[0];
+
+ return $self->SUPER::DELETE( $key, $_[0] );
+}
+
+sub FIRSTKEY {
+ ##
+ # Locate and return first key (in no particular order)
+ ##
+ my $self = shift->_get_self;
+
+ ##
+ # Request shared lock for reading
+ ##
+ $self->lock( $self->LOCK_SH );
+
+ my $result = $self->_engine->get_next_key( $self );
+
+ $self->unlock();
+
+ return ($result && $self->_storage->{filter_fetch_key})
+ ? $self->_storage->{filter_fetch_key}->($result)
+ : $result;
+}
+
+sub NEXTKEY {
+ ##
+ # Return next key (in no particular order), given previous one
+ ##
+ my $self = shift->_get_self;
+
+ my $prev_key = ($self->_storage->{filter_store_key})
+ ? $self->_storage->{filter_store_key}->($_[0])
+ : $_[0];
+
+ ##
+ # Request shared lock for reading
+ ##
+ $self->lock( $self->LOCK_SH );
+
+ my $result = $self->_engine->get_next_key( $self, $prev_key );
+
+ $self->unlock();
+
+ return ($result && $self->_storage->{filter_fetch_key})
+ ? $self->_storage->{filter_fetch_key}->($result)
+ : $result;
+}
+
+##
+# Public method aliases
+##
+sub first_key { (shift)->FIRSTKEY(@_) }
+sub next_key { (shift)->NEXTKEY(@_) }
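+
+# Typical iteration through these aliases (illustrative):
+#
+#   my $key = $db->first_key;
+#   while ( defined $key ) {
+#       my $value = $db->get( $key );
+#       ...
+#       $key = $db->next_key( $key );
+#   }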
+
+sub _copy_node {
+ my $self = shift;
+ my ($db_temp) = @_;
+
+    # Use defined() so a key of "0" or "" does not end the copy early.
+    my $key = $self->first_key();
+    while ( defined $key ) {
+        my $value = $self->get($key);
+        $self->_copy_value( \$db_temp->{$key}, $value );
+        $key = $self->next_key($key);
+    }
+
+ return 1;
+}
+
+1;
+__END__
diff --git a/Master/tlpkg/tlperl/lib/DBM/Deep/Internals.pod b/Master/tlpkg/tlperl/lib/DBM/Deep/Internals.pod
new file mode 100755
index 00000000000..132bc9eff17
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBM/Deep/Internals.pod
@@ -0,0 +1,281 @@
+=head1 NAME
+
+DBM::Deep::Internals - Documentation on the internals of DBM::Deep
+
+=head1 DESCRIPTION
+
+B<NOTE>: This document is out-of-date. It describes an intermediate file
+format used during the development from 0.983 to 1.0000. It will be rewritten
+soon.
+
+This document describes the internal workings of L<DBM::Deep>. You do not
+need to read it if you only intend to be a user. It is intended for people
+who either want a deeper understanding of the specifics of how L<DBM::Deep>
+works or wish to help develop L<DBM::Deep>.
+
+=head1 CLASS LAYOUT
+
+L<DBM::Deep> is broken up into five classes in three inheritance hierarchies.
+
+=over 4
+
+=item *
+
+L<DBM::Deep> is the parent of L<DBM::Deep::Array> and L<DBM::Deep::Hash>.
+These classes form the immediate interface to the outside world. They are the
+classes that provide the TIE mechanisms as well as the OO methods.
+
+=item *
+
+L<DBM::Deep::Engine> is the layer that deals with the mechanics of reading
+and writing to the file. This is where the logic of the file layout is
+handled.
+
+=item *
+
+L<DBM::Deep::File> is the layer that deals with the physical file. As a
+singleton that every other object has a reference to, it also provides a place
+to handle datastructure-wide items, such as transactions.
+
+=back
+
+=head1 FILE LAYOUT
+
+DBM::Deep uses a tagged file layout. Every section has a tag, a size, and then
+the data.
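+
+Sketched in Perl, reading one tagged section looks roughly like this. The
+one-byte tag and 32-bit network-order size are assumptions, consistent with
+the 5-byte "tag/size" reads listed under L</DISK> below:
+
+ # Read the 5-byte header, then the payload it describes.
+ my $header = $storage->read_at( $loc, 5 );
+ my ( $tag, $size ) = unpack( 'a N', $header );
+ my $data = $storage->read_at( undef, $size );  # continue from current spot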
+
+=head2 File header
+
+=over 4
+
+=item * File Signature
+
+The first four bytes are 'DPDB' in network byte order, signifying that this is
+a DBM::Deep file.
+
+=item * File tag/size
+
+This is the tagging of the file header. The file format used by versions
+prior to 1.00 had a different fifth byte, allowing the difference to be
+detected.
+
+=item * Version
+
+This is four bytes containing the file version. This lets the file format change over time.
+
+=item * Constants
+
+These are the file-wide constants that determine how the file is laid out.
+They can only be set upon file creation.
+
+=item * Transaction information
+
+The current running transactions are stored here, as is the next transaction
+ID.
+
+=item * Freespace information
+
+Pointers into the next free sectors of the various sector sizes (Index,
+Bucketlist, and Data) are stored here.
+
+=back
+
+=head2 Index
+
+The Index parts can be tagged as Hash, Array, or Index. The latter occurs
+when a reindexing was triggered by a bucketlist growing too large. The others
+are the root indices for their respective datatypes. The index consists of a
+tag, a size, and then 256 sections containing file locations. Each section
+corresponds to each value representable in a byte.
+
+The index is used as follows - whenever a hashed key is being looked up, the
+first byte is used to determine which location to go to from the root index.
+Then, if that's also an index, the second byte is used, and so forth until a
+bucketlist is found.
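+
+In pseudo-Perl (C<$root_index_offset> and C<$key_md5> are stand-ins, and the
+class check abbreviates the engine's real type handling):
+
+ my $loc = $root_index_offset;
+ for my $i ( 0 .. length( $key_md5 ) - 1 ) {
+     my $sector = $engine->_load_sector( $loc );
+     last unless $sector->isa( 'DBM::Deep::Engine::Sector::Index' );
+     # Byte $i of the MD5 selects one of the 256 slots.
+     $loc = $sector->get_entry( ord( substr( $key_md5, $i, 1 ) ) );
+ }
+ # $loc now points at a bucketlist (or is 0 if the slot was empty).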
+
+=head2 Bucketlist
+
+This is the part that contains the link to the data section. A bucketlist
+defaults to being 16 buckets long (modifiable by the I<max_buckets>
+parameter used when creating a new file). Each bucket contains an MD5 and a
+location of the appropriate key section.
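+
+Finding a key within a bucketlist is then a linear scan over those buckets,
+much like C<find_md5()> in the engine. A sketch, with hypothetical helpers:
+
+ for my $idx ( 0 .. $max_buckets - 1 ) {
+     my $md5 = read_bucket_md5( $idx );    # hypothetical helper
+     return $idx  if $md5 eq $key_md5;     # found our key
+     return undef if $md5 eq $blank_md5;   # empty slot, so key not present
+ }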
+
+=head2 Key area
+
+This is the part that handles transactional awareness. There are
+I<max_buckets> sections. Each section contains the location to the data
+section, a transaction ID, and whether that transaction considers this key to
+be deleted or not.
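+
+The slot for a given transaction sits at a fixed offset within the bucket,
+mirroring C<get_data_location_for()> in the engine code (the variable names
+here are illustrative):
+
+ my $spot = $bucket_start
+          + $hash_size         # skip the stored MD5
+          + $byte_size;        # skip the key-sector pointer
+ if ( $trans_id ) {            # transaction slots follow the HEAD slot
+     $spot += $byte_size
+            + ( $trans_id - 1 ) * ( $byte_size + $STALE_SIZE );
+ }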
+
+=head2 Data area
+
+This is the part that actually stores the key, value, and class (if
+appropriate). The layout is:
+
+=over 4
+
+=item * tag
+
+=item * length of the value
+
+=item * the actual value
+
+=item * keylength
+
+=item * the actual key
+
+=item * a byte indicating if this value has a classname
+
+=item * the classname (if one is there)
+
+=back
+
+The key is stored after the value because the value is requested more often
+than the key.
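+
+Put together, a data record could be assembled along these lines (the 32-bit
+lengths are illustrative, not the real constants):
+
+ my $record = join '',
+     $tag,                           # sector type
+     pack( 'N', length $value ),     # length of the value
+     $value,                         # the actual value
+     pack( 'N', length $key ),       # keylength
+     $key,                           # the actual key
+     ( defined $classname
+         ? ( chr(1), $classname )    # classname flag plus classname
+         : chr(0) );                 # no classname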
+
+=head1 PERFORMANCE
+
+L<DBM::Deep> is written completely in Perl. It is also a multi-process DBM
+that uses the datafile itself to synchronize between multiple processes,
+unlike most RDBMSes such as MySQL and Oracle. Furthermore, unlike all
+RDBMSes, L<DBM::Deep> stores both the data and the structure of that data as
+they would appear in a Perl program.
+
+=head2 CPU
+
+DBM::Deep attempts to be CPU-light. As it stores all the data on disk,
+DBM::Deep is I/O-bound, not CPU-bound.
+
+=head2 RAM
+
+DBM::Deep uses extremely little RAM relative to the amount of data you can
+access. You can iterate through a million keys (using C<each()>) without
+increasing your memory usage at all.
+
+=head2 DISK
+
+DBM::Deep is I/O-bound, pure and simple. The faster your disk, the faster
+DBM::Deep will be. Currently, when performing C<my $x = $db-E<gt>{foo}>, there
+are a minimum of 4 seeks and 1332 + N bytes read (where N is the length of your
+data). (All values assume a medium filesize.) The actions taken are:
+
+=over 4
+
+=item 1 Lock the file
+
+=item 2 Perform a stat() to determine if the inode has changed
+
+=item 3 Go to the primary index for the $db (1 seek)
+
+=item 4 Read the tag/size of the primary index (5 bytes)
+
+=item 5 Read the body of the primary index (1024 bytes)
+
+=item 6 Go to the bucketlist for this MD5 (1 seek)
+
+=item 7 Read the tag/size of the bucketlist (5 bytes)
+
+=item 8 Read the body of the bucketlist (144 bytes)
+
+=item 9 Go to the keys location for this MD5 (1 seek)
+
+=item 10 Read the tag/size of the keys section (5 bytes)
+
+=item 11 Read the body of the keys location (144 bytes)
+
+=item 12 Go to the data section that corresponds to this transaction ID. (1 seek)
+
+=item 13 Read the tag/size of the data section (5 bytes)
+
+=item 14 Read the value for this data (N bytes)
+
+=item 15 Unlock the file
+
+=back
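+
+Summing the byte counts above confirms the figure quoted earlier: 5 + 1024 +
+5 + 144 + 5 + 144 + 5 = 1332 bytes, plus the N bytes of the value itself.
+The 4 seeks are the four "Go to" steps.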
+
+Every additional level of indexing (if there are enough keys) requires an
+additional seek and the reading of 1029 additional bytes. If the value is
+blessed, an additional 1 seek and 9 + M bytes are read (where M is the length
+of the classname).
+
+Arrays are (currently) even worse because they're considered "funny hashes"
+with the length stored as just another key. This means that if you do any sort
+of lookup with a negative index, this entire process is performed twice - once
+for the length and once for the value.
+
+=head1 ACTUAL TESTS
+
+=head2 SPEED
+
+Obviously, DBM::Deep isn't going to be as fast as some C-based DBMs, such as
+the almighty I<BerkeleyDB>. But it makes up for it in features like true
+multi-level hash/array support and cross-platform FTPable files. Even so,
+DBM::Deep is still pretty fast, and the speed stays fairly consistent, even
+with huge databases. Here is some test data:
+
+ Adding 1,000,000 keys to new DB file...
+
+ At 100 keys, avg. speed is 2,703 keys/sec
+ At 200 keys, avg. speed is 2,642 keys/sec
+ At 300 keys, avg. speed is 2,598 keys/sec
+ At 400 keys, avg. speed is 2,578 keys/sec
+ At 500 keys, avg. speed is 2,722 keys/sec
+ At 600 keys, avg. speed is 2,628 keys/sec
+ At 700 keys, avg. speed is 2,700 keys/sec
+ At 800 keys, avg. speed is 2,607 keys/sec
+ At 900 keys, avg. speed is 2,190 keys/sec
+ At 1,000 keys, avg. speed is 2,570 keys/sec
+ At 2,000 keys, avg. speed is 2,417 keys/sec
+ At 3,000 keys, avg. speed is 1,982 keys/sec
+ At 4,000 keys, avg. speed is 1,568 keys/sec
+ At 5,000 keys, avg. speed is 1,533 keys/sec
+ At 6,000 keys, avg. speed is 1,787 keys/sec
+ At 7,000 keys, avg. speed is 1,977 keys/sec
+ At 8,000 keys, avg. speed is 2,028 keys/sec
+ At 9,000 keys, avg. speed is 2,077 keys/sec
+ At 10,000 keys, avg. speed is 2,031 keys/sec
+ At 20,000 keys, avg. speed is 1,970 keys/sec
+ At 30,000 keys, avg. speed is 2,050 keys/sec
+ At 40,000 keys, avg. speed is 2,073 keys/sec
+ At 50,000 keys, avg. speed is 1,973 keys/sec
+ At 60,000 keys, avg. speed is 1,914 keys/sec
+ At 70,000 keys, avg. speed is 2,091 keys/sec
+ At 80,000 keys, avg. speed is 2,103 keys/sec
+ At 90,000 keys, avg. speed is 1,886 keys/sec
+ At 100,000 keys, avg. speed is 1,970 keys/sec
+ At 200,000 keys, avg. speed is 2,053 keys/sec
+ At 300,000 keys, avg. speed is 1,697 keys/sec
+ At 400,000 keys, avg. speed is 1,838 keys/sec
+ At 500,000 keys, avg. speed is 1,941 keys/sec
+ At 600,000 keys, avg. speed is 1,930 keys/sec
+ At 700,000 keys, avg. speed is 1,735 keys/sec
+ At 800,000 keys, avg. speed is 1,795 keys/sec
+ At 900,000 keys, avg. speed is 1,221 keys/sec
+ At 1,000,000 keys, avg. speed is 1,077 keys/sec
+
+This test was performed on a 1GHz PowerMac G4 running Mac OS X 10.3.2 and
+Perl 5.8.1, with an 80GB Ultra ATA/100 HD spinning at 7200RPM. The hash keys
+and values were between 6 and 12 characters in length. The DB file ended up
+at 210MB. Run time was 12 min 3 sec.
+
+=head2 MEMORY USAGE
+
+One of the great things about L<DBM::Deep> is that it uses very little memory.
+Even with huge databases (1,000,000+ keys) you will not see much memory
+growth in your process. L<DBM::Deep> relies solely on the filesystem for
+storing and fetching data. Here is output from I<top> before even opening a
+database handle:
+
+ PID USER PRI NI SIZE RSS SHARE STAT %CPU %MEM TIME COMMAND
+ 22831 root 11 0 2716 2716 1296 R 0.0 0.2 0:07 perl
+
+Basically the process is taking 2,716K of memory. And here is the same
+process after storing and fetching 1,000,000 keys:
+
+ PID USER PRI NI SIZE RSS SHARE STAT %CPU %MEM TIME COMMAND
+ 22831 root 14 0 2772 2772 1328 R 0.0 0.2 13:32 perl
+
+Notice the memory usage increased by only 56K. The test was performed on a
+700MHz x86 box running Linux RedHat 7.2 and Perl 5.6.1.
+
+=cut