Diffstat (limited to 'Master/tlpkg/tlperl/lib/Tie')
 Master/tlpkg/tlperl/lib/Tie/Array.pm             |  287
 Master/tlpkg/tlperl/lib/Tie/File.pm              | 2632
 Master/tlpkg/tlperl/lib/Tie/Handle.pm            |  201
 Master/tlpkg/tlperl/lib/Tie/Hash.pm              |  257
 Master/tlpkg/tlperl/lib/Tie/Hash/NamedCapture.pm |   62
 Master/tlpkg/tlperl/lib/Tie/Memoize.pm           |  128
 Master/tlpkg/tlperl/lib/Tie/RefHash.pm           |  274
 Master/tlpkg/tlperl/lib/Tie/Registry.pm          |   45
 Master/tlpkg/tlperl/lib/Tie/Scalar.pm            |  163
 Master/tlpkg/tlperl/lib/Tie/StdHandle.pm         |   71
 Master/tlpkg/tlperl/lib/Tie/SubstrHash.pm        |  215
 Master/tlpkg/tlperl/lib/Tie/Watch.pm             |  560
12 files changed, 4895 insertions, 0 deletions
diff --git a/Master/tlpkg/tlperl/lib/Tie/Array.pm b/Master/tlpkg/tlperl/lib/Tie/Array.pm
new file mode 100644
index 00000000000..af8f51e9f51
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Array.pm
@@ -0,0 +1,287 @@
+package Tie::Array;
+
+use 5.006_001;
+use strict;
+use Carp;
+our $VERSION = '1.03';
+
+# Pod documentation after __END__ below.
+
+sub DESTROY { }
+sub EXTEND { }
+sub UNSHIFT { scalar shift->SPLICE(0,0,@_) }
+sub SHIFT { shift->SPLICE(0,1) }
+sub CLEAR { shift->STORESIZE(0) }
+
+sub PUSH
+{
+ my $obj = shift;
+ my $i = $obj->FETCHSIZE;
+ $obj->STORE($i++, shift) while (@_);
+}
+
+sub POP
+{
+ my $obj = shift;
+ my $newsize = $obj->FETCHSIZE - 1;
+ my $val;
+ if ($newsize >= 0)
+ {
+ $val = $obj->FETCH($newsize);
+ $obj->STORESIZE($newsize);
+ }
+ $val;
+}
+
+sub SPLICE {
+ my $obj = shift;
+ my $sz = $obj->FETCHSIZE;
+ my $off = (@_) ? shift : 0;
+ $off += $sz if ($off < 0);
+ my $len = (@_) ? shift : $sz - $off;
+ $len += $sz - $off if $len < 0;
+ my @result;
+ for (my $i = 0; $i < $len; $i++) {
+ push(@result,$obj->FETCH($off+$i));
+ }
+ $off = $sz if $off > $sz;
+ $len -= $off + $len - $sz if $off + $len > $sz;
+ if (@_ > $len) {
+ # Move items up to make room
+ my $d = @_ - $len;
+ my $e = $off+$len;
+ $obj->EXTEND($sz+$d);
+ for (my $i=$sz-1; $i >= $e; $i--) {
+ my $val = $obj->FETCH($i);
+ $obj->STORE($i+$d,$val);
+ }
+ }
+ elsif (@_ < $len) {
+ # Move items down to close the gap
+ my $d = $len - @_;
+ my $e = $off+$len;
+ for (my $i=$off+$len; $i < $sz; $i++) {
+ my $val = $obj->FETCH($i);
+ $obj->STORE($i-$d,$val);
+ }
+ $obj->STORESIZE($sz-$d);
+ }
+ for (my $i=0; $i < @_; $i++) {
+ $obj->STORE($off+$i,$_[$i]);
+ }
+ return wantarray ? @result : pop @result;
+}
+
+sub EXISTS {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define an EXISTS method";
+}
+
+sub DELETE {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a DELETE method";
+}
+
+package Tie::StdArray;
+use vars qw(@ISA);
+@ISA = 'Tie::Array';
+
+sub TIEARRAY { bless [], $_[0] }
+sub FETCHSIZE { scalar @{$_[0]} }
+sub STORESIZE { $#{$_[0]} = $_[1]-1 }
+sub STORE { $_[0]->[$_[1]] = $_[2] }
+sub FETCH { $_[0]->[$_[1]] }
+sub CLEAR { @{$_[0]} = () }
+sub POP { pop(@{$_[0]}) }
+sub PUSH { my $o = shift; push(@$o,@_) }
+sub SHIFT { shift(@{$_[0]}) }
+sub UNSHIFT { my $o = shift; unshift(@$o,@_) }
+sub EXISTS { exists $_[0]->[$_[1]] }
+sub DELETE { delete $_[0]->[$_[1]] }
+
+sub SPLICE
+{
+ my $ob = shift;
+ my $sz = $ob->FETCHSIZE;
+ my $off = @_ ? shift : 0;
+ $off += $sz if $off < 0;
+ my $len = @_ ? shift : $sz-$off;
+ return splice(@$ob,$off,$len,@_);
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Tie::Array - base class for tied arrays
+
+=head1 SYNOPSIS
+
+ package Tie::NewArray;
+ use Tie::Array;
+ @ISA = ('Tie::Array');
+
+ # mandatory methods
+ sub TIEARRAY { ... }
+ sub FETCH { ... }
+ sub FETCHSIZE { ... }
+
+ sub STORE { ... } # mandatory if elements writeable
+ sub STORESIZE { ... } # mandatory if elements can be added/deleted
+ sub EXISTS { ... } # mandatory if exists() expected to work
+ sub DELETE { ... } # mandatory if delete() expected to work
+
+ # optional methods - for efficiency
+ sub CLEAR { ... }
+ sub PUSH { ... }
+ sub POP { ... }
+ sub SHIFT { ... }
+ sub UNSHIFT { ... }
+ sub SPLICE { ... }
+ sub EXTEND { ... }
+ sub DESTROY { ... }
+
+ package Tie::NewStdArray;
+ use Tie::Array;
+
+ @ISA = ('Tie::StdArray');
+
+ # all methods provided by default
+
+ package main;
+
+ $object = tie @somearray,Tie::NewArray;
+ $object = tie @somearray,Tie::StdArray;
+ $object = tie @somearray,Tie::NewStdArray;
+
+
+
+=head1 DESCRIPTION
+
+This module provides methods for array-tying classes. See
+L<perltie> for a list of the functions required in order to tie an array
+to a package. The basic B<Tie::Array> package provides stub C<DESTROY>
+and C<EXTEND> methods that do nothing, stub C<DELETE> and C<EXISTS>
+methods that croak() if the delete() or exists() builtins are ever called
+on the tied array, and implementations of C<PUSH>, C<POP>, C<SHIFT>,
+C<UNSHIFT>, C<SPLICE> and C<CLEAR> in terms of the basic C<FETCH>, C<STORE>,
+C<FETCHSIZE> and C<STORESIZE> methods.
+
+The B<Tie::StdArray> package provides efficient methods required for tied arrays
+which are implemented as blessed references to an "inner" perl array.
+It inherits from B<Tie::Array>, and should cause tied arrays to behave exactly
+like standard arrays, allowing for selective overloading of methods.
+
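+For example (the class name here is illustrative, not part of the
+distribution), a subclass that logs every write while otherwise behaving
+exactly like a normal array only needs to override C<STORE>:
+
+    package Tie::LoggedArray;
+    use Tie::Array;
+    @ISA = ('Tie::StdArray');
+
+    # Log the write, then fall through to Tie::StdArray's STORE
+    sub STORE {
+        my ($self, $index, $value) = @_;
+        warn "storing <$value> at index $index\n";
+        $self->SUPER::STORE($index, $value);
+    }
+
+    package main;
+    tie my @a, 'Tie::LoggedArray';
+    $a[0] = "hello";    # warns, then stores as usual
+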
+For developers wishing to write their own tied arrays, the required methods
+are briefly defined below (a minimal example class follows the list). See
+L<perltie> for more detailed descriptions, as well as example code:
+
+=over 4
+
+=item TIEARRAY classname, LIST
+
+This class method is invoked by the command C<tie @array, classname>. It
+associates an array instance with the specified class. C<LIST> would represent
+additional arguments (along the lines of L<AnyDBM_File> and compatriots) needed
+to complete the association. The method should return an object of a class which
+provides the methods below.
+
+=item STORE this, index, value
+
+Store datum I<value> into I<index> for the tied array associated with
+object I<this>. If this makes the array larger, then the
+class's mapping of C<undef> should be returned for new positions.
+
+=item FETCH this, index
+
+Retrieve the datum in I<index> for the tied array associated with
+object I<this>.
+
+=item FETCHSIZE this
+
+Returns the total number of items in the tied array associated with
+object I<this>. (Equivalent to C<scalar(@array)>).
+
+=item STORESIZE this, count
+
+Sets the total number of items in the tied array associated with
+object I<this> to be I<count>. If this makes the array larger, then the
+class's mapping of C<undef> should be returned for new positions.
+If the array becomes smaller, then entries beyond I<count> should be
+deleted.
+
+=item EXTEND this, count
+
+Informative call that the array is likely to grow to have I<count> entries.
+Can be used to optimize allocation. This method need do nothing.
+
+=item EXISTS this, key
+
+Verify that the element at index I<key> exists in the tied array I<this>.
+
+The B<Tie::Array> implementation is a stub that simply croaks.
+
+=item DELETE this, key
+
+Delete the element at index I<key> from the tied array I<this>.
+
+The B<Tie::Array> implementation is a stub that simply croaks.
+
+=item CLEAR this
+
+Clear (remove, delete, ...) all values from the tied array associated with
+object I<this>.
+
+=item DESTROY this
+
+Normal object destructor method.
+
+=item PUSH this, LIST
+
+Append elements of LIST to the array.
+
+=item POP this
+
+Remove last element of the array and return it.
+
+=item SHIFT this
+
+Remove the first element of the array (shifting other elements down)
+and return it.
+
+=item UNSHIFT this, LIST
+
+Insert LIST elements at the beginning of the array, moving existing elements
+up to make room.
+
+=item SPLICE this, offset, length, LIST
+
+Perform the equivalent of C<splice> on the array.
+
+I<offset> is optional and defaults to zero; negative values count back
+from the end of the array.
+
+I<length> is optional and defaults to the rest of the array.
+
+I<LIST> may be empty.
+
+Returns a list of the original I<length> elements at I<offset>.
+
+=back
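+
+As a concrete sketch of the mandatory methods described above, the class
+below keeps its elements in a hash keyed by index. The class name and the
+internal representation are assumptions made for this example only; any
+representation that supports these four methods will do:
+
+    package Tie::HashBackedArray;
+    use Tie::Array;
+    @ISA = ('Tie::Array');
+
+    sub TIEARRAY {
+        my $class = shift;
+        return bless { size => 0, elem => {} }, $class;
+    }
+
+    sub FETCHSIZE { $_[0]{size} }
+
+    sub STORESIZE {
+        my ($self, $count) = @_;
+        if ($count < $self->{size}) {   # shrinking: drop trailing entries
+            delete $self->{elem}{$_} for $count .. $self->{size} - 1;
+        }
+        $self->{size} = $count;
+    }
+
+    sub FETCH { $_[0]{elem}{$_[1]} }
+
+    sub STORE {
+        my ($self, $index, $value) = @_;
+        $self->{size} = $index + 1 if $index >= $self->{size};
+        $self->{elem}{$index} = $value;
+    }
+
+    package main;
+    tie my @a, 'Tie::HashBackedArray';
+    push @a, "x", "y";   # PUSH, POP, SPLICE etc. come from Tie::Array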
+
+=head1 CAVEATS
+
+There is no support at present for tied @ISA. There is a potential conflict
+between magic entries needed to notice setting of @ISA, and those needed to
+implement 'tie'.
+
+Very little consideration has been given to the behaviour of tied arrays
+when C<$[> is not its default value of zero.
+
+=head1 AUTHOR
+
+Nick Ing-Simmons E<lt>nik@tiuk.ti.comE<gt>
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/Tie/File.pm b/Master/tlpkg/tlperl/lib/Tie/File.pm
new file mode 100644
index 00000000000..9528ab1bfdb
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/File.pm
@@ -0,0 +1,2632 @@
+
+package Tie::File;
+require 5.005;
+use Carp ':DEFAULT', 'confess';
+use POSIX 'SEEK_SET';
+use Fcntl 'O_CREAT', 'O_RDWR', 'LOCK_EX', 'LOCK_SH', 'O_WRONLY', 'O_RDONLY';
+sub O_ACCMODE () { O_RDONLY | O_RDWR | O_WRONLY }
+
+
+$VERSION = "0.97_02";
+my $DEFAULT_MEMORY_SIZE = 1<<21; # 2 megabytes
+my $DEFAULT_AUTODEFER_THRESHHOLD = 3; # 3 records
+my $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD = 65536; # 16 disk blocksful
+
+my %good_opt = map {$_ => 1, "-$_" => 1}
+ qw(memory dw_size mode recsep discipline
+ autodefer autochomp autodefer_threshhold concurrent);
+
+sub TIEARRAY {
+ if (@_ % 2 != 0) {
+ croak "usage: tie \@array, $_[0], filename, [option => value]...";
+ }
+ my ($pack, $file, %opts) = @_;
+
+ # transform '-foo' keys into 'foo' keys
+ for my $key (keys %opts) {
+ unless ($good_opt{$key}) {
+ croak("$pack: Unrecognized option '$key'\n");
+ }
+ my $okey = $key;
+ if ($key =~ s/^-+//) {
+ $opts{$key} = delete $opts{$okey};
+ }
+ }
+
+ if ($opts{concurrent}) {
+ croak("$pack: concurrent access not supported yet\n");
+ }
+
+ unless (defined $opts{memory}) {
+ # default is the larger of the default cache size and the
+ # deferred-write buffer size (if specified)
+ $opts{memory} = $DEFAULT_MEMORY_SIZE;
+ $opts{memory} = $opts{dw_size}
+ if defined $opts{dw_size} && $opts{dw_size} > $DEFAULT_MEMORY_SIZE;
+ # Dora Winifred Read
+ }
+ $opts{dw_size} = $opts{memory} unless defined $opts{dw_size};
+ if ($opts{dw_size} > $opts{memory}) {
+ croak("$pack: dw_size may not be larger than total memory allocation\n");
+ }
+ # are we in deferred-write mode?
+ $opts{defer} = 0 unless defined $opts{defer};
+ $opts{deferred} = {}; # no records are presently deferred
+ $opts{deferred_s} = 0; # count of total bytes in ->{deferred}
+ $opts{deferred_max} = -1; # empty
+
+ # What's a good way to arrange that this class can be overridden?
+ $opts{cache} = Tie::File::Cache->new($opts{memory});
+
+ # autodeferment is enabled by default
+ $opts{autodefer} = 1 unless defined $opts{autodefer};
+ $opts{autodeferring} = 0; # but is not initially active
+ $opts{ad_history} = [];
+ $opts{autodefer_threshhold} = $DEFAULT_AUTODEFER_THRESHHOLD
+ unless defined $opts{autodefer_threshhold};
+ $opts{autodefer_filelen_threshhold} = $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD
+ unless defined $opts{autodefer_filelen_threshhold};
+
+ $opts{offsets} = [0];
+ $opts{filename} = $file;
+ unless (defined $opts{recsep}) {
+ $opts{recsep} = _default_recsep();
+ }
+ $opts{recseplen} = length($opts{recsep});
+ if ($opts{recseplen} == 0) {
+ croak "Empty record separator not supported by $pack";
+ }
+
+ $opts{autochomp} = 1 unless defined $opts{autochomp};
+
+ $opts{mode} = O_CREAT|O_RDWR unless defined $opts{mode};
+ $opts{rdonly} = (($opts{mode} & O_ACCMODE) == O_RDONLY);
+ $opts{sawlastrec} = undef;
+
+ my $fh;
+
+ if (UNIVERSAL::isa($file, 'GLOB')) {
+ # We use 1 here on the theory that some systems
+ # may not indicate failure if we use 0.
+ # MSWin32 does not indicate failure with 0, but I don't know if
+ # it will indicate failure with 1 or not.
+ unless (seek $file, 1, SEEK_SET) {
+ croak "$pack: your filehandle does not appear to be seekable";
+ }
+ seek $file, 0, SEEK_SET; # put it back
+ $fh = $file; # setting binmode is the user's problem
+ } elsif (ref $file) {
+ croak "usage: tie \@array, $pack, filename, [option => value]...";
+ } else {
+ # $fh = \do { local *FH }; # XXX this is buggy
+ if ($] < 5.006) {
+ # perl 5.005 and earlier don't autovivify filehandles
+ require Symbol;
+ $fh = Symbol::gensym();
+ }
+ sysopen $fh, $file, $opts{mode}, 0666 or return;
+ binmode $fh;
+ ++$opts{ourfh};
+ }
+ { my $ofh = select $fh; $| = 1; select $ofh } # autoflush on write
+ if (defined $opts{discipline} && $] >= 5.006) {
+ # This avoids a compile-time warning under 5.005
+ eval 'binmode($fh, $opts{discipline})';
+ croak $@ if $@ =~ /unknown discipline/i;
+ die if $@;
+ }
+ $opts{fh} = $fh;
+
+ bless \%opts => $pack;
+}
+
+sub FETCH {
+ my ($self, $n) = @_;
+ my $rec;
+
+ # check the defer buffer
+ $rec = $self->{deferred}{$n} if exists $self->{deferred}{$n};
+ $rec = $self->_fetch($n) unless defined $rec;
+
+ # inlined _chomp1
+ substr($rec, - $self->{recseplen}) = ""
+ if defined $rec && $self->{autochomp};
+ $rec;
+}
+
+# Chomp many records in-place; return nothing useful
+sub _chomp {
+ my $self = shift;
+ return unless $self->{autochomp};
+ if ($self->{autochomp}) {
+ for (@_) {
+ next unless defined;
+ substr($_, - $self->{recseplen}) = "";
+ }
+ }
+}
+
+# Chomp one record in-place; return modified record
+sub _chomp1 {
+ my ($self, $rec) = @_;
+ return $rec unless $self->{autochomp};
+ return unless defined $rec;
+ substr($rec, - $self->{recseplen}) = "";
+ $rec;
+}
+
+sub _fetch {
+ my ($self, $n) = @_;
+
+ # check the record cache
+ { my $cached = $self->{cache}->lookup($n);
+ return $cached if defined $cached;
+ }
+
+ if ($#{$self->{offsets}} < $n) {
+ return if $self->{eof}; # request for record beyond end of file
+ my $o = $self->_fill_offsets_to($n);
+ # If it's still undefined, there is no such record, so return 'undef'
+ return unless defined $o;
+ }
+
+ my $fh = $self->{FH};
+ $self->_seek($n); # we can do this now that offsets is populated
+ my $rec = $self->_read_record;
+
+# If we happen to have just read the first record, check to see if
+# the length of the record matches what 'tell' says. If not, Tie::File
+# won't work, and should drop dead.
+#
+# if ($n == 0 && defined($rec) && tell($self->{fh}) != length($rec)) {
+# if (defined $self->{discipline}) {
+# croak "I/O discipline $self->{discipline} not supported";
+# } else {
+# croak "File encoding not supported";
+# }
+# }
+
+ $self->{cache}->insert($n, $rec) if defined $rec && not $self->{flushing};
+ $rec;
+}
+
+sub STORE {
+ my ($self, $n, $rec) = @_;
+ die "STORE called from _check_integrity!" if $DIAGNOSTIC;
+
+ $self->_fixrecs($rec);
+
+ if ($self->{autodefer}) {
+ $self->_annotate_ad_history($n);
+ }
+
+ return $self->_store_deferred($n, $rec) if $self->_is_deferring;
+
+
+ # We need this to decide whether the new record will fit
+ # It incidentally populates the offsets table
+ # Note we have to do this before we alter the cache
+ # 20020324 Wait, but this DOES alter the cache. TODO BUG?
+ my $oldrec = $self->_fetch($n);
+
+ if (not defined $oldrec) {
+ # We're storing a record beyond the end of the file
+ $self->_extend_file_to($n+1);
+ $oldrec = $self->{recsep};
+ }
+# return if $oldrec eq $rec; # don't bother
+ my $len_diff = length($rec) - length($oldrec);
+
+ # length($oldrec) here is not consistent with text mode TODO XXX BUG
+ $self->_mtwrite($rec, $self->{offsets}[$n], length($oldrec));
+ $self->_oadjust([$n, 1, $rec]);
+ $self->{cache}->update($n, $rec);
+}
+
+sub _store_deferred {
+ my ($self, $n, $rec) = @_;
+ $self->{cache}->remove($n);
+ my $old_deferred = $self->{deferred}{$n};
+
+ if (defined $self->{deferred_max} && $n > $self->{deferred_max}) {
+ $self->{deferred_max} = $n;
+ }
+ $self->{deferred}{$n} = $rec;
+
+ my $len_diff = length($rec);
+ $len_diff -= length($old_deferred) if defined $old_deferred;
+ $self->{deferred_s} += $len_diff;
+ $self->{cache}->adj_limit(-$len_diff);
+ if ($self->{deferred_s} > $self->{dw_size}) {
+ $self->_flush;
+ } elsif ($self->_cache_too_full) {
+ $self->_cache_flush;
+ }
+}
+
+# Remove a single record from the deferred-write buffer without writing it
+# The record need not be present
+sub _delete_deferred {
+ my ($self, $n) = @_;
+ my $rec = delete $self->{deferred}{$n};
+ return unless defined $rec;
+
+ if (defined $self->{deferred_max}
+ && $n == $self->{deferred_max}) {
+ undef $self->{deferred_max};
+ }
+
+ $self->{deferred_s} -= length $rec;
+ $self->{cache}->adj_limit(length $rec);
+}
+
+sub FETCHSIZE {
+ my $self = shift;
+ my $n = $self->{eof} ? $#{$self->{offsets}} : $self->_fill_offsets;
+
+ my $top_deferred = $self->_defer_max;
+ $n = $top_deferred+1 if defined $top_deferred && $n < $top_deferred+1;
+ $n;
+}
+
+sub STORESIZE {
+ my ($self, $len) = @_;
+
+ if ($self->{autodefer}) {
+ $self->_annotate_ad_history('STORESIZE');
+ }
+
+ my $olen = $self->FETCHSIZE;
+ return if $len == $olen; # Woo-hoo!
+
+ # file gets longer
+ if ($len > $olen) {
+ if ($self->_is_deferring) {
+ for ($olen .. $len-1) {
+ $self->_store_deferred($_, $self->{recsep});
+ }
+ } else {
+ $self->_extend_file_to($len);
+ }
+ return;
+ }
+
+ # file gets shorter
+ if ($self->_is_deferring) {
+ # TODO maybe replace this with map-plus-assignment?
+ for (grep $_ >= $len, keys %{$self->{deferred}}) {
+ $self->_delete_deferred($_);
+ }
+ $self->{deferred_max} = $len-1;
+ }
+
+ $self->_seek($len);
+ $self->_chop_file;
+ $#{$self->{offsets}} = $len;
+# $self->{offsets}[0] = 0; # in case we just chopped this
+
+ $self->{cache}->remove(grep $_ >= $len, $self->{cache}->ckeys);
+}
+
+### OPTIMIZE ME
+### It should not be necessary to do FETCHSIZE
+### Just seek to the end of the file.
+sub PUSH {
+ my $self = shift;
+ $self->SPLICE($self->FETCHSIZE, scalar(@_), @_);
+
+ # No need to return:
+ # $self->FETCHSIZE; # because av.c takes care of this for me
+}
+
+sub POP {
+ my $self = shift;
+ my $size = $self->FETCHSIZE;
+ return if $size == 0;
+# print STDERR "# POPPITY POP POP POP\n";
+ scalar $self->SPLICE($size-1, 1);
+}
+
+sub SHIFT {
+ my $self = shift;
+ scalar $self->SPLICE(0, 1);
+}
+
+sub UNSHIFT {
+ my $self = shift;
+ $self->SPLICE(0, 0, @_);
+ # $self->FETCHSIZE; # av.c takes care of this for me
+}
+
+sub CLEAR {
+ my $self = shift;
+
+ if ($self->{autodefer}) {
+ $self->_annotate_ad_history('CLEAR');
+ }
+
+ $self->_seekb(0);
+ $self->_chop_file;
+ $self->{cache}->set_limit($self->{memory});
+ $self->{cache}->empty;
+ @{$self->{offsets}} = (0);
+ %{$self->{deferred}}= ();
+ $self->{deferred_s} = 0;
+ $self->{deferred_max} = -1;
+}
+
+sub EXTEND {
+ my ($self, $n) = @_;
+
+ # No need to pre-extend anything in this case
+ return if $self->_is_deferring;
+
+ $self->_fill_offsets_to($n);
+ $self->_extend_file_to($n);
+}
+
+sub DELETE {
+ my ($self, $n) = @_;
+
+ if ($self->{autodefer}) {
+ $self->_annotate_ad_history('DELETE');
+ }
+
+ my $lastrec = $self->FETCHSIZE-1;
+ my $rec = $self->FETCH($n);
+ $self->_delete_deferred($n) if $self->_is_deferring;
+ if ($n == $lastrec) {
+ $self->_seek($n);
+ $self->_chop_file;
+ $#{$self->{offsets}}--;
+ $self->{cache}->remove($n);
+ # perhaps in this case I should also remove trailing null records?
+ # 20020316
+ # Note that delete @a[-3..-1] deletes the records in the wrong order,
+ # so we only chop the very last one out of the file. We could repair this
+ # by tracking deleted records inside the object.
+ } elsif ($n < $lastrec) {
+ $self->STORE($n, "");
+ }
+ $rec;
+}
+
+sub EXISTS {
+ my ($self, $n) = @_;
+ return 1 if exists $self->{deferred}{$n};
+ $n < $self->FETCHSIZE;
+}
+
+sub SPLICE {
+ my $self = shift;
+
+ if ($self->{autodefer}) {
+ $self->_annotate_ad_history('SPLICE');
+ }
+
+ $self->_flush if $self->_is_deferring; # move this up?
+ if (wantarray) {
+ $self->_chomp(my @a = $self->_splice(@_));
+ @a;
+ } else {
+ $self->_chomp1(scalar $self->_splice(@_));
+ }
+}
+
+sub DESTROY {
+ my $self = shift;
+ $self->flush if $self->_is_deferring;
+ $self->{cache}->delink if defined $self->{cache}; # break circular link
+ if ($self->{fh} and $self->{ourfh}) {
+ delete $self->{ourfh};
+ close delete $self->{fh};
+ }
+}
+
+sub _splice {
+ my ($self, $pos, $nrecs, @data) = @_;
+ my @result;
+
+ $pos = 0 unless defined $pos;
+
+ # Deal with negative and other out-of-range positions
+ # Also set default for $nrecs
+ {
+ my $oldsize = $self->FETCHSIZE;
+ $nrecs = $oldsize unless defined $nrecs;
+ my $oldpos = $pos;
+
+ if ($pos < 0) {
+ $pos += $oldsize;
+ if ($pos < 0) {
+ croak "Modification of non-creatable array value attempted, subscript $oldpos";
+ }
+ }
+
+ if ($pos > $oldsize) {
+ return unless @data;
+ $pos = $oldsize; # This is what perl does for normal arrays
+ }
+
+ # The manual is very unclear here
+ if ($nrecs < 0) {
+ $nrecs = $oldsize - $pos + $nrecs;
+ $nrecs = 0 if $nrecs < 0;
+ }
+
+ # nrecs is too big---it really means "until the end"
+ # 20030507
+ if ($nrecs + $pos > $oldsize) {
+ $nrecs = $oldsize - $pos;
+ }
+ }
+
+ $self->_fixrecs(@data);
+ my $data = join '', @data;
+ my $datalen = length $data;
+ my $oldlen = 0;
+
+ # compute length of data being removed
+ for ($pos .. $pos+$nrecs-1) {
+ last unless defined $self->_fill_offsets_to($_);
+ my $rec = $self->_fetch($_);
+ last unless defined $rec;
+ push @result, $rec;
+
+ # Why don't we just use length($rec) here?
+ # Because that record might have come from the cache. _splice
+ # might have been called to flush out the deferred-write records,
+ # and in this case length($rec) is the length of the record to be
+ # *written*, not the length of the actual record in the file. But
+ # the offsets are still true. 20020322
+ $oldlen += $self->{offsets}[$_+1] - $self->{offsets}[$_]
+ if defined $self->{offsets}[$_+1];
+ }
+ $self->_fill_offsets_to($pos+$nrecs);
+
+ # Modify the file
+ $self->_mtwrite($data, $self->{offsets}[$pos], $oldlen);
+ # Adjust the offsets table
+ $self->_oadjust([$pos, $nrecs, @data]);
+
+ { # Take this read cache stuff out into a separate function
+ # You made a half-attempt to put it into _oadjust.
+ # Finish something like that up eventually.
+ # STORE also needs to do something similarish
+
+ # update the read cache, part 1
+ # modified records
+ for ($pos .. $pos+$nrecs-1) {
+ my $new = $data[$_-$pos];
+ if (defined $new) {
+ $self->{cache}->update($_, $new);
+ } else {
+ $self->{cache}->remove($_);
+ }
+ }
+
+ # update the read cache, part 2
+ # moved records - records past the site of the change
+ # need to be renumbered
+ # Maybe merge this with the previous block?
+ {
+ my @oldkeys = grep $_ >= $pos + $nrecs, $self->{cache}->ckeys;
+ my @newkeys = map $_-$nrecs+@data, @oldkeys;
+ $self->{cache}->rekey(\@oldkeys, \@newkeys);
+ }
+
+ # Now there might be too much data in the cache, if we spliced out
+ # some short records and spliced in some long ones. If so, flush
+ # the cache.
+ $self->_cache_flush;
+ }
+
+ # Yes, the return value of 'splice' *is* actually this complicated
+ wantarray ? @result : @result ? $result[-1] : undef;
+}
+
+
+# write data into the file
+# $data is the data to be written.
+# it should be written at position $pos, and should overwrite
+# exactly $len of the following bytes.
+# Note that if length($data) > $len, the subsequent bytes will have to
+# be moved up, and if length($data) < $len, they will have to
+# be moved down
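+#
+# For example (offsets illustrative), $self->_twrite($data, 100, 3) with a
+# six-byte $data overwrites the three bytes at offset 100 and shifts
+# everything that used to start at offset 103 three bytes further along
+# to make room.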
+sub _twrite {
+ my ($self, $data, $pos, $len) = @_;
+
+ unless (defined $pos) {
+ die "\$pos was undefined in _twrite";
+ }
+
+ my $len_diff = length($data) - $len;
+
+ if ($len_diff == 0) { # Woo-hoo!
+ my $fh = $self->{fh};
+ $self->_seekb($pos);
+ $self->_write_record($data);
+ return; # well, that was easy.
+ }
+
+ # the two records are of different lengths
+ # our strategy here: rewrite the tail of the file,
+ # reading ahead one buffer at a time
+ # $bufsize is required to be at least as large as the data we're overwriting
+ my $bufsize = _bufsize($len_diff);
+ my ($writepos, $readpos) = ($pos, $pos+$len);
+ my $next_block;
+ my $more_data;
+
+ # Seems like there ought to be a way to avoid the repeated code
+ # and the special case here. The read(1) is also a little weird.
+ # Think about this.
+ do {
+ $self->_seekb($readpos);
+ my $br = read $self->{fh}, $next_block, $bufsize;
+ $more_data = read $self->{fh}, my($dummy), 1;
+ $self->_seekb($writepos);
+ $self->_write_record($data);
+ $readpos += $br;
+ $writepos += length $data;
+ $data = $next_block;
+ } while $more_data;
+ $self->_seekb($writepos);
+ $self->_write_record($next_block);
+
+ # There might be leftover data at the end of the file
+ $self->_chop_file if $len_diff < 0;
+}
+
+# _iwrite(D, S, E)
+# Insert text D at position S.
+# Let C = E-S-|D|. If C < 0; die.
+# Data in [S,S+C) is copied to [S+D,S+D+C) = [S+D,E).
+# Data in [S+C = E-D, E) is returned. Data in [E, oo) is untouched.
+#
+# In a later version, don't read the entire intervening area into
+# memory at once; do the copying block by block.
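+#
+# For example (offsets illustrative), $self->_iwrite("XX", 10, 20) writes
+# "XX" at offset 10, slides the 8 bytes formerly at [10,18) to [12,20),
+# and returns the 2 bytes that used to occupy [18,20).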
+sub _iwrite {
+ my $self = shift;
+ my ($D, $s, $e) = @_;
+ my $d = length $D;
+ my $c = $e-$s-$d;
+ local *FH = $self->{fh};
+ confess "Not enough space to insert $d bytes between $s and $e"
+ if $c < 0;
+ confess "[$s,$e) is an invalid insertion range" if $e < $s;
+
+ $self->_seekb($s);
+ read FH, my $buf, $e-$s;
+
+ $D .= substr($buf, 0, $c, "");
+
+ $self->_seekb($s);
+ $self->_write_record($D);
+
+ return $buf;
+}
+
+# Like _twrite, but the data-pos-len triple may be repeated; you may
+# write several chunks. All the writing will be done in
+# one pass. Chunks SHALL be in ascending order and SHALL NOT overlap.
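+#
+# For example (the offsets and strings are illustrative only):
+#   $self->_mtwrite("abc\n", 0, 4,     # replace the 4 bytes at offset 0
+#                   "wxyz\n", 10, 2);  # replace the 2 bytes at offset 10
+# rewrites both regions in a single left-to-right pass, shifting the
+# following data as needed.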
+sub _mtwrite {
+ my $self = shift;
+ my $unwritten = "";
+ my $delta = 0;
+
+ @_ % 3 == 0
+ or die "Arguments to _mtwrite did not come in groups of three";
+
+ while (@_) {
+ my ($data, $pos, $len) = splice @_, 0, 3;
+ my $end = $pos + $len; # The OLD end of the segment to be replaced
+ $data = $unwritten . $data;
+ $delta -= length($unwritten);
+ $unwritten = "";
+ $pos += $delta; # This is where the data goes now
+ my $dlen = length $data;
+ $self->_seekb($pos);
+ if ($len >= $dlen) { # the data will fit
+ $self->_write_record($data);
+ $delta += ($dlen - $len); # everything following moves down by this much
+ $data = ""; # All the data in the buffer has been written
+ } else { # won't fit
+ my $writable = substr($data, 0, $len - $delta, "");
+ $self->_write_record($writable);
+ $delta += ($dlen - $len); # everything following moves down by this much
+ }
+
+ # At this point we've written some but maybe not all of the data.
+ # There might be a gap to close up, or $data might still contain a
+ # bunch of unwritten data that didn't fit.
+ my $ndlen = length $data;
+ if ($delta == 0) {
+ $self->_write_record($data);
+ } elsif ($delta < 0) {
+ # upcopy (close up gap)
+ if (@_) {
+ $self->_upcopy($end, $end + $delta, $_[1] - $end);
+ } else {
+ $self->_upcopy($end, $end + $delta);
+ }
+ } else {
+ # downcopy (insert data that didn't fit; replace this data in memory
+ # with _later_ data that doesn't fit)
+ if (@_) {
+ $unwritten = $self->_downcopy($data, $end, $_[1] - $end);
+ } else {
+ # Make the file longer to accommodate the last segment that doesn't fit
+ $unwritten = $self->_downcopy($data, $end);
+ }
+ }
+ }
+}
+
+# Copy block of data of length $len from position $spos to position $dpos
+# $dpos must be <= $spos
+#
+# If $len is undefined, go all the way to the end of the file
+# and then truncate it ($spos - $dpos bytes will be removed)
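+#
+# For example (offsets illustrative), $self->_upcopy(1000, 900, 50) copies
+# the 50 bytes at [1000,1050) down to [900,950); $self->_upcopy(1000, 900)
+# with no length copies everything from offset 1000 onward to offset 900
+# and then truncates the file, discarding the final 100 bytes.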
+sub _upcopy {
+ my $blocksize = 8192;
+ my ($self, $spos, $dpos, $len) = @_;
+ if ($dpos > $spos) {
+ die "source ($spos) was upstream of destination ($dpos) in _upcopy";
+ } elsif ($dpos == $spos) {
+ return;
+ }
+
+ while (! defined ($len) || $len > 0) {
+ my $readsize = ! defined($len) ? $blocksize
+ : $len > $blocksize ? $blocksize
+ : $len;
+
+ my $fh = $self->{fh};
+ $self->_seekb($spos);
+ my $bytes_read = read $fh, my($data), $readsize;
+ $self->_seekb($dpos);
+ if ($data eq "") {
+ $self->_chop_file;
+ last;
+ }
+ $self->_write_record($data);
+ $spos += $bytes_read;
+ $dpos += $bytes_read;
+ $len -= $bytes_read if defined $len;
+ }
+}
+
+# Write $data into a block of length $len at position $pos,
+# moving everything in the block forwards to make room.
+# Instead of writing the last length($data) bytes from the block
+# (because there isn't room for them any longer) return them.
+#
+# Undefined $len means 'until the end of the file'
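+#
+# For example (offsets illustrative, and assuming the file extends well past
+# offset 510), $self->_downcopy("XYZ", 500, 10) overwrites the 10-byte block
+# at [500,510) with "XYZ" followed by the first 7 bytes that used to be
+# there, and returns the last 3 bytes of the old block.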
+sub _downcopy {
+ my $blocksize = 8192;
+ my ($self, $data, $pos, $len) = @_;
+ my $fh = $self->{fh};
+
+ while (! defined $len || $len > 0) {
+ my $readsize = ! defined($len) ? $blocksize
+ : $len > $blocksize? $blocksize : $len;
+ $self->_seekb($pos);
+ read $fh, my($old), $readsize;
+ my $last_read_was_short = length($old) < $readsize;
+ $data .= $old;
+ my $writable;
+ if ($last_read_was_short) {
+ # If last read was short, then $data now contains the entire rest
+ # of the file, so there's no need to write only one block of it
+ $writable = $data;
+ $data = "";
+ } else {
+ $writable = substr($data, 0, $readsize, "");
+ }
+ last if $writable eq "";
+ $self->_seekb($pos);
+ $self->_write_record($writable);
+ last if $last_read_was_short && $data eq "";
+ $len -= $readsize if defined $len;
+ $pos += $readsize;
+ }
+ return $data;
+}
+
+# Adjust the object data structures following an '_mtwrite'
+# Arguments are
+#  [$pos, $nrecs, @data] items
+# indicating that $nrecs records were removed at record position $pos
+# and replaced with the records in @data (whose lengths determine the
+# new offsets).
+# Arguments guarantee that $pos is strictly increasing.
+# No return value
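+#
+# For example, STORE calls $self->_oadjust([$n, 1, $rec]) after overwriting
+# record $n with the single replacement record $rec.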
+sub _oadjust {
+ my $self = shift;
+ my $delta = 0;
+ my $delta_recs = 0;
+ my $prev_end = -1;
+ my %newkeys;
+
+ for (@_) {
+ my ($pos, $nrecs, @data) = @$_;
+ $pos += $delta_recs;
+
+ # Adjust the offsets of the records after the previous batch up
+ # to the first new one of this batch
+ for my $i ($prev_end+2 .. $pos - 1) {
+ $self->{offsets}[$i] += $delta;
+ $newkey{$i} = $i + $delta_recs;
+ }
+
+ $prev_end = $pos + @data - 1; # last record moved on this pass
+
+ # Remove the offsets for the removed records;
+ # replace with the offsets for the inserted records
+ my @newoff = ($self->{offsets}[$pos] + $delta);
+ for my $i (0 .. $#data) {
+ my $newlen = length $data[$i];
+ push @newoff, $newoff[$i] + $newlen;
+ $delta += $newlen;
+ }
+
+ for my $i ($pos .. $pos+$nrecs-1) {
+ last if $i+1 > $#{$self->{offsets}};
+ my $oldlen = $self->{offsets}[$i+1] - $self->{offsets}[$i];
+ $delta -= $oldlen;
+ }
+
+# # also this data has changed, so update it in the cache
+# for (0 .. $#data) {
+# $self->{cache}->update($pos + $_, $data[$_]);
+# }
+# if ($delta_recs) {
+# my @oldkeys = grep $_ >= $pos + @data, $self->{cache}->ckeys;
+# my @newkeys = map $_ + $delta_recs, @oldkeys;
+# $self->{cache}->rekey(\@oldkeys, \@newkeys);
+# }
+
+ # replace old offsets with new
+ splice @{$self->{offsets}}, $pos, $nrecs+1, @newoff;
+ # What if we just spliced out the end of the offsets table?
+ # shouldn't we clear $self->{eof}? Test for this XXX BUG TODO
+
+ $delta_recs += @data - $nrecs; # net change in total number of records
+ }
+
+ # The trailing records at the very end of the file
+ if ($delta) {
+ for my $i ($prev_end+2 .. $#{$self->{offsets}}) {
+ $self->{offsets}[$i] += $delta;
+ }
+ }
+
+ # If we scrubbed out all known offsets, regenerate the trivial table
+ # that knows that the file does indeed start at 0.
+ $self->{offsets}[0] = 0 unless @{$self->{offsets}};
+ # If the file got longer, the offsets table is no longer complete
+ # $self->{eof} = 0 if $delta_recs > 0;
+
+ # Now there might be too much data in the cache, if we spliced out
+ # some short records and spliced in some long ones. If so, flush
+ # the cache.
+ $self->_cache_flush;
+}
+
+# If a record does not already end with the appropriate terminator
+# string, append one.
+sub _fixrecs {
+ my $self = shift;
+ for (@_) {
+ $_ = "" unless defined $_;
+ $_ .= $self->{recsep}
+ unless substr($_, - $self->{recseplen}) eq $self->{recsep};
+ }
+}
+
+
+################################################################
+#
+# Basic read, write, and seek
+#
+
+# seek to the beginning of record #$n
+# Assumes that the offsets table is already correctly populated
+#
+# Note that $n=-1 has a special meaning here: It means the start of
+# the last known record; this may or may not be the very last record
+# in the file, depending on whether the offsets table is fully populated.
+#
+sub _seek {
+ my ($self, $n) = @_;
+ my $o = $self->{offsets}[$n];
+ defined($o)
+ or confess("logic error: undefined offset for record $n");
+ seek $self->{fh}, $o, SEEK_SET
+ or confess "Couldn't seek filehandle: $!"; # "Should never happen."
+}
+
+# seek to byte $b in the file
+sub _seekb {
+ my ($self, $b) = @_;
+ seek $self->{fh}, $b, SEEK_SET
+ or die "Couldn't seek filehandle: $!"; # "Should never happen."
+}
+
+# populate the offsets table up to the beginning of record $n
+# return the offset of record $n
+sub _fill_offsets_to {
+ my ($self, $n) = @_;
+
+ return $self->{offsets}[$n] if $self->{eof};
+
+ my $fh = $self->{fh};
+ local *OFF = $self->{offsets};
+ my $rec;
+
+ until ($#OFF >= $n) {
+ $self->_seek(-1); # tricky -- see comment at _seek
+ $rec = $self->_read_record;
+ if (defined $rec) {
+ push @OFF, int(tell $fh); # Tels says that int() saves memory here
+ } else {
+ $self->{eof} = 1;
+ return; # It turns out there is no such record
+ }
+ }
+
+ # we have now read all the records up to record n-1,
+ # so we can return the offset of record n
+ $OFF[$n];
+}
+
+sub _fill_offsets {
+ my ($self) = @_;
+
+ my $fh = $self->{fh};
+ local *OFF = $self->{offsets};
+
+ $self->_seek(-1); # tricky -- see comment at _seek
+
+ # Tels says that inlining read_record() would make this loop
+ # five times faster. 20030508
+ while ( defined $self->_read_record()) {
+ # int() saves us memory here
+ push @OFF, int(tell $fh);
+ }
+
+ $self->{eof} = 1;
+ $#OFF;
+}
+
+# assumes that $rec is already suitably terminated
+sub _write_record {
+ my ($self, $rec) = @_;
+ my $fh = $self->{fh};
+ local $\ = "";
+ print $fh $rec
+ or die "Couldn't write record: $!"; # "Should never happen."
+# $self->{_written} += length($rec);
+}
+
+sub _read_record {
+ my $self = shift;
+ my $rec;
+ { local $/ = $self->{recsep};
+ my $fh = $self->{fh};
+ $rec = <$fh>;
+ }
+ return unless defined $rec;
+ if (substr($rec, -$self->{recseplen}) ne $self->{recsep}) {
+ # improperly terminated final record --- quietly fix it.
+# my $ac = substr($rec, -$self->{recseplen});
+# $ac =~ s/\n/\\n/g;
+ $self->{sawlastrec} = 1;
+ unless ($self->{rdonly}) {
+ local $\ = "";
+ my $fh = $self->{fh};
+ print $fh $self->{recsep};
+ }
+ $rec .= $self->{recsep};
+ }
+# $self->{_read} += length($rec) if defined $rec;
+ $rec;
+}
+
+sub _rw_stats {
+ my $self = shift;
+ @{$self}{'_read', '_written'};
+}
+
+################################################################
+#
+# Read cache management
+
+sub _cache_flush {
+ my ($self) = @_;
+ $self->{cache}->reduce_size_to($self->{memory} - $self->{deferred_s});
+}
+
+sub _cache_too_full {
+ my $self = shift;
+ $self->{cache}->bytes + $self->{deferred_s} >= $self->{memory};
+}
+
+################################################################
+#
+# File custodial services
+#
+
+
+# We have read to the end of the file and have the offsets table
+# entirely populated. Now we need to write a new record beyond
+# the end of the file. We prepare for this by writing
+# empty records into the file up to the position we want
+#
+# assumes that the offsets table already contains the offset of record $n,
+# if it exists, and extends to the end of the file if not.
+sub _extend_file_to {
+ my ($self, $n) = @_;
+ $self->_seek(-1); # position after the end of the last record
+ my $pos = $self->{offsets}[-1];
+
+ # the offsets table has one entry more than the total number of records
+ my $extras = $n - $#{$self->{offsets}};
+
+ # Todo : just use $self->{recsep} x $extras here?
+ while ($extras-- > 0) {
+ $self->_write_record($self->{recsep});
+ push @{$self->{offsets}}, int(tell $self->{fh});
+ }
+}
+
+# Truncate the file at the current position
+sub _chop_file {
+ my $self = shift;
+ truncate $self->{fh}, tell($self->{fh});
+}
+
+
+# compute the size of a buffer suitable for moving
+# all the data in a file forward $n bytes
+# ($n may be negative)
+# The result should be at least $n.
+sub _bufsize {
+ my $n = shift;
+ return 8192 if $n <= 0;
+ my $b = $n & ~8191;
+ $b += 8192 if $n & 8191;
+ $b;
+}
+
+################################################################
+#
+# Miscellaneous public methods
+#
+
+# Lock the file
+sub flock {
+ my ($self, $op) = @_;
+ unless (@_ <= 3) {
+ my $pack = ref $self;
+ croak "Usage: $pack\->flock([OPERATION])";
+ }
+ my $fh = $self->{fh};
+ $op = LOCK_EX unless defined $op;
+ my $locked = flock $fh, $op;
+
+ if ($locked && ($op & (LOCK_EX | LOCK_SH))) {
+ # If you're locking the file, then presumably it's because
+ # there might have been a write access by another process.
+ # In that case, the read cache contents and the offsets table
+ # might be invalid, so discard them. 20030508
+ $self->{offsets} = [0];
+ $self->{cache}->empty;
+ }
+
+ $locked;
+}
+
+# Get/set autochomp option
+sub autochomp {
+ my $self = shift;
+ if (@_) {
+ my $old = $self->{autochomp};
+ $self->{autochomp} = shift;
+ $old;
+ } else {
+ $self->{autochomp};
+ }
+}
+
+# Get offset table entries; returns offset of nth record
+sub offset {
+ my ($self, $n) = @_;
+
+ if ($#{$self->{offsets}} < $n) {
+ return if $self->{eof}; # request for record beyond the end of file
+ my $o = $self->_fill_offsets_to($n);
+ # If it's still undefined, there is no such record, so return 'undef'
+ return unless defined $o;
+ }
+
+ $self->{offsets}[$n];
+}
+
+sub discard_offsets {
+ my $self = shift;
+ $self->{offsets} = [0];
+}
+
+################################################################
+#
+# Matters related to deferred writing
+#
+
+# Defer writes
+sub defer {
+ my $self = shift;
+ $self->_stop_autodeferring;
+ @{$self->{ad_history}} = ();
+ $self->{defer} = 1;
+}
+
+# Flush deferred writes
+#
+# This could be better optimized to write the file in one pass, instead
+# of one pass per block of records. But that will require modifications
+# to _twrite, so I should have a good _twrite test suite first.
+sub flush {
+ my $self = shift;
+
+ $self->_flush;
+ $self->{defer} = 0;
+}
+
+sub _old_flush {
+ my $self = shift;
+ my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
+
+ while (@writable) {
+ # gather all consecutive records from the front of @writable
+ my $first_rec = shift @writable;
+ my $last_rec = $first_rec+1;
+ ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
+ --$last_rec;
+ $self->_fill_offsets_to($last_rec);
+ $self->_extend_file_to($last_rec);
+ $self->_splice($first_rec, $last_rec-$first_rec+1,
+ @{$self->{deferred}}{$first_rec .. $last_rec});
+ }
+
+ $self->_discard; # clear out deferred-write cache
+}
+
+sub _flush {
+ my $self = shift;
+ my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
+ my @args;
+ my @adjust;
+
+ while (@writable) {
+ # gather all consecutive records from the front of @writable
+ my $first_rec = shift @writable;
+ my $last_rec = $first_rec+1;
+ ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
+ --$last_rec;
+ my $end = $self->_fill_offsets_to($last_rec+1);
+ if (not defined $end) {
+ $self->_extend_file_to($last_rec);
+ $end = $self->{offsets}[$last_rec];
+ }
+ my ($start) = $self->{offsets}[$first_rec];
+ push @args,
+ join("", @{$self->{deferred}}{$first_rec .. $last_rec}), # data
+ $start, # position
+ $end-$start; # length
+ push @adjust, [$first_rec, # starting at this position...
+ $last_rec-$first_rec+1, # this many records...
+ # are replaced with these...
+ @{$self->{deferred}}{$first_rec .. $last_rec},
+ ];
+ }
+
+ $self->_mtwrite(@args); # write multiple record groups
+ $self->_discard; # clear out deferred-write cache
+ $self->_oadjust(@adjust);
+}
+
+# Discard deferred writes and disable future deferred writes
+sub discard {
+ my $self = shift;
+ $self->_discard;
+ $self->{defer} = 0;
+}
+
+# Discard deferred writes, but retain old deferred writing mode
+sub _discard {
+ my $self = shift;
+ %{$self->{deferred}} = ();
+ $self->{deferred_s} = 0;
+ $self->{deferred_max} = -1;
+ $self->{cache}->set_limit($self->{memory});
+}
+
+# Deferred writing is enabled, either explicitly ($self->{defer})
+# or automatically ($self->{autodeferring})
+sub _is_deferring {
+ my $self = shift;
+ $self->{defer} || $self->{autodeferring};
+}
+
+# The largest record number of any deferred record
+sub _defer_max {
+ my $self = shift;
+ return $self->{deferred_max} if defined $self->{deferred_max};
+ my $max = -1;
+ for my $key (keys %{$self->{deferred}}) {
+ $max = $key if $key > $max;
+ }
+ $self->{deferred_max} = $max;
+ $max;
+}
+
+################################################################
+#
+# Matters related to autodeferment
+#
+
+# Get/set autodefer option
+sub autodefer {
+ my $self = shift;
+ if (@_) {
+ my $old = $self->{autodefer};
+ $self->{autodefer} = shift;
+ if ($old) {
+ $self->_stop_autodeferring;
+ @{$self->{ad_history}} = ();
+ }
+ $old;
+ } else {
+ $self->{autodefer};
+ }
+}
+
+# The user is trying to store record #$n. Record that in the history,
+# and then enable (or disable) autodeferment if that seems useful.
+# Note that it's OK for $n to be a non-number, as long as the function
+# is prepared to deal with that. Nobody else looks at the ad_history.
+#
+# Now, what does the ad_history mean, and what is this function doing?
+# Essentially, the idea is to enable autodeferring when we see that the
+# user has made three consecutive STORE calls to three consecutive records.
+# ("Three" is actually ->{autodefer_threshhold}.)
+# A STORE call for record #$n inserts $n into the autodefer history,
+# and if the history contains three consecutive records, we enable
+# autodeferment. An ad_history of [X, Y] means that the most recent
+# STOREs were for records X, X+1, ..., Y, in that order.
+#
+# Inserting a nonconsecutive number erases the history and starts over.
+#
+# Performing a special operation like SPLICE erases the history.
+#
+# There's one special case: CLEAR means that CLEAR was just called.
+# In this case, we prime the history with [-2, -1] so that if the next
+# write is for record 0, autodeferring goes on immediately. This is for
+# the common special case of "@a = (...)".
+#
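+# For example, with the default autodefer_threshhold of 3 (and a file long
+# enough to pass the autodefer_filelen_threshhold check):
+#    STORE 5  -> ad_history = [5, 5]
+#    STORE 6  -> ad_history = [5, 6]
+#    STORE 7  -> ad_history = [5, 7]; 7-5+1 >= 3, so autodeferring turns on
+#    STORE 12 -> nonconsecutive: ad_history = [12, 12], autodeferring stops
+#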
+sub _annotate_ad_history {
+ my ($self, $n) = @_;
+ return unless $self->{autodefer}; # feature is disabled
+ return if $self->{defer}; # already in explicit defer mode
+ return unless $self->{offsets}[-1] >= $self->{autodefer_filelen_threshhold};
+
+ local *H = $self->{ad_history};
+ if ($n eq 'CLEAR') {
+ @H = (-2, -1); # prime the history with fake records
+ $self->_stop_autodeferring;
+ } elsif ($n =~ /^\d+$/) {
+ if (@H == 0) {
+ @H = ($n, $n);
+ } else { # @H == 2
+ if ($H[1] == $n-1) { # another consecutive record
+ $H[1]++;
+ if ($H[1] - $H[0] + 1 >= $self->{autodefer_threshhold}) {
+ $self->{autodeferring} = 1;
+ }
+ } else { # nonconsecutive- erase and start over
+ @H = ($n, $n);
+ $self->_stop_autodeferring;
+ }
+ }
+ } else { # SPLICE or STORESIZE or some such
+ @H = ();
+ $self->_stop_autodeferring;
+ }
+}
+
+# If autodeferring was enabled, cut it out and discard the history
+sub _stop_autodeferring {
+ my $self = shift;
+ if ($self->{autodeferring}) {
+ $self->_flush;
+ }
+ $self->{autodeferring} = 0;
+}
+
+################################################################
+
+
+# This is NOT a method. It is here for two reasons:
+# 1. To factor a fairly complicated block out of the constructor
+# 2. To provide access for the test suite, which needs to be sure
+# files are being written properly.
+sub _default_recsep {
+ my $recsep = $/;
+ if ($^O eq 'MSWin32') { # Dos too?
+ # Windows users expect files to be terminated with \r\n
+ # But $/ is set to \n instead
+ # Note that this also transforms \n\n into \r\n\r\n.
+ # That is a feature.
+ $recsep =~ s/\n/\r\n/g;
+ }
+ $recsep;
+}
+
+# Utility function for _check_integrity
+sub _ci_warn {
+ my $msg = shift;
+ $msg =~ s/\n/\\n/g;
+ $msg =~ s/\r/\\r/g;
+ print "# $msg\n";
+}
+
+# Given a file, make sure the cache is consistent with the
+# file contents and the internal data structures are consistent with
+# each other. Returns true if everything checks out, false if not
+#
+# The $file argument is no longer used. It is retained for compatibility
+# with the existing test suite.
+sub _check_integrity {
+ my ($self, $file, $warn) = @_;
+ my $rsl = $self->{recseplen};
+ my $rs = $self->{recsep};
+ my $good = 1;
+ local *_; # local $_ does not work here
+ local $DIAGNOSTIC = 1;
+
+ if (not defined $rs) {
+ _ci_warn("recsep is undef!");
+ $good = 0;
+ } elsif ($rs eq "") {
+ _ci_warn("recsep is empty!");
+ $good = 0;
+ } elsif ($rsl != length $rs) {
+ my $ln = length $rs;
+ _ci_warn("recsep <$rs> has length $ln, should be $rsl");
+ $good = 0;
+ }
+
+ if (not defined $self->{offsets}[0]) {
+ _ci_warn("offset 0 is missing!");
+ $good = 0;
+
+ } elsif ($self->{offsets}[0] != 0) {
+ _ci_warn("rec 0: offset <$self->{offsets}[0]> s/b 0!");
+ $good = 0;
+ }
+
+ my $cached = 0;
+ {
+ local *F = $self->{fh};
+ seek F, 0, SEEK_SET;
+ local $. = 0;
+ local $/ = $rs;
+
+ while (<F>) {
+ my $n = $. - 1;
+ my $cached = $self->{cache}->_produce($n);
+ my $offset = $self->{offsets}[$.];
+ my $ao = tell F;
+ if (defined $offset && $offset != $ao) {
+ _ci_warn("rec $n: offset <$offset> actual <$ao>");
+ $good = 0;
+ }
+ if (defined $cached && $_ ne $cached && ! $self->{deferred}{$n}) {
+ $good = 0;
+ _ci_warn("rec $n: cached <$cached> actual <$_>");
+ }
+ if (defined $cached && substr($cached, -$rsl) ne $rs) {
+ $good = 0;
+ _ci_warn("rec $n in the cache is missing the record separator");
+ }
+ if (! defined $offset && $self->{eof}) {
+ $good = 0;
+ _ci_warn("The offset table was marked complete, but it is missing element $.");
+ }
+ }
+ if (@{$self->{offsets}} > $.+1) {
+ $good = 0;
+ my $n = @{$self->{offsets}};
+ _ci_warn("The offset table has $n items, but the file has only $.");
+ }
+
+ my $deferring = $self->_is_deferring;
+ for my $n ($self->{cache}->ckeys) {
+ my $r = $self->{cache}->_produce($n);
+ $cached += length($r);
+ next if $n+1 <= $.; # checked this already
+ _ci_warn("spurious caching of record $n");
+ $good = 0;
+ }
+ my $b = $self->{cache}->bytes;
+ if ($cached != $b) {
+ _ci_warn("cache size is $b, should be $cached");
+ $good = 0;
+ }
+ }
+
+ # That cache has its own set of tests
+ $good = 0 unless $self->{cache}->_check_integrity;
+
+ # Now let's check the deferbuffer
+ # Unless deferred writing is enabled, it should be empty
+ if (! $self->_is_deferring && %{$self->{deferred}}) {
+ _ci_warn("deferred writing disabled, but deferbuffer nonempty");
+ $good = 0;
+ }
+
+ # Any record in the deferbuffer should *not* be present in the readcache
+ my $deferred_s = 0;
+ while (my ($n, $r) = each %{$self->{deferred}}) {
+ $deferred_s += length($r);
+ if (defined $self->{cache}->_produce($n)) {
+ _ci_warn("record $n is in the deferbuffer *and* the readcache");
+ $good = 0;
+ }
+ if (substr($r, -$rsl) ne $rs) {
+ _ci_warn("rec $n in the deferbuffer is missing the record separator");
+ $good = 0;
+ }
+ }
+
+ # Total size of deferbuffer should match internal total
+ if ($deferred_s != $self->{deferred_s}) {
+ _ci_warn("buffer size is $self->{deferred_s}, should be $deferred_s");
+ $good = 0;
+ }
+
+ # Total size of deferbuffer should not exceed the specified limit
+ if ($deferred_s > $self->{dw_size}) {
+ _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit of $self->{dw_size}");
+ $good = 0;
+ }
+
+ # Total size of cached data should not exceed the specified limit
+ if ($deferred_s + $cached > $self->{memory}) {
+ my $total = $deferred_s + $cached;
+ _ci_warn("total stored data size is $total which exceeds the limit of $self->{memory}");
+ $good = 0;
+ }
+
+ # Stuff related to autodeferment
+ if (!$self->{autodefer} && @{$self->{ad_history}}) {
+ _ci_warn("autodefer is disabled, but ad_history is nonempty");
+ $good = 0;
+ }
+ if ($self->{autodeferring} && $self->{defer}) {
+ _ci_warn("both autodeferring and explicit deferring are active");
+ $good = 0;
+ }
+ if (@{$self->{ad_history}} == 0) {
+ # That's OK, no additional tests required
+ } elsif (@{$self->{ad_history}} == 2) {
+ my @non_number = grep !/^-?\d+$/, @{$self->{ad_history}};
+ if (@non_number) {
+ my $msg;
+ { local $" = ')(';
+ $msg = "ad_history contains non-numbers (@{$self->{ad_history}})";
+ }
+ _ci_warn($msg);
+ $good = 0;
+ } elsif ($self->{ad_history}[1] < $self->{ad_history}[0]) {
+ _ci_warn("ad_history has nonsensical values @{$self->{ad_history}}");
+ $good = 0;
+ }
+ } else {
+ _ci_warn("ad_history has bad length <@{$self->{ad_history}}>");
+ $good = 0;
+ }
+
+ $good;
+}
+
+################################################################
+#
+# Tie::File::Cache
+#
+# Read cache
+
+package Tie::File::Cache;
+$Tie::File::Cache::VERSION = $Tie::File::VERSION;
+use Carp ':DEFAULT', 'confess';
+
+sub HEAP () { 0 }
+sub HASH () { 1 }
+sub MAX () { 2 }
+sub BYTES() { 3 }
+#sub STAT () { 4 } # Array with request statistics for each record
+#sub MISS () { 5 } # Total number of cache misses
+#sub REQ () { 6 } # Total number of cache requests
+use strict 'vars';
+
+sub new {
+ my ($pack, $max) = @_;
+ local *_;
+ croak "missing argument to ->new" unless defined $max;
+ my $self = [];
+ bless $self => $pack;
+ @$self = (Tie::File::Heap->new($self), {}, $max, 0);
+ $self;
+}
+
+sub adj_limit {
+ my ($self, $n) = @_;
+ $self->[MAX] += $n;
+}
+
+sub set_limit {
+ my ($self, $n) = @_;
+ $self->[MAX] = $n;
+}
+
+# For internal use only
+# Will be called by the heap structure to notify us that a certain
+# piece of data has moved from one heap element to another.
+# $k is the hash key of the item
+# $n is the new index into the heap at which it is stored
+# If $n is undefined, the item has been removed from the heap.
+sub _heap_move {
+ my ($self, $k, $n) = @_;
+ if (defined $n) {
+ $self->[HASH]{$k} = $n;
+ } else {
+ delete $self->[HASH]{$k};
+ }
+}
+
+sub insert {
+ my ($self, $key, $val) = @_;
+ local *_;
+ croak "missing argument to ->insert" unless defined $key;
+ unless (defined $self->[MAX]) {
+ confess "undefined max" ;
+ }
+ confess "undefined val" unless defined $val;
+ return if length($val) > $self->[MAX];
+
+# if ($self->[STAT]) {
+# $self->[STAT][$key] = 1;
+# return;
+# }
+
+ my $oldnode = $self->[HASH]{$key};
+ if (defined $oldnode) {
+ my $oldval = $self->[HEAP]->set_val($oldnode, $val);
+ $self->[BYTES] -= length($oldval);
+ } else {
+ $self->[HEAP]->insert($key, $val);
+ }
+ $self->[BYTES] += length($val);
+ $self->flush if $self->[BYTES] > $self->[MAX];
+}
+
+sub expire {
+ my $self = shift;
+ my $old_data = $self->[HEAP]->popheap;
+ return unless defined $old_data;
+ $self->[BYTES] -= length $old_data;
+ $old_data;
+}
+
+sub remove {
+ my ($self, @keys) = @_;
+ my @result;
+
+# if ($self->[STAT]) {
+# for my $key (@keys) {
+# $self->[STAT][$key] = 0;
+# }
+# return;
+# }
+
+ for my $key (@keys) {
+ next unless exists $self->[HASH]{$key};
+ my $old_data = $self->[HEAP]->remove($self->[HASH]{$key});
+ $self->[BYTES] -= length $old_data;
+ push @result, $old_data;
+ }
+ @result;
+}
+
+sub lookup {
+ my ($self, $key) = @_;
+ local *_;
+ croak "missing argument to ->lookup" unless defined $key;
+
+# if ($self->[STAT]) {
+# $self->[MISS]++ if $self->[STAT][$key]++ == 0;
+# $self->[REQ]++;
+# my $hit_rate = 1 - $self->[MISS] / $self->[REQ];
+# # Do some testing to determine this threshhold
+# $#$self = STAT - 1 if $hit_rate > 0.20;
+# }
+
+ if (exists $self->[HASH]{$key}) {
+ $self->[HEAP]->lookup($self->[HASH]{$key});
+ } else {
+ return;
+ }
+}
+
+# For internal use only
+sub _produce {
+ my ($self, $key) = @_;
+ my $loc = $self->[HASH]{$key};
+ return unless defined $loc;
+ $self->[HEAP][$loc][2];
+}
+
+# For internal use only
+sub _promote {
+ my ($self, $key) = @_;
+ $self->[HEAP]->promote($self->[HASH]{$key});
+}
+
+sub empty {
+ my ($self) = @_;
+ %{$self->[HASH]} = ();
+ $self->[BYTES] = 0;
+ $self->[HEAP]->empty;
+# @{$self->[STAT]} = ();
+# $self->[MISS] = 0;
+# $self->[REQ] = 0;
+}
+
+sub is_empty {
+ my ($self) = @_;
+ keys %{$self->[HASH]} == 0;
+}
+
+sub update {
+ my ($self, $key, $val) = @_;
+ local *_;
+ croak "missing argument to ->update" unless defined $key;
+ if (length($val) > $self->[MAX]) {
+ my ($oldval) = $self->remove($key);
+ $self->[BYTES] -= length($oldval) if defined $oldval;
+ } elsif (exists $self->[HASH]{$key}) {
+ my $oldval = $self->[HEAP]->set_val($self->[HASH]{$key}, $val);
+ $self->[BYTES] += length($val);
+ $self->[BYTES] -= length($oldval) if defined $oldval;
+ } else {
+ $self->[HEAP]->insert($key, $val);
+ $self->[BYTES] += length($val);
+ }
+ $self->flush;
+}
+
+sub rekey {
+ my ($self, $okeys, $nkeys) = @_;
+ local *_;
+ my %map;
+ @map{@$okeys} = @$nkeys;
+ croak "missing argument to ->rekey" unless defined $nkeys;
+ croak "length mismatch in ->rekey arguments" unless @$nkeys == @$okeys;
+ my %adjusted; # map new keys to heap indices
+ # You should be able to cut this to one loop TODO XXX
+ for (0 .. $#$okeys) {
+ $adjusted{$nkeys->[$_]} = delete $self->[HASH]{$okeys->[$_]};
+ }
+ while (my ($nk, $ix) = each %adjusted) {
+ # @{$self->[HASH]}{keys %adjusted} = values %adjusted;
+ $self->[HEAP]->rekey($ix, $nk);
+ $self->[HASH]{$nk} = $ix;
+ }
+}
+
+sub ckeys {
+ my $self = shift;
+ my @a = keys %{$self->[HASH]};
+ @a;
+}
+
+# Return total amount of cached data
+sub bytes {
+ my $self = shift;
+ $self->[BYTES];
+}
+
+# Expire oldest item from cache until cache size is smaller than $max
+sub reduce_size_to {
+ my ($self, $max) = @_;
+ until ($self->[BYTES] <= $max) {
+ # Note that Tie::File::Cache::expire has been inlined here
+ my $old_data = $self->[HEAP]->popheap;
+ return unless defined $old_data;
+ $self->[BYTES] -= length $old_data;
+ }
+}
+
+# Why not just $self->reduce_size_to($self->[MAX])?
+# Try this when things stabilize TODO XXX
+# If the cache is too full, expire the oldest records
+sub flush {
+ my $self = shift;
+ $self->reduce_size_to($self->[MAX]) if $self->[BYTES] > $self->[MAX];
+}
+
+# For internal use only
+sub _produce_lru {
+ my $self = shift;
+ $self->[HEAP]->expire_order;
+}
+
+BEGIN { *_ci_warn = \&Tie::File::_ci_warn }
+
+sub _check_integrity { # For CACHE
+ my $self = shift;
+ my $good = 1;
+
+ # Test HEAP
+ $self->[HEAP]->_check_integrity or $good = 0;
+
+ # Test HASH
+ my $bytes = 0;
+ for my $k (keys %{$self->[HASH]}) {
+ if ($k ne '0' && $k !~ /^[1-9][0-9]*$/) {
+ $good = 0;
+ _ci_warn "Cache hash key <$k> is non-numeric";
+ }
+
+ my $h = $self->[HASH]{$k};
+ if (! defined $h) {
+ $good = 0;
+ _ci_warn "Heap index number for key $k is undefined";
+ } elsif ($h == 0) {
+ $good = 0;
+ _ci_warn "Heap index number for key $k is zero";
+ } else {
+ my $j = $self->[HEAP][$h];
+ if (! defined $j) {
+ $good = 0;
+ _ci_warn "Heap contents key $k (=> $h) are undefined";
+ } else {
+ $bytes += length($j->[2]);
+ if ($k ne $j->[1]) {
+ $good = 0;
+ _ci_warn "Heap contents key $k (=> $h) is $j->[1], should be $k";
+ }
+ }
+ }
+ }
+
+ # Test BYTES
+ if ($bytes != $self->[BYTES]) {
+ $good = 0;
+ _ci_warn "Total data in cache is $bytes, expected $self->[BYTES]";
+ }
+
+ # Test MAX
+ if ($bytes > $self->[MAX]) {
+ $good = 0;
+ _ci_warn "Total data in cache is $bytes, exceeds maximum $self->[MAX]";
+ }
+
+ return $good;
+}
+
+sub delink {
+ my $self = shift;
+ $self->[HEAP] = undef; # Bye bye heap
+}
+
+################################################################
+#
+# Tie::File::Heap
+#
+# Heap data structure for use by cache LRU routines
+
+package Tie::File::Heap;
+use Carp ':DEFAULT', 'confess';
+$Tie::File::Heap::VERSION = $Tie::File::Cache::VERSION;
+sub SEQ () { 0 };
+sub KEY () { 1 };
+sub DAT () { 2 };
+
+sub new {
+ my ($pack, $cache) = @_;
+ die "$pack: Parent cache object $cache does not support _heap_move method"
+ unless eval { $cache->can('_heap_move') };
+ my $self = [[0,$cache,0]];
+ bless $self => $pack;
+}
+
+# Allocate a new sequence number, larger than all previously allocated numbers
+sub _nseq {
+ my $self = shift;
+ $self->[0][0]++;
+}
+
+sub _cache {
+ my $self = shift;
+ $self->[0][1];
+}
+
+sub _nelts {
+ my $self = shift;
+ $self->[0][2];
+}
+
+sub _nelts_inc {
+ my $self = shift;
+ ++$self->[0][2];
+}
+
+sub _nelts_dec {
+ my $self = shift;
+ --$self->[0][2];
+}
+
+sub is_empty {
+ my $self = shift;
+ $self->_nelts == 0;
+}
+
+sub empty {
+ my $self = shift;
+ $#$self = 0;
+ $self->[0][2] = 0;
+ $self->[0][0] = 0; # might as well reset the sequence numbers
+}
+
+# notify the parent cache object that we moved something
+sub _heap_move {
+ my $self = shift;
+ $self->_cache->_heap_move(@_);
+}
+
+# Insert a piece of data into the heap with the indicated sequence number.
+# The item with the smallest sequence number is always at the top.
+# If no sequence number is specified, allocate a new one and insert the
+# item at the bottom.
+sub insert {
+ my ($self, $key, $data, $seq) = @_;
+ $seq = $self->_nseq unless defined $seq;
+ $self->_insert_new([$seq, $key, $data]);
+}
+
+# Insert a new, fresh item at the bottom of the heap
+sub _insert_new {
+ my ($self, $item) = @_;
+ my $i = @$self;
+ $i = int($i/2) until defined $self->[$i/2];
+ $self->[$i] = $item;
+ $self->[0][1]->_heap_move($self->[$i][KEY], $i);
+ $self->_nelts_inc;
+}
+
+# Insert [$data, $seq] pair at or below item $i in the heap.
+# If $i is omitted, default to 1 (the top element.)
+sub _insert {
+ my ($self, $item, $i) = @_;
+# $self->_check_loc($i) if defined $i;
+ $i = 1 unless defined $i;
+ until (! defined $self->[$i]) {
+ if ($self->[$i][SEQ] > $item->[SEQ]) { # inserted item is older
+ ($self->[$i], $item) = ($item, $self->[$i]);
+ $self->[0][1]->_heap_move($self->[$i][KEY], $i);
+ }
+ # If either is undefined, go that way. Otherwise, choose at random
+ my $dir;
+ $dir = 0 if !defined $self->[2*$i];
+ $dir = 1 if !defined $self->[2*$i+1];
+ $dir = int(rand(2)) unless defined $dir;
+ $i = 2*$i + $dir;
+ }
+ $self->[$i] = $item;
+ $self->[0][1]->_heap_move($self->[$i][KEY], $i);
+ $self->_nelts_inc;
+}
+
+# Remove the item at node $i from the heap, moving child items upwards.
+# The item with the smallest sequence number is always at the top.
+# Moving items upwards maintains this condition.
+# Return the removed item. Return undef if there was no item at node $i.
+sub remove {
+ my ($self, $i) = @_;
+ $i = 1 unless defined $i;
+ my $top = $self->[$i];
+ return unless defined $top;
+ while (1) {
+ my $ii;
+ my ($L, $R) = (2*$i, 2*$i+1);
+
+ # If either is undefined, go the other way.
+ # Otherwise, go towards the smallest.
+ last unless defined $self->[$L] || defined $self->[$R];
+ $ii = $R if not defined $self->[$L];
+ $ii = $L if not defined $self->[$R];
+ unless (defined $ii) {
+ $ii = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
+ }
+
+ $self->[$i] = $self->[$ii]; # Promote child to fill vacated spot
+ $self->[0][1]->_heap_move($self->[$i][KEY], $i);
+ $i = $ii; # Fill new vacated spot
+ }
+ $self->[0][1]->_heap_move($top->[KEY], undef);
+ undef $self->[$i];
+ $self->_nelts_dec;
+ return $top->[DAT];
+}
+
+sub popheap {
+ my $self = shift;
+ $self->remove(1);
+}
+
+# set the sequence number of the indicated item to a higher number
+# than any other item in the heap, and bubble the item down to the
+# bottom.
+sub promote {
+ my ($self, $n) = @_;
+# $self->_check_loc($n);
+ $self->[$n][SEQ] = $self->_nseq;
+ my $i = $n;
+ while (1) {
+ my ($L, $R) = (2*$i, 2*$i+1);
+ my $dir;
+ last unless defined $self->[$L] || defined $self->[$R];
+ $dir = $R unless defined $self->[$L];
+ $dir = $L unless defined $self->[$R];
+ unless (defined $dir) {
+ $dir = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
+ }
+ @{$self}[$i, $dir] = @{$self}[$dir, $i];
+ for ($i, $dir) {
+ $self->[0][1]->_heap_move($self->[$_][KEY], $_) if defined $self->[$_];
+ }
+ $i = $dir;
+ }
+}
+
+# Return item $n from the heap, promoting its LRU status
+sub lookup {
+ my ($self, $n) = @_;
+# $self->_check_loc($n);
+ my $val = $self->[$n];
+ $self->promote($n);
+ $val->[DAT];
+}
+
+
+# Assign a new value for node $n, promoting it to the bottom of the heap
+sub set_val {
+ my ($self, $n, $val) = @_;
+# $self->_check_loc($n);
+ my $oval = $self->[$n][DAT];
+ $self->[$n][DAT] = $val;
+ $self->promote($n);
+ return $oval;
+}
+
+# The hash key has changed for an item;
+# alter the heap's record of the hash key
+sub rekey {
+ my ($self, $n, $new_key) = @_;
+# $self->_check_loc($n);
+ $self->[$n][KEY] = $new_key;
+}
+
+sub _check_loc {
+ my ($self, $n) = @_;
+ unless (1 || defined $self->[$n]) {
+ confess "_check_loc($n) failed";
+ }
+}
+
+BEGIN { *_ci_warn = \&Tie::File::_ci_warn }
+
+sub _check_integrity {
+ my $self = shift;
+ my $good = 1;
+ my %seq;
+
+ unless (eval {$self->[0][1]->isa("Tie::File::Cache")}) {
+ _ci_warn "Element 0 of heap corrupt";
+ $good = 0;
+ }
+ $good = 0 unless $self->_satisfies_heap_condition(1);
+ for my $i (2 .. $#{$self}) {
+ my $p = int($i/2); # index of parent node
+ if (defined $self->[$i] && ! defined $self->[$p]) {
+ _ci_warn "Element $i of heap defined, but parent $p isn't";
+ $good = 0;
+ }
+
+ if (defined $self->[$i]) {
+ if ($seq{$self->[$i][SEQ]}) {
+ my $seq = $self->[$i][SEQ];
+ _ci_warn "Nodes $i and $seq{$seq} both have SEQ=$seq";
+ $good = 0;
+ } else {
+ $seq{$self->[$i][SEQ]} = $i;
+ }
+ }
+ }
+
+ return $good;
+}
+
+sub _satisfies_heap_condition {
+ my $self = shift;
+ my $n = shift || 1;
+ my $good = 1;
+ for (0, 1) {
+ my $c = $n*2 + $_;
+ next unless defined $self->[$c];
+ if ($self->[$n][SEQ] >= $self->[$c][SEQ]) {
+ _ci_warn "Node $n of heap does not predate node $c";
+ $good = 0 ;
+ }
+ $good = 0 unless $self->_satisfies_heap_condition($c);
+ }
+ return $good;
+}
+
+# Return a list of all the values, sorted by expiration order
+sub expire_order {
+ my $self = shift;
+ my @nodes = sort {$a->[SEQ] <=> $b->[SEQ]} $self->_nodes;
+ map { $_->[KEY] } @nodes;
+}
+
+sub _nodes {
+ my $self = shift;
+ my $i = shift || 1;
+ return unless defined $self->[$i];
+ ($self->[$i], $self->_nodes($i*2), $self->_nodes($i*2+1));
+}
+
+"Cogito, ergo sum."; # don't forget to return a true value from the file
+
+__END__
+
+=head1 NAME
+
+Tie::File - Access the lines of a disk file via a Perl array
+
+=head1 SYNOPSIS
+
+ # This file documents Tie::File version 0.97
+ use Tie::File;
+
+ tie @array, 'Tie::File', filename or die ...;
+
+ $array[13] = 'blah'; # line 13 of the file is now 'blah'
+ print $array[42]; # display line 42 of the file
+
+ $n_recs = @array; # how many records are in the file?
+ $#array -= 2; # chop two records off the end
+
+
+ for (@array) {
+ s/PERL/Perl/g; # Replace PERL with Perl everywhere in the file
+ }
+
+ # These are just like regular push, pop, unshift, shift, and splice
+ # Except that they modify the file in the way you would expect
+
+ push @array, new recs...;
+ my $r1 = pop @array;
+ unshift @array, new recs...;
+ my $r2 = shift @array;
+ @old_recs = splice @array, 3, 7, new recs...;
+
+ untie @array; # all finished
+
+
+=head1 DESCRIPTION
+
+C<Tie::File> represents a regular text file as a Perl array. Each
+element in the array corresponds to a record in the file. The first
+line of the file is element 0 of the array; the second line is element
+1, and so on.
+
+The file is I<not> loaded into memory, so this will work even for
+gigantic files.
+
+Changes to the array are reflected in the file immediately.
+
+Lazy people and beginners may now stop reading the manual.
+
+=head2 C<recsep>
+
+What is a 'record'? By default, the meaning is the same as for the
+C<E<lt>...E<gt>> operator: It's a string terminated by C<$/>, which is
+probably C<"\n">. (Minor exception: on DOS and Win32 systems, a
+'record' is a string terminated by C<"\r\n">.) You may change the
+definition of "record" by supplying the C<recsep> option in the C<tie>
+call:
+
+ tie @array, 'Tie::File', $file, recsep => 'es';
+
+This says that records are delimited by the string C<es>. If the file
+contained the following data:
+
+ Curse these pesky flies!\n
+
+then the C<@array> would appear to have four elements:
+
+ "Curse th"
+ "e p"
+ "ky fli"
+ "!\n"
+
+An undefined value is not permitted as a record separator. Perl's
+special "paragraph mode" semantics (E<agrave> la C<$/ = "">) are not
+emulated.
+
+Records read from the tied array do not have the record separator
+string on the end; this is to allow
+
+ $array[17] .= "extra";
+
+to work as expected.
+
+(See L<"autochomp">, below.) Records stored into the array will have
+the record separator string appended before they are written to the
+file, if they don't have one already. For example, if the record
+separator string is C<"\n">, then the following two lines do exactly
+the same thing:
+
+ $array[17] = "Cherry pie";
+ $array[17] = "Cherry pie\n";
+
+The result is that the contents of line 17 of the file will be
+replaced with "Cherry pie"; a newline character will separate line 17
+from line 18. This means that this code will do nothing:
+
+ chomp $array[17];
+
+This is because the C<chomp>ed value will have the separator reattached
+when it is written back to the file. There is no way to create a file
+whose trailing record separator string is missing.
+
+Inserting records that I<contain> the record separator string is not
+supported by this module. It will probably produce a reasonable
+result, but what this result will be may change in a future version.
+Use 'splice' to insert records or to replace one record with several.
+
+=head2 C<autochomp>
+
+Normally, array elements have the record separator removed, so that if
+the file contains the text
+
+ Gold
+ Frankincense
+ Myrrh
+
+the tied array will appear to contain C<("Gold", "Frankincense",
+"Myrrh")>. If you set C<autochomp> to a false value, the record
+separator will not be removed. If the file above was tied with
+
+ tie @gifts, "Tie::File", $gifts, autochomp => 0;
+
+then the array C<@gifts> would appear to contain C<("Gold\n",
+"Frankincense\n", "Myrrh\n")>, or (on Win32 systems) C<("Gold\r\n",
+"Frankincense\r\n", "Myrrh\r\n")>.
+
+=head2 C<mode>
+
+Normally, the specified file will be opened for read and write access,
+and will be created if it does not exist. (That is, the flags
+C<O_RDWR | O_CREAT> are supplied in the C<open> call.) If you want to
+change this, you may supply alternative flags in the C<mode> option.
+See L<Fcntl> for a listing of available flags.
+For example:
+
+ # open the file if it exists, but fail if it does not exist
+ use Fcntl 'O_RDWR';
+ tie @array, 'Tie::File', $file, mode => O_RDWR;
+
+ # create the file if it does not exist
+ use Fcntl 'O_RDWR', 'O_CREAT';
+ tie @array, 'Tie::File', $file, mode => O_RDWR | O_CREAT;
+
+ # open an existing file in read-only mode
+ use Fcntl 'O_RDONLY';
+ tie @array, 'Tie::File', $file, mode => O_RDONLY;
+
+Opening the data file in write-only or append mode is not supported.
+
+=head2 C<memory>
+
+This is an upper limit on the amount of memory that C<Tie::File> will
+consume at any time while managing the file. This is used for two
+things: managing the I<read cache> and managing the I<deferred write
+buffer>.
+
+Records read in from the file are cached, to avoid having to re-read
+them repeatedly. If you read the same record twice, the first time it
+will be stored in memory, and the second time it will be fetched from
+the I<read cache>. The amount of data in the read cache will not
+exceed the value you specified for C<memory>. If C<Tie::File> wants
+to cache a new record, but the read cache is full, it will make room
+by expiring the least-recently visited records from the read cache.
+
+The default memory limit is 2MiB. You can adjust the maximum read
+cache size by supplying the C<memory> option. The argument is the
+desired cache size, in bytes.
+
+ # I have a lot of memory, so use a large cache to speed up access
+ tie @array, 'Tie::File', $file, memory => 20_000_000;
+
+Setting the memory limit to 0 will inhibit caching; records will be
+fetched from disk every time you examine them.
+
+The C<memory> value is not an absolute or exact limit on the memory
+used. C<Tie::File> objects contain some structures besides the read
+cache and the deferred write buffer, whose sizes are not charged
+against C<memory>.
+
+The cache itself consumes about 310 bytes per cached record, so if
+your file has many short records, you may want to decrease the cache
+memory limit, or else the cache overhead may exceed the size of the
+cached data.
+
+
+=head2 C<dw_size>
+
+(This is an advanced feature. Skip this section on first reading.)
+
+If you use deferred writing (See L<"Deferred Writing">, below) then
+data you write into the array will not be written directly to the
+file; instead, it will be saved in the I<deferred write buffer> to be
+written out later. Data in the deferred write buffer is also charged
+against the memory limit you set with the C<memory> option.
+
+You may set the C<dw_size> option to limit the amount of data that can
+be saved in the deferred write buffer. This limit may not exceed the
+total memory limit. For example, if you set C<dw_size> to 1000 and
+C<memory> to 2500, that means that no more than 1000 bytes of deferred
+writes will be saved up. The space available for the read cache will
+vary, but it will always be at least 1500 bytes (if the deferred write
+buffer is full) and it could grow as large as 2500 bytes (if the
+deferred write buffer is empty.)
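+
+For example, the split just described could be requested with a call
+like this (the numbers are purely illustrative):
+
+ tie @array, 'Tie::File', $file, memory => 2500, dw_size => 1000;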
+
+If you don't specify a C<dw_size>, it defaults to the entire memory
+limit.
+
+=head2 Option Format
+
+C<-mode> is a synonym for C<mode>. C<-recsep> is a synonym for
+C<recsep>. C<-memory> is a synonym for C<memory>. You get the
+idea.
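+
+For example, the following two calls request the same thing:
+
+ tie @array, 'Tie::File', $file, memory => 20_000_000;
+ tie @array, 'Tie::File', $file, -memory => 20_000_000;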
+
+=head1 Public Methods
+
+The C<tie> call returns an object, say C<$o>. You may call
+
+ $rec = $o->FETCH($n);
+ $o->STORE($n, $rec);
+
+to fetch or store the record at line C<$n>, respectively; similarly
+the other tied array methods. (See L<perltie> for details.) You may
+also call the following methods on this object:
+
+=head2 C<flock>
+
+ $o->flock(MODE)
+
+will lock the tied file. C<MODE> has the same meaning as the second
+argument to the Perl built-in C<flock> function; for example
+C<LOCK_SH> or C<LOCK_EX | LOCK_NB>. (These constants are provided by
+the C<use Fcntl ':flock'> declaration.)
+
+C<MODE> is optional; the default is C<LOCK_EX>.
+
+C<Tie::File> maintains an internal table of the byte offset of each
+record it has seen in the file.
+
+When you use C<flock> to lock the file, C<Tie::File> assumes that the
+read cache is no longer trustworthy, because another process might
+have modified the file since the last time it was read. Therefore, a
+successful call to C<flock> discards the contents of the read cache
+and the internal record offset table.
+
+C<Tie::File> promises that the following sequence of operations will
+be safe:
+
+ my $o = tie @array, "Tie::File", $filename;
+ $o->flock;
+
+In particular, C<Tie::File> will I<not> read or write the file during
+the C<tie> call. (Exception: Using C<mode =E<gt> O_TRUNC> will, of
+course, erase the file during the C<tie> call. If you want to do this
+safely, then open the file without C<O_TRUNC>, lock the file, and use
+C<@array = ()>.)
+
+The best way to unlock a file is to discard the object and untie the
+array. It is probably unsafe to unlock the file without also untying
+it, because if you do, changes may remain unwritten inside the object.
+That is why there is no shortcut for unlocking. If you really want to
+unlock the file prematurely, you know what to do; if you don't know
+what to do, then don't do it.
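+
+Putting the advice above together, a typical locked session might look
+like this sketch:
+
+ use Fcntl ':flock';
+
+ my $o = tie @array, 'Tie::File', $filename or die "can't tie: $!";
+ $o->flock(LOCK_EX); # lock; this also discards the read cache
+ $array[0] =~ s/foo/bar/; # ... work with the file ...
+ undef $o; # discard the object ...
+ untie @array; # ... and untie it, which also releases the lock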
+
+All the usual warnings about file locking apply here. In particular,
+note that file locking in Perl is B<advisory>, which means that
+holding a lock will not prevent anyone else from reading, writing, or
+erasing the file; it only prevents them from getting another lock at
+the same time. Locks are analogous to green traffic lights: If you
+have a green light, that does not prevent the idiot coming the other
+way from plowing into you sideways; it merely guarantees to you that
+the idiot does not also have a green light at the same time.
+
+=head2 C<autochomp>
+
+ my $old_value = $o->autochomp(0); # disable autochomp option
+ my $old_value = $o->autochomp(1); # enable autochomp option
+
+ my $ac = $o->autochomp(); # recover current value
+
+See L<"autochomp">, above.
+
+=head2 C<defer>, C<flush>, C<discard>, and C<autodefer>
+
+See L<"Deferred Writing">, below.
+
+=head2 C<offset>
+
+ $off = $o->offset($n);
+
+This method returns the byte offset of the start of the C<$n>th record
+in the file. If there is no such record, it returns an undefined
+value.
+
+=head1 Tying to an already-opened filehandle
+
+If C<$fh> is a filehandle, such as is returned by C<IO::File> or one
+of the other C<IO> modules, you may use:
+
+ tie @array, 'Tie::File', $fh, ...;
+
+Similarly if you opened that handle C<FH> with regular C<open> or
+C<sysopen>, you may use:
+
+ tie @array, 'Tie::File', \*FH, ...;
+
+Handles that were opened write-only won't work. Handles that were
+opened read-only will work as long as you don't try to modify the
+array. Handles must be attached to seekable sources of data---that
+means no pipes or sockets. If C<Tie::File> can detect that you
+supplied a non-seekable handle, the C<tie> call will throw an
+exception. (On Unix systems, it can detect this.)
+
+Note that Tie::File will only close any filehandles that it opened
+internally. If you passed it a filehandle as above, you "own" the
+filehandle, and are responsible for closing it after you have untied
+the @array.
+
+=head1 Deferred Writing
+
+(This is an advanced feature. Skip this section on first reading.)
+
+Normally, modifying a C<Tie::File> array writes to the underlying file
+immediately. Every assignment like C<$a[3] = ...> rewrites as much of
+the file as is necessary; typically, everything from line 3 through
+the end will need to be rewritten. This is the simplest and most
+transparent behavior. Performance even for large files is reasonably
+good.
+
+However, under some circumstances, this behavior may be excessively
+slow. For example, suppose you have a million-record file, and you
+want to do:
+
+ for (@FILE) {
+ $_ = "> $_";
+ }
+
+The first time through the loop, you will rewrite the entire file,
+from line 0 through the end. The second time through the loop, you
+will rewrite the entire file from line 1 through the end. The third
+time through the loop, you will rewrite the entire file from line 2 to
+the end. And so on.
+
+If the performance in such cases is unacceptable, you may defer the
+actual writing, and then have it done all at once. The following loop
+will perform much better for large files:
+
+ (tied @a)->defer;
+ for (@a) {
+ $_ = "> $_";
+ }
+ (tied @a)->flush;
+
+If C<Tie::File>'s memory limit is large enough, all the writing will be
+done in memory. Then, when you call C<-E<gt>flush>, the entire file
+will be rewritten in a single pass.
+
+(Actually, the preceding discussion is something of a fib. You don't
+need to enable deferred writing to get good performance for this
+common case, because C<Tie::File> will do it for you automatically
+unless you specifically tell it not to. See L<"autodeferring">,
+below.)
+
+Calling C<-E<gt>flush> returns the array to immediate-write mode. If
+you wish to discard the deferred writes, you may call C<-E<gt>discard>
+instead of C<-E<gt>flush>. Note that in some cases, some of the data
+will have been written already, and it will be too late for
+C<-E<gt>discard> to discard all the changes. Support for
+C<-E<gt>discard> may be withdrawn in a future version of C<Tie::File>.
+
+Deferred writes are cached in memory up to the limit specified by the
+C<dw_size> option (see above). If the deferred-write buffer is full
+and you try to write still more deferred data, the buffer will be
+flushed. All buffered data will be written immediately, the buffer
+will be emptied, and the now-empty space will be used for future
+deferred writes.
+
+If the deferred-write buffer isn't yet full, but the total size of the
+buffer and the read cache would exceed the C<memory> limit, the oldest
+records will be expired from the read cache until the total size is
+under the limit.
+
+C<push>, C<pop>, C<shift>, C<unshift>, and C<splice> cannot be
+deferred. When you perform one of these operations, any deferred data
+is written to the file and the operation is performed immediately.
+This may change in a future version.
+
+If you resize the array with deferred writing enabled, the file will
+be resized immediately, but deferred records will not be written.
+This has a surprising consequence: C<@a = (...)> erases the file
+immediately, but the writing of the actual data is deferred. This
+might be a bug. If it is a bug, it will be fixed in a future version.
+
+=head2 Autodeferring
+
+C<Tie::File> tries to guess when deferred writing might be helpful,
+and to turn it on and off automatically.
+
+ for (@a) {
+ $_ = "> $_";
+ }
+
+In this example, only the first two assignments will be done
+immediately; after this, all the changes to the file will be deferred
+up to the user-specified memory limit.
+
+You should usually be able to ignore this and just use the module
+without thinking about deferring. However, special applications may
+require fine control over which writes are deferred, or may require
+that all writes be immediate. To disable the autodeferment feature,
+use
+
+ (tied @o)->autodefer(0);
+
+or
+
+ tie @array, 'Tie::File', $file, autodefer => 0;
+
+
+Similarly, C<-E<gt>autodefer(1)> re-enables autodeferment, and
+C<-E<gt>autodefer()> recovers the current value of the autodefer setting.
+
+
+=head1 CONCURRENT ACCESS TO FILES
+
+Caching and deferred writing are inappropriate if you want the same
+file to be accessed simultaneously from more than one process. Other
+optimizations performed internally by this module are also
+incompatible with concurrent access. A future version of this module will
+support a C<concurrent =E<gt> 1> option that enables safe concurrent access.
+
+Previous versions of this documentation suggested using C<memory
+=E<gt> 0> for safe concurrent access. This was mistaken. Tie::File
+will not support safe concurrent access before version 0.98.
+
+=head1 CAVEATS
+
+(That's Latin for 'warnings'.)
+
+=over 4
+
+=item *
+
+Reasonable effort was made to make this module efficient. Nevertheless,
+changing the size of a record in the middle of a large file will
+always be fairly slow, because everything after the new record must be
+moved.
+
+=item *
+
+The behavior of tied arrays is not precisely the same as for regular
+arrays. For example:
+
+ # This DOES print "How unusual!"
+ undef $a[10]; print "How unusual!\n" if defined $a[10];
+
+C<undef>-ing a C<Tie::File> array element just blanks out the
+corresponding record in the file. When you read it back again, you'll
+get the empty string, so the supposedly-C<undef>'ed value will be
+defined. Similarly, if you have C<autochomp> disabled, then
+
+ # This DOES print "How unusual!" if 'autochomp' is disabled
+ undef $a[10];
+ print "How unusual!\n" if $a[10];
+
+This is because, when C<autochomp> is disabled, C<$a[10]> will read back
+as C<"\n"> (or whatever the record separator string is).
+
+There are other minor differences, particularly regarding C<exists>
+and C<delete>, but in general, the correspondence is extremely close.
+
+=item *
+
+I have supposed that since this module is concerned with file I/O,
+almost all normal use of it will be heavily I/O bound. This means
+that the time to maintain complicated data structures inside the
+module will be dominated by the time to actually perform the I/O.
+When there was an opportunity to spend CPU time to avoid doing I/O, I
+usually tried to take it.
+
+=item *
+
+You might be tempted to think that deferred writing is like
+transactions, with C<flush> as C<commit> and C<discard> as
+C<rollback>, but it isn't, so don't.
+
+=item *
+
+There is a large memory overhead for each record offset and for each
+cache entry: about 310 bytes per cached data record, and about 21 bytes per offset table entry.
+
+The per-record overhead will limit the maximum number of records you
+can access per file. Note that I<accessing> the length of the array
+via C<$x = scalar @tied_file> accesses B<all> records and stores their
+offsets. The same applies to C<foreach (@tied_file)>, even if you exit the
+loop early.
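+
+(As a rough worked example using the figures above: a file with one
+million records needs on the order of 1,000,000 x 21 = 21 million
+bytes, about 20MiB, for the offset table alone, before any record
+data is cached.)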
+
+=back
+
+=head1 SUBCLASSING
+
+This version promises absolutely nothing about the internals, which
+may change without notice. A future version of the module will have a
+well-defined and stable subclassing API.
+
+=head1 WHAT ABOUT C<DB_File>?
+
+People sometimes point out that L<DB_File> will do something similar,
+and ask why the C<Tie::File> module is necessary.
+
+There are a number of reasons that you might prefer C<Tie::File>.
+A list is available at C<http://perl.plover.com/TieFile/why-not-DB_File>.
+
+=head1 AUTHOR
+
+Mark Jason Dominus
+
+To contact the author, send email to: C<mjd-perl-tiefile+@plover.com>
+
+To receive an announcement whenever a new version of this module is
+released, send a blank email message to
+C<mjd-perl-tiefile-subscribe@plover.com>.
+
+The most recent version of this module, including documentation and
+any news of importance, will be available at
+
+ http://perl.plover.com/TieFile/
+
+
+=head1 LICENSE
+
+C<Tie::File> version 0.97 is copyright (C) 2003 Mark Jason Dominus.
+
+This library is free software; you may redistribute it and/or modify
+it under the same terms as Perl itself.
+
+These terms are your choice of any of (1) the Perl Artistic Licence,
+or (2) version 2 of the GNU General Public License as published by the
+Free Software Foundation, or (3) any later version of the GNU General
+Public License.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this library; it should be in the file C<COPYING>.
+If not, write to the Free Software Foundation, Inc., 51 Franklin Street,
+Fifth Floor, Boston, MA 02110-1301, USA
+
+For licensing inquiries, contact the author at:
+
+ Mark Jason Dominus
+ 255 S. Warnock St.
+ Philadelphia, PA 19107
+
+=head1 WARRANTY
+
+C<Tie::File> version 0.97 comes with ABSOLUTELY NO WARRANTY.
+For details, see the license.
+
+=head1 THANKS
+
+Gigantic thanks to Jarkko Hietaniemi, for agreeing to put this in the
+core when I hadn't written it yet, and for generally being helpful,
+supportive, and competent. (Usually the rule is "choose any one.")
+Also big thanks to Abhijit Menon-Sen for all of the same things.
+
+Special thanks to Craig Berry and Peter Prymmer (for VMS portability
+help), Randy Kobes (for Win32 portability help), Clinton Pierce and
+Autrijus Tang (for heroic eleventh-hour Win32 testing above and beyond
+the call of duty), Michael G Schwern (for testing advice), and the
+rest of the CPAN testers (for testing generally).
+
+Special thanks to Tels for suggesting several speed and memory
+optimizations.
+
+Additional thanks to:
+Edward Avis /
+Mattia Barbon /
+Tom Christiansen /
+Gerrit Haase /
+Gurusamy Sarathy /
+Jarkko Hietaniemi (again) /
+Nikola Knezevic /
+John Kominetz /
+Nick Ing-Simmons /
+Tassilo von Parseval /
+H. Dieter Pearcey /
+Slaven Rezic /
+Eric Roode /
+Peter Scott /
+Peter Somu /
+Autrijus Tang (again) /
+Tels (again) /
+Juerd Waalboer
+
+=head1 TODO
+
+More tests. (Stuff I didn't think of yet.)
+
+Paragraph mode?
+
+Fixed-length mode. Leave-blanks mode.
+
+Maybe an autolocking mode?
+
+For many common uses of the module, the read cache is a liability.
+For example, a program that inserts a single record, or that scans the
+file once, will have a cache hit rate of zero. This suggests a major
+optimization: The cache should be initially disabled. Here's a hybrid
+approach: Initially, the cache is disabled, but the cache code
+maintains statistics about how high the hit rate would be *if* it were
+enabled. When it sees the hit rate get high enough, it enables
+itself. The STAT comments in this code are the beginning of an
+implementation of this.
+
+Record locking with fcntl()? Then the module might support an undo
+log and get real transactions. What a tour de force that would be.
+
+Keeping track of the highest cached record. This would allow reads-in-a-row
+to skip the cache lookup faster (if reading from 1..N with empty cache at
+start, the last cached value will always be N-1).
+
+More tests.
+
+=cut
+
diff --git a/Master/tlpkg/tlperl/lib/Tie/Handle.pm b/Master/tlpkg/tlperl/lib/Tie/Handle.pm
new file mode 100644
index 00000000000..4e92f75a3d1
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Handle.pm
@@ -0,0 +1,201 @@
+package Tie::Handle;
+
+use 5.006_001;
+our $VERSION = '4.2';
+
+# Tie::StdHandle used to be inside Tie::Handle. For backwards compatibility,
+# loading Tie::Handle has to make Tie::StdHandle available.
+use Tie::StdHandle;
+
+=head1 NAME
+
+Tie::Handle - base class definitions for tied handles
+
+=head1 SYNOPSIS
+
+ package NewHandle;
+ require Tie::Handle;
+
+ @ISA = qw(Tie::Handle);
+
+ sub READ { ... } # Provide a needed method
+ sub TIEHANDLE { ... } # Overrides inherited method
+
+
+ package main;
+
+ tie *FH, 'NewHandle';
+
+=head1 DESCRIPTION
+
+This module provides some skeletal methods for handle-tying classes. See
+L<perltie> for a list of the functions required in tying a handle to a package.
+The basic B<Tie::Handle> package provides a C<new> method, as well as methods
+C<TIEHANDLE>, C<PRINT>, C<PRINTF> and C<GETC>.
+
+For developers wishing to write their own tied-handle classes, the methods
+are summarized below. The L<perltie> section not only documents these, but
+has sample code as well:
+
+=over 4
+
+=item TIEHANDLE classname, LIST
+
+The method invoked by the command C<tie *glob, classname>. Associates a new
+glob instance with the specified class. C<LIST> would represent additional
+arguments (along the lines of L<AnyDBM_File> and compatriots) needed to
+complete the association.
+
+=item WRITE this, scalar, length, offset
+
+Write I<length> bytes of data from I<scalar> starting at I<offset>.
+
+=item PRINT this, LIST
+
+Print the values in I<LIST>
+
+=item PRINTF this, format, LIST
+
+Print the values in I<LIST> using I<format>
+
+=item READ this, scalar, length, offset
+
+Read I<length> bytes of data into I<scalar> starting at I<offset>.
+
+=item READLINE this
+
+Read a single line
+
+=item GETC this
+
+Get a single character
+
+=item CLOSE this
+
+Close the handle
+
+=item OPEN this, filename
+
+(Re-)open the handle
+
+=item BINMODE this
+
+Specify content is binary
+
+=item EOF this
+
+Test for end of file.
+
+=item TELL this
+
+Return position in the file.
+
+=item SEEK this, offset, whence
+
+Position the file.
+
+=item DESTROY this
+
+Free the storage associated with the tied handle referenced by I<this>.
+This is rarely needed, as Perl manages its memory quite well. But the
+option exists, should a class wish to perform specific actions upon the
+destruction of an instance.
+
+=back
+
+=head1 MORE INFORMATION
+
+The L<perltie> section contains an example of tying handles.
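+
+As an additional minimal sketch (the class name and behaviour are
+invented purely for illustration), a read-only handle whose C<READLINE>
+returns successive integers might look like this:
+
+ package CountingHandle;
+ require Tie::Handle;
+ our @ISA = ('Tie::Handle');
+
+ sub TIEHANDLE { my $class = shift; my $count = 0; bless \$count, $class }
+ sub READLINE { my $self = shift; return ++$$self . "\n" }
+
+ package main;
+ tie *COUNT, 'CountingHandle';
+ my $line = <COUNT>; # "1\n"
+ $line = <COUNT>; # "2\n"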
+
+=head1 COMPATIBILITY
+
+This version of Tie::Handle is neither related to nor compatible with
+the Tie::Handle (3.0) module available on CPAN. The two modules with
+the same name appeared by accident. The namespace clash was resolved
+in September 2000 in favor of this module, which ships with the perl
+core, and the version number was accordingly bumped up to 4.0.
+
+=cut
+
+use Carp;
+use warnings::register;
+
+sub new {
+ my $pkg = shift;
+ $pkg->TIEHANDLE(@_);
+}
+
+# "Grandfather" the new, a la Tie::Hash
+
+sub TIEHANDLE {
+ my $pkg = shift;
+ if (defined &{"${pkg}::new"}) {
+ warnings::warnif("WARNING: calling ${pkg}->new since ${pkg}->TIEHANDLE is missing");
+ $pkg->new(@_);
+ }
+ else {
+ croak "$pkg doesn't define a TIEHANDLE method";
+ }
+}
+
+sub PRINT {
+ my $self = shift;
+ if($self->can('WRITE') != \&WRITE) {
+ my $buf = join(defined $, ? $, : "",@_);
+ $buf .= $\ if defined $\;
+ $self->WRITE($buf,length($buf),0);
+ }
+ else {
+ croak ref($self)," doesn't define a PRINT method";
+ }
+}
+
+sub PRINTF {
+ my $self = shift;
+
+ if($self->can('WRITE') != \&WRITE) {
+ my $buf = sprintf(shift,@_);
+ $self->WRITE($buf,length($buf),0);
+ }
+ else {
+ croak ref($self)," doesn't define a PRINTF method";
+ }
+}
+
+sub READLINE {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a READLINE method";
+}
+
+sub GETC {
+ my $self = shift;
+
+ if($self->can('READ') != \&READ) {
+ my $buf;
+ $self->READ($buf,1);
+ return $buf;
+ }
+ else {
+ croak ref($self)," doesn't define a GETC method";
+ }
+}
+
+sub READ {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a READ method";
+}
+
+sub WRITE {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a WRITE method";
+}
+
+sub CLOSE {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a CLOSE method";
+}
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/Hash.pm b/Master/tlpkg/tlperl/lib/Tie/Hash.pm
new file mode 100644
index 00000000000..1ca8887e7e3
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Hash.pm
@@ -0,0 +1,257 @@
+package Tie::Hash;
+
+our $VERSION = '1.03';
+
+=head1 NAME
+
+Tie::Hash, Tie::StdHash, Tie::ExtraHash - base class definitions for tied hashes
+
+=head1 SYNOPSIS
+
+ package NewHash;
+ require Tie::Hash;
+
+ @ISA = qw(Tie::Hash);
+
+ sub DELETE { ... } # Provides a needed method
+ sub CLEAR { ... } # Overrides inherited method
+
+
+ package NewStdHash;
+ require Tie::Hash;
+
+ @ISA = qw(Tie::StdHash);
+
+ # All methods provided by default, define only those needing overrides
+ # Accessors access the storage in %{$_[0]};
+ # TIEHASH should return a reference to the actual storage
+ sub DELETE { ... }
+
+ package NewExtraHash;
+ require Tie::Hash;
+
+ @ISA = qw(Tie::ExtraHash);
+
+ # All methods provided by default, define only those needing overrides
+ # Accessors access the storage in %{$_[0][0]};
+ # TIEHASH should return an array reference with the first element being
+ # the reference to the actual storage
+ sub DELETE {
+ $_[0][1]->('del', $_[0][0], $_[1]); # Call the report writer
+ delete $_[0][0]->{$_[1]}; # $_[0]->SUPER::DELETE($_[1])
+ }
+
+
+ package main;
+
+ tie %new_hash, 'NewHash';
+ tie %new_std_hash, 'NewStdHash';
+ tie %new_extra_hash, 'NewExtraHash',
+ sub {warn "Doing \U$_[1]\E of $_[2].\n"};
+
+=head1 DESCRIPTION
+
+This module provides some skeletal methods for hash-tying classes. See
+L<perltie> for a list of the functions required in order to tie a hash
+to a package. The basic B<Tie::Hash> package provides a C<new> method, as well
+as methods C<TIEHASH>, C<EXISTS> and C<CLEAR>. The B<Tie::StdHash> and
+B<Tie::ExtraHash> packages
+provide most methods for hashes described in L<perltie> (the exceptions
+are C<UNTIE> and C<DESTROY>). They cause tied hashes to behave exactly like standard hashes,
+and allow for selective overwriting of methods. B<Tie::Hash> grandfathers the
+C<new> method: it is used if a class forgets to define a C<TIEHASH> method.
+
+For developers wishing to write their own tied hashes, the required methods
+are briefly defined below. See the L<perltie> section for more detailed
+descriptions, as well as example code:
+
+=over 4
+
+=item TIEHASH classname, LIST
+
+The method invoked by the command C<tie %hash, classname>. Associates a new
+hash instance with the specified class. C<LIST> would represent additional
+arguments (along the lines of L<AnyDBM_File> and compatriots) needed to
+complete the association.
+
+=item STORE this, key, value
+
+Store datum I<value> into I<key> for the tied hash I<this>.
+
+=item FETCH this, key
+
+Retrieve the datum in I<key> for the tied hash I<this>.
+
+=item FIRSTKEY this
+
+Return the first key in the hash.
+
+=item NEXTKEY this, lastkey
+
+Return the next key in the hash.
+
+=item EXISTS this, key
+
+Verify that I<key> exists with the tied hash I<this>.
+
+The B<Tie::Hash> implementation is a stub that simply croaks.
+
+=item DELETE this, key
+
+Delete the key I<key> from the tied hash I<this>.
+
+=item CLEAR this
+
+Clear all values from the tied hash I<this>.
+
+=item SCALAR this
+
+Returns what evaluating the hash in scalar context yields.
+
+B<Tie::Hash> does not implement this method (but B<Tie::StdHash>
+and B<Tie::ExtraHash> do).
+
+=back
+
+=head1 Inheriting from B<Tie::StdHash>
+
+The accessor methods assume that the actual storage for the data in the tied
+hash is in the hash referenced by C<tied(%tiedhash)>. Thus an overwritten
+C<TIEHASH> method should return a hash reference, and the remaining methods
+should operate on the hash referenced by the first argument:
+
+ package ReportHash;
+ our @ISA = 'Tie::StdHash';
+
+ sub TIEHASH {
+ my $storage = bless {}, shift;
+ warn "New ReportHash created, stored in $storage.\n";
+ $storage
+ }
+ sub STORE {
+ warn "Storing data with key $_[1] at $_[0].\n";
+ $_[0]{$_[1]} = $_[2]
+ }
+
+
+=head1 Inheriting from B<Tie::ExtraHash>
+
+The accessor methods assume that the actual storage for the data in the tied
+hash is in the hash referenced by C<(tied(%tiedhash))-E<gt>[0]>. Thus an overwritten
+C<TIEHASH> method should return an array reference with the first
+element being a hash reference, and the remaining methods should operate on the
+hash C<< %{ $_[0]->[0] } >>:
+
+ package ReportHash;
+ our @ISA = 'Tie::ExtraHash';
+
+ sub TIEHASH {
+ my $class = shift;
+ my $storage = bless [{}, @_], $class;
+ warn "New ReportHash created, stored in $storage.\n";
+ $storage;
+ }
+ sub STORE {
+ warn "Storing data with key $_[1] at $_[0].\n";
+ $_[0][0]{$_[1]} = $_[2]
+ }
+
+The default C<TIEHASH> method stores "extra" arguments to tie() starting
+from offset 1 in the array referenced by C<tied(%tiedhash)>; this is the
+same storage algorithm as in the TIEHASH subroutine above. Hence, a typical
+package inheriting from B<Tie::ExtraHash> does not need to overwrite this
+method.
+
+=head1 C<SCALAR>, C<UNTIE> and C<DESTROY>
+
+The methods C<UNTIE> and C<DESTROY> are not defined in B<Tie::Hash>,
+B<Tie::StdHash>, or B<Tie::ExtraHash>. Tied hashes do not require the
+presence of these methods, but if defined, the methods will be called
+at the proper time; see L<perltie>.
+
+C<SCALAR> is only defined in B<Tie::StdHash> and B<Tie::ExtraHash>.
+
+If needed, these methods should be defined by the package inheriting from
+B<Tie::Hash>, B<Tie::StdHash>, or B<Tie::ExtraHash>. See L<perltie/"SCALAR">
+to find out what happens when C<SCALAR> does not exist.
+
+=head1 MORE INFORMATION
+
+The packages relating to various DBM-related implementations (F<DB_File>,
+F<NDBM_File>, etc.) show examples of general tied hashes, as does the
+L<Config> module. While these do not utilize B<Tie::Hash>, they serve as
+good working examples.
+
+=cut
+
+use Carp;
+use warnings::register;
+
+sub new {
+ my $pkg = shift;
+ $pkg->TIEHASH(@_);
+}
+
+# Grandfather "new"
+
+sub TIEHASH {
+ my $pkg = shift;
+ if (defined &{"${pkg}::new"}) {
+ warnings::warnif("WARNING: calling ${pkg}->new since ${pkg}->TIEHASH is missing");
+ $pkg->new(@_);
+ }
+ else {
+ croak "$pkg doesn't define a TIEHASH method";
+ }
+}
+
+sub EXISTS {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define an EXISTS method";
+}
+
+sub CLEAR {
+ my $self = shift;
+ my $key = $self->FIRSTKEY(@_);
+ my @keys;
+
+ while (defined $key) {
+ push @keys, $key;
+ $key = $self->NEXTKEY(@_, $key);
+ }
+ foreach $key (@keys) {
+ $self->DELETE(@_, $key);
+ }
+}
+
+# The Tie::StdHash package implements standard perl hash behaviour.
+# It exists to act as a base class for classes which only wish to
+# alter some parts of their behaviour.
+
+package Tie::StdHash;
+# @ISA = qw(Tie::Hash); # would inherit new() only
+
+sub TIEHASH { bless {}, $_[0] }
+sub STORE { $_[0]->{$_[1]} = $_[2] }
+sub FETCH { $_[0]->{$_[1]} }
+sub FIRSTKEY { my $a = scalar keys %{$_[0]}; each %{$_[0]} }
+sub NEXTKEY { each %{$_[0]} }
+sub EXISTS { exists $_[0]->{$_[1]} }
+sub DELETE { delete $_[0]->{$_[1]} }
+sub CLEAR { %{$_[0]} = () }
+sub SCALAR { scalar %{$_[0]} }
+
+package Tie::ExtraHash;
+
+sub TIEHASH { my $p = shift; bless [{}, @_], $p }
+sub STORE { $_[0][0]{$_[1]} = $_[2] }
+sub FETCH { $_[0][0]{$_[1]} }
+sub FIRSTKEY { my $a = scalar keys %{$_[0][0]}; each %{$_[0][0]} }
+sub NEXTKEY { each %{$_[0][0]} }
+sub EXISTS { exists $_[0][0]->{$_[1]} }
+sub DELETE { delete $_[0][0]->{$_[1]} }
+sub CLEAR { %{$_[0][0]} = () }
+sub SCALAR { scalar %{$_[0][0]} }
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/Hash/NamedCapture.pm b/Master/tlpkg/tlperl/lib/Tie/Hash/NamedCapture.pm
new file mode 100644
index 00000000000..58ae743d874
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Hash/NamedCapture.pm
@@ -0,0 +1,62 @@
+package Tie::Hash::NamedCapture;
+
+our $VERSION = "0.06";
+
+# The real meat is implemented in XS in universal.c in the core, but this
+# method was left behind because gv.c expects a pure-Perl method in
+# this package when it loads the tie magic for %+ and %-.
+
+my ($one, $all) = Tie::Hash::NamedCapture::flags();
+
+sub TIEHASH {
+ my ($pkg, %arg) = @_;
+ my $flag = $arg{all} ? $all : $one;
+ bless \$flag => $pkg;
+}
+
+tie %+, __PACKAGE__;
+tie %-, __PACKAGE__, all => 1;
+
+1;
+
+__END__
+
+=head1 NAME
+
+Tie::Hash::NamedCapture - Named regexp capture buffers
+
+=head1 SYNOPSIS
+
+ tie my %hash, "Tie::Hash::NamedCapture";
+ # %hash now behaves like %+
+
+ tie my %hash, "Tie::Hash::NamedCapture", all => 1;
+ # %hash now behaves like %-
+
+=head1 DESCRIPTION
+
+This module is used to implement the special hashes C<%+> and C<%->, but it
+can be used to tie other variables as you choose.
+
+When the C<all> parameter is provided, then the tied hash elements will be
+array refs listing the contents of each capture buffer whose name is the
+same as the associated hash key. If none of these buffers were involved in
+the match, the contents of that array ref will be as many C<undef> values
+as there are capture buffers with that name. In other words, the tied hash
+will behave as C<%->.
+
+When the C<all> parameter is omitted or false, then the tied hash elements
+will be the contents of the leftmost defined buffer with the name of the
+associated hash key. In other words, the tied hash will behave as
+C<%+>.
+
+The keys of C<%->-like hashes correspond to all buffer names found in the
+regular expression; the keys of C<%+>-like hashes list only the names of
+buffers that have captured (and that are thus associated to defined values).
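+
+As a small illustrative sketch (the variable and group names are
+arbitrary):
+
+ tie my %plus_like, "Tie::Hash::NamedCapture";
+ tie my %minus_like, "Tie::Hash::NamedCapture", all => 1;
+
+ if ("2012-05" =~ /(?<year>\d{4})-(?<month>\d{2})/) {
+ print $plus_like{year}; # "2012", like $+{year}
+ print $minus_like{month}[0]; # "05", like $-{month}[0]
+ }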
+
+=head1 SEE ALSO
+
+L<perlreapi>, L<re>, L<perlmodlib/Pragmatic Modules>, L<perlvar/"%+">,
+L<perlvar/"%-">.
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/Tie/Memoize.pm b/Master/tlpkg/tlperl/lib/Tie/Memoize.pm
new file mode 100644
index 00000000000..dbe14428280
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Memoize.pm
@@ -0,0 +1,128 @@
+use strict;
+package Tie::Memoize;
+use Tie::Hash;
+our @ISA = 'Tie::ExtraHash';
+our $VERSION = '1.1';
+
+our $exists_token = \undef;
+
+sub croak {require Carp; goto &Carp::croak}
+
+# Format: [0: STORAGE, 1: EXISTS-CACHE, 2: FETCH_function;
+# 3: EXISTS_function, 4: DATA, 5: EXISTS_different ]
+
+sub FETCH {
+ my ($h,$key) = ($_[0][0], $_[1]);
+ my $res = $h->{$key};
+ return $res if defined $res; # Shortcut if accessible
+ return $res if exists $h->{$key}; # Accessible, but undef
+ my $cache = $_[0][1]{$key};
+ return if defined $cache and not $cache; # Known to not exist
+ my @res = $_[0][2]->($key, $_[0][4]); # Autoload
+ $_[0][1]{$key} = 0, return unless @res; # Cache non-existence
+ delete $_[0][1]{$key}; # Clear existence cache, not needed any more
+ $_[0][0]{$key} = $res[0]; # Store data and return
+}
+
+sub EXISTS {
+ my ($a,$key) = (shift, shift);
+ return 1 if exists $a->[0]{$key}; # Have data
+ my $cache = $a->[1]{$key};
+ return $cache if defined $cache; # Existence cache
+ my @res = $a->[3]($key,$a->[4]);
+ $a->[1]{$key} = 0, return unless @res; # Cache non-existence
+ # Now we know it exists
+ return ($a->[1]{$key} = 1) if $a->[5]; # Only existence reported
+ # Now know the value
+ $a->[0]{$key} = $res[0]; # Store data
+ return 1
+}
+
+sub TIEHASH {
+ croak 'syntax: tie %hash, \'Tie::Memoize\', \&fetch_subr' if @_ < 2;
+ croak 'syntax: tie %hash, \'Tie::Memoize\', \&fetch_subr, $data, \&exists_subr, \%data_cache, \%existence_cache' if @_ > 6;
+ push @_, undef if @_ < 3; # Data
+ push @_, $_[1] if @_ < 4; # exists
+ push @_, {} while @_ < 6; # initial value and caches
+ bless [ @_[4,5,1,3,2], $_[1] ne $_[3]], $_[0]
+}
+
+1;
+
+=head1 NAME
+
+Tie::Memoize - add data to hash when needed
+
+=head1 SYNOPSIS
+
+ require Tie::Memoize;
+ tie %hash, 'Tie::Memoize',
+ \&fetch, # The rest is optional
+ $DATA, \&exists,
+ {%ini_value}, {%ini_existence};
+
+=head1 DESCRIPTION
+
+This package allows a tied hash to autoload its values on the first access,
+and to use the cached value on the following accesses.
+
+Only read-accesses (via fetching the value or C<exists>) result in calls to
+the functions; the modify-accesses are performed as on a normal hash.
+
+The required arguments during C<tie> are the hash, the package, and
+the reference to the C<FETCH>ing function. The optional arguments are
+an arbitrary scalar $data, the reference to the C<EXISTS> function,
+and initial values of the hash and of the existence cache.
+
+Both the C<FETCH>ing function and the C<EXISTS> functions have the
+same signature: the arguments are C<$key, $data>; $data is the same
+value as given as argument during tie()ing. Both functions should
+return an empty list if the value does not exist. If C<EXISTS>
+function is different from the C<FETCH>ing function, it should return
+a TRUE value on success. The C<FETCH>ing function should return the
+intended value if the key is valid.
+
+=head1 Inheriting from B<Tie::Memoize>
+
+The structure of the tied() data is an array reference with elements
+
+ 0: cache of known values
+ 1: cache of known existence of keys
+ 2: FETCH function
+ 3: EXISTS function
+ 4: $data
+
+The rest is for internal usage of this package. In particular, if
+TIEHASH is overwritten, it should call SUPER::TIEHASH.
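+
+A minimal sketch of such a subclass (the class name and the warning
+are invented purely for illustration):
+
+ package Tie::Memoize::Noisy;
+ our @ISA = ('Tie::Memoize');
+
+ sub TIEHASH {
+ my $class = shift;
+ warn "tying a $class hash\n";
+ $class->SUPER::TIEHASH(@_); # let Tie::Memoize build its structure
+ }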
+
+=head1 EXAMPLE
+
+ sub slurp {
+ my ($key, $dir) = @_;
+ open my $h, '<', "$dir/$key" or return;
+ local $/; <$h> # slurp it all
+ }
+ sub exists { my ($key, $dir) = @_; return -f "$dir/$key" }
+
+ tie %hash, 'Tie::Memoize', \&slurp, $directory, \&exists,
+ { fake_file1 => $content1, fake_file2 => $content2 },
+ { pretend_does_not_exists => 0, known_to_exist => 1 };
+
+This example treats the slightly modified contents of $directory as a
+hash. The modifications are that the keys F<fake_file1> and
+F<fake_file2> fetch values $content1 and $content2, and
+F<pretend_does_not_exists> will never be accessed. Additionally, the
+existence of F<known_to_exist> is never checked (so if it does not
+exist when its content is needed, the user of %hash may be confused).
+
+=head1 BUGS
+
+FIRSTKEY and NEXTKEY methods go through the keys which were already read,
+not all the possible keys of the hash.
+
+=head1 AUTHOR
+
+Ilya Zakharevich L<mailto:perl-module-hash-memoize@ilyaz.org>.
+
+=cut
+
diff --git a/Master/tlpkg/tlperl/lib/Tie/RefHash.pm b/Master/tlpkg/tlperl/lib/Tie/RefHash.pm
new file mode 100644
index 00000000000..f95bf41efdf
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/RefHash.pm
@@ -0,0 +1,274 @@
+package Tie::RefHash;
+
+use vars qw/$VERSION/;
+
+$VERSION = "1.38";
+
+use 5.005;
+
+=head1 NAME
+
+Tie::RefHash - use references as hash keys
+
+=head1 SYNOPSIS
+
+ require 5.004;
+ use Tie::RefHash;
+ tie HASHVARIABLE, 'Tie::RefHash', LIST;
+ tie HASHVARIABLE, 'Tie::RefHash::Nestable', LIST;
+
+ untie HASHVARIABLE;
+
+=head1 DESCRIPTION
+
+This module provides the ability to use references as hash keys if you
+first C<tie> the hash variable to this module. Normally, only the
+keys of the tied hash itself are preserved as references; to use
+references as keys in hashes-of-hashes, use Tie::RefHash::Nestable,
+included as part of Tie::RefHash.
+
+It is implemented using the standard perl TIEHASH interface. Please
+see the C<tie> entry in perlfunc(1) and perltie(1) for more information.
+
+The Nestable version works by looking for hash references being stored
+and converting them to tied hashes so that they too can have
+references as keys. This will happen without warning whenever you
+store a reference to one of your own hashes in the tied hash.
+
+=head1 EXAMPLE
+
+ use Tie::RefHash;
+ tie %h, 'Tie::RefHash';
+ $a = [];
+ $b = {};
+ $c = \*main;
+ $d = \"gunk";
+ $e = sub { 'foo' };
+ %h = ($a => 1, $b => 2, $c => 3, $d => 4, $e => 5);
+ $a->[0] = 'foo';
+ $b->{foo} = 'bar';
+ for (keys %h) {
+ print ref($_), "\n";
+ }
+
+ tie %h, 'Tie::RefHash::Nestable';
+ $h{$a}->{$b} = 1;
+ for (keys %h, keys %{$h{$a}}) {
+ print ref($_), "\n";
+ }
+
+=head1 THREAD SUPPORT
+
+L<Tie::RefHash> fully supports threading using the C<CLONE> method.
+
+=head1 STORABLE SUPPORT
+
+L<Storable> hooks are provided for semantically correct serialization and
+cloning of tied refhashes.
+
+=head1 RELIC SUPPORT
+
+This version of Tie::RefHash seems to no longer work with 5.004. This has not
+been thoroughly investigated. Patches welcome ;-)
+
+=head1 MAINTAINER
+
+Yuval Kogman E<lt>nothingmuch@woobling.orgE<gt>
+
+=head1 AUTHOR
+
+Gurusamy Sarathy gsar@activestate.com
+
+'Nestable' by Ed Avis ed@membled.com
+
+=head1 SEE ALSO
+
+perl(1), perlfunc(1), perltie(1)
+
+=cut
+
+use Tie::Hash;
+use vars '@ISA';
+@ISA = qw(Tie::Hash);
+use strict;
+use Carp qw/croak/;
+
+BEGIN {
+ local $@;
+ # determine whether we need to take care of threads
+ use Config ();
+ my $usethreads = $Config::Config{usethreads}; # && exists $INC{"threads.pm"}
+ *_HAS_THREADS = $usethreads ? sub () { 1 } : sub () { 0 };
+ *_HAS_SCALAR_UTIL = eval { require Scalar::Util; 1 } ? sub () { 1 } : sub () { 0 };
+ *_HAS_WEAKEN = defined(&Scalar::Util::weaken) ? sub () { 1 } : sub () { 0 };
+}
+
+BEGIN {
+ # create a refaddr function
+
+ local $@;
+
+ if ( _HAS_SCALAR_UTIL ) {
+ Scalar::Util->import("refaddr");
+ } else {
+ require overload;
+
+ *refaddr = sub {
+ if ( overload::StrVal($_[0]) =~ /\( 0x ([a-zA-Z0-9]+) \)$/x) {
+ return $1;
+ } else {
+ die "couldn't parse StrVal: " . overload::StrVal($_[0]);
+ }
+ };
+ }
+}
+
+my (@thread_object_registry, $count); # used by the CLONE method to rehash the keys after their refaddr changed
+
+sub TIEHASH {
+ my $c = shift;
+ my $s = [];
+ bless $s, $c;
+ while (@_) {
+ $s->STORE(shift, shift);
+ }
+
+ if (_HAS_THREADS ) {
+
+ if ( _HAS_WEAKEN ) {
+ # remember the object so that we can rekey it on CLONE
+ push @thread_object_registry, $s;
+ # but make this a weak reference, so that there are no leaks
+ Scalar::Util::weaken( $thread_object_registry[-1] );
+
+ if ( ++$count > 1000 ) {
+ # this ensures we don't fill up with a huge array of dead weakrefs
+ @thread_object_registry = grep { defined } @thread_object_registry;
+ $count = 0;
+ }
+ } else {
+ $count++; # used in the warning
+ }
+ }
+
+ return $s;
+}
+
+my $storable_format_version = join("/", __PACKAGE__, "0.01");
+
+sub STORABLE_freeze {
+ my ( $self, $is_cloning ) = @_;
+ my ( $refs, $reg ) = @$self;
+ return ( $storable_format_version, [ values %$refs ], $reg );
+}
+
+sub STORABLE_thaw {
+ my ( $self, $is_cloning, $version, $refs, $reg ) = @_;
+ croak "incompatible versions of Tie::RefHash between freeze and thaw"
+ unless $version eq $storable_format_version;
+
+ @$self = ( {}, $reg );
+ $self->_reindex_keys( $refs );
+}
+
+sub CLONE {
+ my $pkg = shift;
+
+ if ( $count and not _HAS_WEAKEN ) {
+ warn "Tie::RefHash is not threadsafe without Scalar::Util::weaken";
+ }
+
+ # when the thread has been cloned all the objects need to be updated.
+ # dead weakrefs are undefined, so we filter them out
+ @thread_object_registry = grep { defined && do { $_->_reindex_keys; 1 } } @thread_object_registry;
+ $count = 0; # we just cleaned up
+}
+
+sub _reindex_keys {
+ my ( $self, $extra_keys ) = @_;
+ # rehash all the ref keys based on their new StrVal
+ %{ $self->[0] } = map { refaddr($_->[0]) => $_ } (values(%{ $self->[0] }), @{ $extra_keys || [] });
+}
+
+sub FETCH {
+ my($s, $k) = @_;
+ if (ref $k) {
+ my $kstr = refaddr($k);
+ if (defined $s->[0]{$kstr}) {
+ $s->[0]{$kstr}[1];
+ }
+ else {
+ undef;
+ }
+ }
+ else {
+ $s->[1]{$k};
+ }
+}
+
+sub STORE {
+ my($s, $k, $v) = @_;
+ if (ref $k) {
+ $s->[0]{refaddr($k)} = [$k, $v];
+ }
+ else {
+ $s->[1]{$k} = $v;
+ }
+ $v;
+}
+
+sub DELETE {
+ my($s, $k) = @_;
+ (ref $k)
+ ? (delete($s->[0]{refaddr($k)}) || [])->[1]
+ : delete($s->[1]{$k});
+}
+
+sub EXISTS {
+ my($s, $k) = @_;
+ (ref $k) ? exists($s->[0]{refaddr($k)}) : exists($s->[1]{$k});
+}
+
+sub FIRSTKEY {
+ my $s = shift;
+ keys %{$s->[0]}; # reset iterator
+ keys %{$s->[1]}; # reset iterator
+ $s->[2] = 0; # flag for iteration, see NEXTKEY
+ $s->NEXTKEY;
+}
+
+sub NEXTKEY {
+ my $s = shift;
+ my ($k, $v);
+ if (!$s->[2]) {
+ if (($k, $v) = each %{$s->[0]}) {
+ return $v->[0];
+ }
+ else {
+ $s->[2] = 1;
+ }
+ }
+ return each %{$s->[1]};
+}
+
+sub CLEAR {
+ my $s = shift;
+ $s->[2] = 0;
+ %{$s->[0]} = ();
+ %{$s->[1]} = ();
+}
+
+package Tie::RefHash::Nestable;
+use vars '@ISA';
+@ISA = 'Tie::RefHash';
+
+sub STORE {
+ my($s, $k, $v) = @_;
+ if (ref($v) eq 'HASH' and not tied %$v) {
+ my @elems = %$v;
+ tie %$v, ref($s), @elems;
+ }
+ $s->SUPER::STORE($k, $v);
+}
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/Registry.pm b/Master/tlpkg/tlperl/lib/Tie/Registry.pm
new file mode 100644
index 00000000000..2ded338d342
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Registry.pm
@@ -0,0 +1,45 @@
+package Tie::Registry;
+
+# Tie/Registry.pm -- Provides backward compatibility for Win32::TieRegistry
+# that was called Tie::Registry prior to version 0.20.
+# by Tye McQueen, tye@metronet.com, see http://www.metronet.com/~tye/.
+
+use strict;
+use Carp;
+
+use vars qw( $VERSION @ISA );
+BEGIN {
+ require Win32::TieRegistry;
+ $VERSION = '0.15';
+ @ISA = qw{Win32::TieRegistry};
+}
+
+sub import {
+ my $pkg = shift;
+ Win32::TieRegistry->import( ExportLevel => 1, SplitMultis => 0, @_ );
+}
+
+1;
+
+__END__
+
+=pod
+
+=head1 NAME
+
+Tie::Registry - Legacy interface to Win32::TieRegistry (DEPRECATED)
+
+=head1 DESCRIPTION
+
+This module provides backward compatibility for L<Win32::TieRegistry>
+that was called Tie::Registry prior to version 0.20.
+
+=head1 AUTHOR
+
+Tye McQueen E<lt>tye@metronet.comE<gt>
+
+=head1 COPYRIGHT
+
+Copyright 1999 Tye McQueen.
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/Tie/Scalar.pm b/Master/tlpkg/tlperl/lib/Tie/Scalar.pm
new file mode 100644
index 00000000000..24e4ae79c3c
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Scalar.pm
@@ -0,0 +1,163 @@
+package Tie::Scalar;
+
+our $VERSION = '1.02';
+
+=head1 NAME
+
+Tie::Scalar, Tie::StdScalar - base class definitions for tied scalars
+
+=head1 SYNOPSIS
+
+ package NewScalar;
+ require Tie::Scalar;
+
+ @ISA = qw(Tie::Scalar);
+
+ sub FETCH { ... } # Provide a needed method
+ sub TIESCALAR { ... } # Overrides inherited method
+
+
+ package NewStdScalar;
+ require Tie::Scalar;
+
+ @ISA = qw(Tie::StdScalar);
+
+ # All methods provided by default, so define only what needs be overridden
+ sub FETCH { ... }
+
+
+ package main;
+
+ tie $new_scalar, 'NewScalar';
+ tie $new_std_scalar, 'NewStdScalar';
+
+=head1 DESCRIPTION
+
+This module provides some skeletal methods for scalar-tying classes. See
+L<perltie> for a list of the functions required in tying a scalar to a
+package. The basic B<Tie::Scalar> package provides a C<new> method, as well
+as methods C<TIESCALAR>, C<FETCH> and C<STORE>. The B<Tie::StdScalar>
+package provides all the methods specified in L<perltie>. It inherits from
+B<Tie::Scalar> and causes scalars tied to it to behave exactly like the
+built-in scalars, allowing for selective overloading of methods. The C<new>
+method is provided as a means of grandfathering, for classes that forget to
+provide their own C<TIESCALAR> method.
+
+For developers wishing to write their own tied-scalar classes, the methods
+are summarized below. The L<perltie> section not only documents these, but
+has sample code as well:
+
+=over 4
+
+=item TIESCALAR classname, LIST
+
+The method invoked by the command C<tie $scalar, classname>. Associates a new
+scalar instance with the specified class. C<LIST> would represent additional
+arguments (along the lines of L<AnyDBM_File> and compatriots) needed to
+complete the association.
+
+=item FETCH this
+
+Retrieve the value of the tied scalar referenced by I<this>.
+
+=item STORE this, value
+
+Store data I<value> in the tied scalar referenced by I<this>.
+
+=item DESTROY this
+
+Free the storage associated with the tied scalar referenced by I<this>.
+This is rarely needed, as Perl manages its memory quite well. But the
+option exists, should a class wish to perform specific actions upon the
+destruction of an instance.
+
+=back
+
+=head2 Tie::Scalar vs Tie::StdScalar
+
+C<< Tie::Scalar >> provides all the necessary methods, but one should realize
+they do not do anything useful. Calling C<< Tie::Scalar::FETCH >> or
+C<< Tie::Scalar::STORE >> results in a (trappable) croak. And if you inherit
+from C<< Tie::Scalar >>, you I<must> provide either a C<< new >> or a
+C<< TIESCALAR >> method.
+
+If you are looking for a class that supplies everything you don't
+define yourself, use the C<< Tie::StdScalar >> class, not the
+C<< Tie::Scalar >> one.
+
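+A minimal sketch of a C<< Tie::StdScalar >> subclass that overrides
+only C<STORE> (the class name is invented purely for illustration):
+
+ package UppercaseScalar;
+ require Tie::Scalar;
+ our @ISA = ('Tie::StdScalar');
+
+ sub STORE { ${$_[0]} = uc $_[1] } # keep an uppercased copy
+
+ package main;
+ tie my $shout, 'UppercaseScalar';
+ $shout = "quiet"; # $shout now holds "QUIET"
+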
+=head1 MORE INFORMATION
+
+The L<perltie> section contains a good example of tying scalars,
+associating process IDs with priority.
+
+=cut
+
+use Carp;
+use warnings::register;
+
+sub new {
+ my $pkg = shift;
+ $pkg->TIESCALAR(@_);
+}
+
+# "Grandfather" the new, a la Tie::Hash
+
+sub TIESCALAR {
+ my $pkg = shift;
+ my $pkg_new = $pkg -> can ('new');
+
+ if ($pkg_new and $pkg ne __PACKAGE__) {
+ my $my_new = __PACKAGE__ -> can ('new');
+ if ($pkg_new == $my_new) {
+ #
+ # Prevent recursion
+ #
+ croak "$pkg must define either a TIESCALAR() or a new() method";
+ }
+
+ warnings::warnif ("WARNING: calling ${pkg}->new since " .
+ "${pkg}->TIESCALAR is missing");
+ $pkg -> new (@_);
+ }
+ else {
+ croak "$pkg doesn't define a TIESCALAR method";
+ }
+}
+
+sub FETCH {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a FETCH method";
+}
+
+sub STORE {
+ my $pkg = ref $_[0];
+ croak "$pkg doesn't define a STORE method";
+}
+
+#
+# The Tie::StdScalar package provides scalars that behave exactly like
+# Perl's built-in scalars. Good base to inherit from, if you're only going to
+# tweak a small bit.
+#
+package Tie::StdScalar;
+@ISA = qw(Tie::Scalar);
+
+sub TIESCALAR {
+ my $class = shift;
+ my $instance = shift || undef;
+ return bless \$instance => $class;
+}
+
+sub FETCH {
+ return ${$_[0]};
+}
+
+sub STORE {
+ ${$_[0]} = $_[1];
+}
+
+sub DESTROY {
+ undef ${$_[0]};
+}
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/StdHandle.pm b/Master/tlpkg/tlperl/lib/Tie/StdHandle.pm
new file mode 100644
index 00000000000..3a1a3db4788
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/StdHandle.pm
@@ -0,0 +1,71 @@
+package Tie::StdHandle;
+
+use strict;
+
+use Tie::Handle;
+use vars qw(@ISA $VERSION);
+@ISA = 'Tie::Handle';
+$VERSION = '4.2';
+
+=head1 NAME
+
+Tie::StdHandle - base class definitions for tied handles
+
+=head1 SYNOPSIS
+
+ package NewHandle;
+ require Tie::Handle;
+
+ @ISA = qw(Tie::Handle);
+
+ sub READ { ... } # Provide a needed method
+ sub TIEHANDLE { ... } # Overrides inherited method
+
+
+ package main;
+
+ tie *FH, 'NewHandle';
+
+=head1 DESCRIPTION
+
+The B<Tie::StdHandle> package provides most methods for file handles described
+in L<perltie> (the exceptions are C<UNTIE> and C<DESTROY>). It causes tied
+file handles to behave exactly like standard file handles and allows for
+selective overriding of methods.
+
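+For example, a handle can be tied directly to this class and then used like
+an ordinary file handle. This is only a sketch; reading the running script
+(C<$0>) is an arbitrary choice of file:
+
+  use Tie::StdHandle;
+
+  tie *FH, 'Tie::StdHandle', '<', $0;  # extra tie() arguments are passed to OPEN
+  my $line = <FH>;                     # dispatched to READLINE
+  close FH;                            # dispatched to CLOSE
+  untie *FH;
+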
+=cut
+
+sub TIEHANDLE
+{
+ my $class = shift;
+ my $fh = \do { local *HANDLE};
+ bless $fh,$class;
+ $fh->OPEN(@_) if (@_);
+ return $fh;
+}
+
+sub EOF { eof($_[0]) }
+sub TELL { tell($_[0]) }
+sub FILENO { fileno($_[0]) }
+sub SEEK { seek($_[0],$_[1],$_[2]) }
+sub CLOSE { close($_[0]) }
+sub BINMODE { binmode($_[0]) }
+
+sub OPEN
+{
+ $_[0]->CLOSE if defined($_[0]->FILENO);
+ @_ == 2 ? open($_[0], $_[1]) : open($_[0], $_[1], $_[2]);
+}
+
+sub READ { read($_[0],$_[1],$_[2]) }
+sub READLINE { my $fh = $_[0]; <$fh> }
+sub GETC { getc($_[0]) }
+
+sub WRITE
+{
+ my $fh = $_[0];
+ print $fh substr($_[1],0,$_[2])
+}
+
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/SubstrHash.pm b/Master/tlpkg/tlperl/lib/Tie/SubstrHash.pm
new file mode 100644
index 00000000000..476dd686787
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/SubstrHash.pm
@@ -0,0 +1,215 @@
+package Tie::SubstrHash;
+
+our $VERSION = '1.00';
+
+=head1 NAME
+
+Tie::SubstrHash - Fixed-table-size, fixed-key-length hashing
+
+=head1 SYNOPSIS
+
+ require Tie::SubstrHash;
+
+ tie %myhash, 'Tie::SubstrHash', $key_len, $value_len, $table_size;
+
+=head1 DESCRIPTION
+
+The B<Tie::SubstrHash> package provides a hash-table-like interface to
+an array of determinate size, with constant key size and record size.
+
+Upon tying a new hash to this package, the developer must specify the
+size of the keys that will be used, the size of the value fields that the
+keys will index, and the size of the overall table (in terms of key-value
+pairs, not size in hard memory). I<These values will not change for the
+duration of the tied hash>. The newly-allocated hash table may now have
+data stored and retrieved. Efforts to store more than C<$table_size>
+elements will result in a fatal error, as will efforts to store a value
+not exactly C<$value_len> characters in length, or reference through a
+key not exactly C<$key_len> characters in length. While these constraints
+may seem excessive, the result is a hash table using much less internal
+memory than an equivalent freely-allocated hash table.
+
+=head1 CAVEATS
+
+Because the current implementation uses the table and key sizes for the
+hashing algorithm, there is no means by which to dynamically change the
+value of any of the initialization parameters.
+
+The hash does not support exists().
+
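+For illustration only (the key, value and table sizes chosen here are
+arbitrary), a table holding up to 100 records with 3-character keys and
+5-character values could be used like this:
+
+  require Tie::SubstrHash;
+
+  tie my %h, 'Tie::SubstrHash', 3, 5, 100;
+  $h{'abc'} = 'hello';      # ok: key is 3 characters, value is 5
+  print $h{'abc'}, "\n";    # prints "hello"
+  # $h{'ab'}  = 'hello';    # would croak: key is not 3 characters long
+  # $h{'abc'} = 'hi';       # would croak: value is not 5 characters long
+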
+=cut
+
+use Carp;
+
+sub TIEHASH {
+ my $pack = shift;
+ my ($klen, $vlen, $tsize) = @_;
+ my $rlen = 1 + $klen + $vlen;
+ $tsize = [$tsize,
+ findgteprime($tsize * 1.1)]; # Allow 10% empty.
+ local $self = bless ["\0", $klen, $vlen, $tsize, $rlen, 0, -1];
+ $$self[0] x= $rlen * $tsize->[1];
+ $self;
+}
+
+sub CLEAR {
+ local($self) = @_;
+ $$self[0] = "\0" x ($$self[4] * $$self[3]->[1]);
+ $$self[5] = 0;
+ $$self[6] = -1;
+}
+
+sub FETCH {
+ local($self,$key) = @_;
+ local($klen, $vlen, $tsize, $rlen) = @$self[1..4];
+ &hashkey;
+ for (;;) {
+ $offset = $hash * $rlen;
+ $record = substr($$self[0], $offset, $rlen);
+ if (ord($record) == 0) {
+ return undef;
+ }
+ elsif (ord($record) == 1) {
+ }
+ elsif (substr($record, 1, $klen) eq $key) {
+ return substr($record, 1+$klen, $vlen);
+ }
+ &rehash;
+ }
+}
+
+sub STORE {
+ local($self,$key,$val) = @_;
+ local($klen, $vlen, $tsize, $rlen) = @$self[1..4];
+ croak("Table is full ($tsize->[0] elements)") if $$self[5] > $tsize->[0];
+ croak(qq/Value "$val" is not $vlen characters long/)
+ if length($val) != $vlen;
+ my $writeoffset;
+
+ &hashkey;
+ for (;;) {
+ $offset = $hash * $rlen;
+ $record = substr($$self[0], $offset, $rlen);
+ if (ord($record) == 0) {
+ $record = "\2". $key . $val;
+ die "panic" unless length($record) == $rlen;
+ $writeoffset = $offset unless defined $writeoffset;
+ substr($$self[0], $writeoffset, $rlen) = $record;
+ ++$$self[5];
+ return;
+ }
+ elsif (ord($record) == 1) {
+ $writeoffset = $offset unless defined $writeoffset;
+ }
+ elsif (substr($record, 1, $klen) eq $key) {
+ $record = "\2". $key . $val;
+ die "panic" unless length($record) == $rlen;
+ substr($$self[0], $offset, $rlen) = $record;
+ return;
+ }
+ &rehash;
+ }
+}
+
+sub DELETE {
+ local($self,$key) = @_;
+ local($klen, $vlen, $tsize, $rlen) = @$self[1..4];
+ &hashkey;
+ for (;;) {
+ $offset = $hash * $rlen;
+ $record = substr($$self[0], $offset, $rlen);
+ if (ord($record) == 0) {
+ return undef;
+ }
+ elsif (ord($record) == 1) {
+ }
+ elsif (substr($record, 1, $klen) eq $key) {
+	    substr($$self[0], $offset, 1) = "\1";
+	    --$$self[5];   # decrement the element count before returning
+	    return substr($record, 1+$klen, $vlen);
+ }
+ &rehash;
+ }
+}
+
+sub FIRSTKEY {
+ local($self) = @_;
+ $$self[6] = -1;
+ &NEXTKEY;
+}
+
+sub NEXTKEY {
+ local($self) = @_;
+ local($klen, $vlen, $tsize, $rlen, $entries, $iterix) = @$self[1..6];
+ for (++$iterix; $iterix < $tsize->[1]; ++$iterix) {
+ next unless substr($$self[0], $iterix * $rlen, 1) eq "\2";
+ $$self[6] = $iterix;
+ return substr($$self[0], $iterix * $rlen + 1, $klen);
+ }
+ $$self[6] = -1;
+ undef;
+}
+
+sub EXISTS {
+ croak "Tie::SubstrHash does not support exists()";
+}
+
+sub hashkey {
+ croak(qq/Key "$key" is not $klen characters long/)
+ if length($key) != $klen;
+ $hash = 2;
+ for (unpack('C*', $key)) {
+ $hash = $hash * 33 + $_;
+ &_hashwrap if $hash >= 1e13;
+ }
+ &_hashwrap if $hash >= $tsize->[1];
+ $hash = 1 unless $hash;
+ $hashbase = $hash;
+}
+
+sub _hashwrap {
+ $hash -= int($hash / $tsize->[1]) * $tsize->[1];
+}
+
+sub rehash {
+ $hash += $hashbase;
+ $hash -= $tsize->[1] if $hash >= $tsize->[1];
+}
+
+# using POSIX::ceil() would be too heavy, and not all platforms have it.
+sub ceil {
+ my $num = shift;
+ $num = int($num + 1) unless $num == int $num;
+ return $num;
+}
+
+# See:
+#
+# http://www-groups.dcs.st-andrews.ac.uk/~history/HistTopics/Prime_numbers.html
+#
+
+sub findgteprime { # find the smallest prime integer greater than or equal to the argument
+ use integer;
+
+ my $num = ceil(shift);
+ return 2 if $num <= 2;
+
+ $num++ unless $num % 2;
+ my $i;
+ my $sqrtnum = int sqrt $num;
+ my $sqrtnumsquared = $sqrtnum * $sqrtnum;
+
+ NUM:
+ for (;; $num += 2) {
+ if ($sqrtnumsquared < $num) {
+ $sqrtnum++;
+ $sqrtnumsquared = $sqrtnum * $sqrtnum;
+ }
+ for ($i = 3; $i <= $sqrtnum; $i += 2) {
+ next NUM unless $num % $i;
+ }
+ return $num;
+ }
+}
+
+1;
diff --git a/Master/tlpkg/tlperl/lib/Tie/Watch.pm b/Master/tlpkg/tlperl/lib/Tie/Watch.pm
new file mode 100644
index 00000000000..9882751073d
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/Tie/Watch.pm
@@ -0,0 +1,560 @@
+$Tie::Watch::VERSION = '1.3';
+
+package Tie::Watch;
+
+=head1 NAME
+
+Tie::Watch - place watchpoints on Perl variables.
+
+=head1 SYNOPSIS
+
+ use Tie::Watch;
+
+ $watch = Tie::Watch->new(
+ -variable => \$frog,
+ -debug => 1,
+ -shadow => 0,
+ -fetch => [\&fetch, 'arg1', 'arg2', ..., 'argn'],
+ -store => \&store,
+ -destroy => sub {print "Final value=$frog.\n"},
+ );
+ %vinfo = $watch->Info;
+ $args = $watch->Args(-fetch);
+ $val = $watch->Fetch;
+ print "val=", $watch->Say($val), ".\n";
+ $watch->Store('Hello');
+ $watch->Unwatch;
+
+=head1 DESCRIPTION
+
+This class module binds one or more subroutines of your devising to a
+Perl variable. All variables can have B<FETCH>, B<STORE> and
+B<DESTROY> callbacks. Additionally, arrays can define B<CLEAR>,
+B<DELETE>, B<EXISTS>, B<EXTEND>, B<FETCHSIZE>, B<POP>, B<PUSH>,
+B<SHIFT>, B<SPLICE>, B<STORESIZE> and B<UNSHIFT> callbacks, and hashes
+can define B<CLEAR>, B<DELETE>, B<EXISTS>, B<FIRSTKEY> and B<NEXTKEY>
+callbacks. If these terms are unfamiliar to you, I I<really> suggest
+you read L<perltie>.
+
+With Tie::Watch you can:
+
+ . alter a variable's value
+ . prevent a variable's value from being changed
+ . invoke a Perl/Tk callback when a variable changes
+ . trace references to a variable
+
+Callback format is patterned after the Perl/Tk scheme: supply either a
+code reference, or an array reference whose first element is the callback
+code reference, followed by callback arguments. (See examples in the
+Synopsis, above.)
+
+Tie::Watch provides default callbacks for any that you fail to
+specify. Other than negatively impacting performance, they perform
+the standard action that you'd expect, so the variable behaves
+"normally". Once you override a default callback, perhaps to insert
+debug code like print statements, your callback normally finishes by
+calling the underlying (overridden) method. But you don't have to!
+
+To map a tied method name to a default callback name simply lowercase
+the tied method name and uppercase its first character. So FETCH
+becomes Fetch, NEXTKEY becomes Nextkey, etcetera.
+
+Here are two callbacks for a scalar. The B<FETCH> (read) callback does
+nothing other than illustrate the fact that it returns the value to
+assign to the variable. The B<STORE> (write) callback uppercases the
+variable and returns it. In all cases the callback I<must> return the
+correct read or write value - typically, it does this by invoking the
+underlying method.
+
+ my $fetch_scalar = sub {
+ my($self) = @_;
+ $self->Fetch;
+ };
+
+ my $store_scalar = sub {
+ my($self, $new_val) = @_;
+ $self->Store(uc $new_val);
+ };
+
+Here are B<FETCH> and B<STORE> callbacks for either an array or hash.
+They do essentially the same thing as the scalar callbacks, but
+provide a little more information.
+
+ my $fetch = sub {
+ my($self, $key) = @_;
+ my $val = $self->Fetch($key);
+ print "In fetch callback, key=$key, val=", $self->Say($val);
+ my $args = $self->Args(-fetch);
+ print ", args=('", join("', '", @$args), "')" if $args;
+ print ".\n";
+ $val;
+ };
+
+ my $store = sub {
+ my($self, $key, $new_val) = @_;
+ my $val = $self->Fetch($key);
+ $new_val = uc $new_val;
+ $self->Store($key, $new_val);
+ print "In store callback, key=$key, val=", $self->Say($val),
+ ", new_val=", $self->Say($new_val);
+ my $args = $self->Args(-store);
+ print ", args=('", join("', '", @$args), "')" if $args;
+ print ".\n";
+ $new_val;
+ };
+
+In all cases, the first parameter is a reference to the Watch object,
+used to invoke the following class methods.
+
+=head1 METHODS
+
+=over 4
+
+=item $watch = Tie::Watch->new(-options => values);
+
+The watchpoint constructor method accepts option/value pairs to
+create and configure the Watch object. The only required option is
+B<-variable>.
+
+B<-variable> is a I<reference> to a scalar, array or hash variable.
+
+B<-debug> (default 0) is 1 to activate debug print statements internal
+to Tie::Watch.
+
+B<-shadow> (default 1) is 0 to disable array and hash shadowing. To
+prevent infinite recursion Tie::Watch maintains parallel variables for
+arrays and hashes. When the watchpoint is created the parallel shadow
+variable is initialized with the watched variable's contents, and when
+the watchpoint is deleted the shadow variable is copied to the original
+variable. Thus, changes made during the watch process are not lost.
+Shadowing is on by default. If you disable shadowing, any changes made
+to an array or hash are lost when the watchpoint is deleted.
+
+Specify any of the following relevant callback parameters, in the
+format described above: B<-fetch>, B<-store>, B<-destroy>.
+Additionally for arrays: B<-clear>, B<-extend>, B<-fetchsize>,
+B<-pop>, B<-push>, B<-shift>, B<-splice>, B<-storesize> and
+B<-unshift>. Additionally for hashes: B<-clear>, B<-delete>,
+B<-exists>, B<-firstkey> and B<-nextkey>.
+
+=item $args = $watch->Args(-fetch);
+
+Returns a reference to a list of arguments for the specified callback,
+or undefined if none.
+
+=item $watch->Fetch(); $watch->Fetch($key);
+
+Returns a variable's current value. $key is required for an array or
+hash.
+
+=item %vinfo = $watch->Info();
+
+Returns a hash detailing the internals of the Watch object, with these
+keys:
+
+ %vinfo = {
+ -variable => SCALAR(0x200737f8)
+ -debug => '0'
+ -shadow => '1'
+ -value => 'HELLO SCALAR'
+ -destroy => ARRAY(0x200f86cc)
+ -fetch => ARRAY(0x200f8558)
+ -store => ARRAY(0x200f85a0)
+ -legible => above data formatted as a list of string, for printing
+ }
+
+For array and hash Watch objects, the B<-value> key is replaced with a
+B<-ptr> key which is a reference to the parallel array or hash.
+Additionally, for an array or hash, there are key/value pairs for
+all the variable specific callbacks.
+
+=item $watch->Say($val);
+
+Used mainly for debugging, it returns $val in quotes if required, or
+the string "undefined" for undefined values.
+
+=item $watch->Store($new_val); $watch->Store($key, $new_val);
+
+Store a variable's new value. $key is required for an array or hash.
+
+=item $watch->Unwatch();
+
+Stop watching the variable.
+
+=back
+
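+Putting these methods together, here is a short sketch (the variable and
+key names are invented) that watches a hash, logs each store, and then
+removes the watchpoint:
+
+  use Tie::Watch;
+
+  my %fruit = (apple => 1);
+  my $watch = Tie::Watch->new(
+      -variable => \%fruit,
+      -store    => sub {
+          my($self, $key, $val) = @_;
+          print "store: $key => $val\n";
+          $self->Store($key, $val);   # perform (and return) the real store
+      },
+  );
+
+  $fruit{banana} = 2;                 # triggers the -store callback
+  my %vinfo = $watch->Info;
+  print "$_\n" for @{$vinfo{-legible}};
+  $watch->Unwatch;                    # %fruit keeps its shadowed contents
+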
+=head1 EFFICIENCY CONSIDERATIONS
+
+If you can get by using the class methods provided, please do so. You
+can meddle with the object hash directly to improve watch performance,
+at the risk of your code breaking in the future.
+
+=head1 AUTHOR
+
+Stephen O. Lidie
+
+=head1 HISTORY
+
+ lusol@Lehigh.EDU, LUCC, 96/05/30
+ . Original version 0.92 release, based on the Trace module from Hans Mulder,
+ and ideas from Tim Bunce.
+
+ lusol@Lehigh.EDU, LUCC, 96/12/25
+ . Version 0.96, release two inner references detected by Perl 5.004.
+
+ lusol@Lehigh.EDU, LUCC, 97/01/11
+ . Version 0.97, fix Makefile.PL and MANIFEST (thanks Andreas Koenig).
+ Make sure test.pl doesn't fail if Tk isn't installed.
+
+ Stephen.O.Lidie@Lehigh.EDU, Lehigh University Computing Center, 97/10/03
+ . Version 0.98, implement -shadow option for arrays and hashes.
+
+ Stephen.O.Lidie@Lehigh.EDU, Lehigh University Computing Center, 98/02/11
+ . Version 0.99, finally, with Perl 5.004_57, we can completely watch arrays.
+    With tied array support this module is essentially complete, so it's been
+ optimized for speed at the expense of clarity - sorry about that. The
+ Delete() method has been renamed Unwatch() because it conflicts with the
+ builtin delete().
+
+ Stephen.O.Lidie@Lehigh.EDU, Lehigh University Computing Center, 99/04/04
+ . Version 1.0, for Perl 5.005_03, update Makefile.PL for ActiveState, and
+ add two examples (one for Perl/Tk).
+
+ sol0@lehigh.edu, Lehigh University Computing Center, 2003/06/07
+ . Version 1.1, for Perl 5.8, can trace a reference now, patch from Slaven
+ Rezic.
+
+ sol0@lehigh.edu, Lehigh University Computing Center, 2005/05/17
+ . Version 1.2, for Perl 5.8, per Rob Seegel's suggestion, support array
+ DELETE and EXISTS.
+
+=head1 COPYRIGHT
+
+Copyright (C) 1996 - 2005 Stephen O. Lidie. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the same terms as Perl itself.
+
+=cut
+
+use 5.004_57;
+use Carp;
+use strict;
+use subs qw/normalize_callbacks/;
+use vars qw/@array_callbacks @hash_callbacks @scalar_callbacks/;
+
+@array_callbacks = qw/-clear -delete -destroy -exists -extend -fetch
+ -fetchsize -pop -push -shift -splice -store
+ -storesize -unshift/;
+@hash_callbacks = qw/-clear -delete -destroy -exists -fetch -firstkey
+ -nextkey -store/;
+@scalar_callbacks = qw/-destroy -fetch -store/;
+
+sub new {
+
+ # Watch constructor. The *real* constructor is Tie::Watch->base_watch(),
+ # invoked by methods in other Watch packages, depending upon the variable's
+ # type. Here we supply defaulted parameter values and then verify them,
+ # normalize all callbacks and bind the variable to the appropriate package.
+
+ my($class, %args) = @_;
+ my $version = $Tie::Watch::VERSION;
+ my (%arg_defaults) = (-debug => 0, -shadow => 1);
+ my $variable = $args{-variable};
+ croak "Tie::Watch::new(): -variable is required." if not defined $variable;
+
+ my($type, $watch_obj) = (ref $variable, undef);
+ if ($type =~ /(SCALAR|REF)/) {
+ @arg_defaults{@scalar_callbacks} = (
+ [\&Tie::Watch::Scalar::Destroy], [\&Tie::Watch::Scalar::Fetch],
+ [\&Tie::Watch::Scalar::Store]);
+ } elsif ($type =~ /ARRAY/) {
+ @arg_defaults{@array_callbacks} = (
+ [\&Tie::Watch::Array::Clear], [\&Tie::Watch::Array::Delete],
+ [\&Tie::Watch::Array::Destroy], [\&Tie::Watch::Array::Exists],
+ [\&Tie::Watch::Array::Extend], [\&Tie::Watch::Array::Fetch],
+ [\&Tie::Watch::Array::Fetchsize], [\&Tie::Watch::Array::Pop],
+ [\&Tie::Watch::Array::Push], [\&Tie::Watch::Array::Shift],
+ [\&Tie::Watch::Array::Splice], [\&Tie::Watch::Array::Store],
+ [\&Tie::Watch::Array::Storesize], [\&Tie::Watch::Array::Unshift]);
+ } elsif ($type =~ /HASH/) {
+ @arg_defaults{@hash_callbacks} = (
+ [\&Tie::Watch::Hash::Clear], [\&Tie::Watch::Hash::Delete],
+ [\&Tie::Watch::Hash::Destroy], [\&Tie::Watch::Hash::Exists],
+ [\&Tie::Watch::Hash::Fetch], [\&Tie::Watch::Hash::Firstkey],
+ [\&Tie::Watch::Hash::Nextkey], [\&Tie::Watch::Hash::Store]);
+ } else {
+ croak "Tie::Watch::new() - not a variable reference.";
+ }
+ my(@margs, %ahsh, $args, @args);
+ @margs = grep ! defined $args{$_}, keys %arg_defaults;
+ %ahsh = %args; # argument hash
+ @ahsh{@margs} = @arg_defaults{@margs}; # fill in missing values
+ normalize_callbacks \%ahsh;
+
+ if ($type =~ /(SCALAR|REF)/) {
+ $watch_obj = tie $$variable, 'Tie::Watch::Scalar', %ahsh;
+ } elsif ($type =~ /ARRAY/) {
+ $watch_obj = tie @$variable, 'Tie::Watch::Array', %ahsh;
+ } elsif ($type =~ /HASH/) {
+ $watch_obj = tie %$variable, 'Tie::Watch::Hash', %ahsh;
+ }
+ $watch_obj;
+
+} # end new, Watch constructor
+
+sub Args {
+
+ # Return a reference to a list of callback arguments, or undef if none.
+ #
+ # $_[0] = self
+ # $_[1] = callback type
+
+ defined $_[0]->{$_[1]}->[1] ? [@{$_[0]->{$_[1]}}[1 .. $#{$_[0]->{$_[1]}}]]
+ : undef;
+
+} # end Args
+
+sub Info {
+
+ # Info() method subclassed by other Watch modules.
+ #
+ # $_[0] = self
+ # @_[1 .. $#_] = optional callback types
+
+ my(%vinfo, @results);
+ my(@info) = (qw/-variable -debug -shadow/);
+ push @info, @_[1 .. $#_] if scalar @_ >= 2;
+ foreach my $type (@info) {
+ push @results, sprintf('%-10s: ', substr $type, 1) .
+ $_[0]->Say($_[0]->{$type});
+ $vinfo{$type} = $_[0]->{$type};
+ }
+ $vinfo{-legible} = [@results];
+ %vinfo;
+
+} # end Info
+
+sub Say {
+
+ # For debugging, mainly.
+ #
+ # $_[0] = self
+ # $_[1] = value
+
+ defined $_[1] ? (ref($_[1]) ne '' ? $_[1] : "'$_[1]'") : "undefined";
+
+} # end Say
+
+sub Unwatch {
+
+ # Stop watching a variable by releasing the last reference and untieing it.
+ # Update the original variable with its shadow, if appropriate.
+ #
+ # $_[0] = self
+
+ my $variable = $_[0]->{-variable};
+ my $type = ref $variable;
+ my $copy; $copy = $_[0]->{-ptr} if $type !~ /(SCALAR|REF)/;
+ my $shadow = $_[0]->{-shadow};
+ undef $_[0];
+ if ($type =~ /(SCALAR|REF)/) {
+ untie $$variable;
+ } elsif ($type =~ /ARRAY/) {
+ untie @$variable;
+ @$variable = @$copy if $shadow;
+ } elsif ($type =~ /HASH/) {
+ untie %$variable;
+ %$variable = %$copy if $shadow;
+ } else {
+ croak "Tie::Watch::Delete() - not a variable reference.";
+ }
+
+} # end Unwatch
+
+# Watch private methods.
+
+sub base_watch {
+
+ # Watch base class constructor invoked by other Watch modules.
+
+ my($class, %args) = @_;
+ my $watch_obj = {%args};
+ $watch_obj;
+
+} # end base_watch
+
+sub callback {
+
+ # Execute a Watch callback, either the default or user specified.
+ # Note that the arguments are those supplied by the tied method,
+ # not those (if any) specified by the user when the watch object
+ # was instantiated. This is for performance reasons, and why the
+ # Args() method exists.
+ #
+ # $_[0] = self
+ # $_[1] = callback type
+ # $_[2] through $#_ = tied arguments
+
+ &{$_[0]->{$_[1]}->[0]} ($_[0], @_[2 .. $#_]);
+
+} # end callback
+
+sub normalize_callbacks {
+
+ # Ensure all callbacks are normalized in [\&code, @args] format.
+
+ my($args_ref) = @_;
+ my($cb, $ref);
+ foreach my $arg (keys %$args_ref) {
+ next if $arg =~ /variable|debug|shadow/;
+ $cb = $args_ref->{$arg};
+ $ref = ref $cb;
+ if ($ref =~ /CODE/) {
+ $args_ref->{$arg} = [$cb];
+ } elsif ($ref !~ /ARRAY/) {
+ croak "Tie::Watch: malformed callback $arg=$cb.";
+ }
+ }
+
+} # end normalize_callbacks
+
+###############################################################################
+
+package Tie::Watch::Scalar;
+
+use Carp;
+@Tie::Watch::Scalar::ISA = qw/Tie::Watch/;
+
+sub TIESCALAR {
+
+ my($class, %args) = @_;
+ my $variable = $args{-variable};
+ my $watch_obj = Tie::Watch->base_watch(%args);
+ $watch_obj->{-value} = $$variable;
+ print "WatchScalar new: $variable created, \@_=", join(',', @_), "!\n"
+ if $watch_obj->{-debug};
+ bless $watch_obj, $class;
+
+} # end TIESCALAR
+
+sub Info {$_[0]->SUPER::Info('-value', @Tie::Watch::scalar_callbacks)}
+
+# Default scalar callbacks.
+
+sub Destroy {undef %{$_[0]}}
+sub Fetch {$_[0]->{-value}}
+sub Store {$_[0]->{-value} = $_[1]}
+
+# Scalar access methods.
+
+sub DESTROY {$_[0]->callback('-destroy')}
+sub FETCH {$_[0]->callback('-fetch')}
+sub STORE {$_[0]->callback('-store', $_[1])}
+
+###############################################################################
+
+package Tie::Watch::Array;
+
+use Carp;
+@Tie::Watch::Array::ISA = qw/Tie::Watch/;
+
+sub TIEARRAY {
+
+ my($class, %args) = @_;
+ my($variable, $shadow) = @args{-variable, -shadow};
+ my @copy; @copy = @$variable if $shadow; # make a private copy of user's array
+ $args{-ptr} = $shadow ? \@copy : [];
+ my $watch_obj = Tie::Watch->base_watch(%args);
+ print "WatchArray new: $variable created, \@_=", join(',', @_), "!\n"
+ if $watch_obj->{-debug};
+ bless $watch_obj, $class;
+
+} # end TIEARRAY
+
+sub Info {$_[0]->SUPER::Info('-ptr', @Tie::Watch::array_callbacks)}
+
+# Default array callbacks.
+
+sub Clear {@{$_[0]->{-ptr}} = ()}  # empty the shadow array in place
+sub Delete {delete $_[0]->{-ptr}->[$_[1]]}
+sub Destroy {undef %{$_[0]}}
+sub Exists {exists $_[0]->{-ptr}->[$_[1]]}
+sub Extend {}
+sub Fetch {$_[0]->{-ptr}->[$_[1]]}
+sub Fetchsize {scalar @{$_[0]->{-ptr}}}
+sub Pop {pop @{$_[0]->{-ptr}}}
+sub Push {push @{$_[0]->{-ptr}}, @_[1 .. $#_]}
+sub Shift {shift @{$_[0]->{-ptr}}}
+sub Splice {
+    my $n = scalar @_; # splice() is weird!
+ return splice @{$_[0]->{-ptr}}, $_[1] if $n == 2;
+ return splice @{$_[0]->{-ptr}}, $_[1], $_[2] if $n == 3;
+ return splice @{$_[0]->{-ptr}}, $_[1], $_[2], @_[3 .. $#_] if $n >= 4;
+}
+sub Store {$_[0]->{-ptr}->[$_[1]] = $_[2]}
+sub Storesize {$#{$_[0]->{-ptr}} = $_[1] - 1}
+sub Unshift {unshift @{$_[0]->{-ptr}}, @_[1 .. $#_]}
+
+# Array access methods.
+
+sub CLEAR {$_[0]->callback('-clear')}
+sub DELETE {$_[0]->callback('-delete', $_[1])}
+sub DESTROY {$_[0]->callback('-destroy')}
+sub EXISTS {$_[0]->callback('-exists', $_[1])}
+sub EXTEND {$_[0]->callback('-extend', $_[1])}
+sub FETCH {$_[0]->callback('-fetch', $_[1])}
+sub FETCHSIZE {$_[0]->callback('-fetchsize')}
+sub POP {$_[0]->callback('-pop')}
+sub PUSH {$_[0]->callback('-push', @_[1 .. $#_])}
+sub SHIFT {$_[0]->callback('-shift')}
+sub SPLICE {$_[0]->callback('-splice', @_[1 .. $#_])}
+sub STORE {$_[0]->callback('-store', $_[1], $_[2])}
+sub STORESIZE {$_[0]->callback('-storesize', $_[1])}
+sub UNSHIFT {$_[0]->callback('-unshift', @_[1 .. $#_])}
+
+###############################################################################
+
+package Tie::Watch::Hash;
+
+use Carp;
+@Tie::Watch::Hash::ISA = qw/Tie::Watch/;
+
+sub TIEHASH {
+
+ my($class, %args) = @_;
+ my($variable, $shadow) = @args{-variable, -shadow};
+ my %copy; %copy = %$variable if $shadow; # make a private copy of user's hash
+ $args{-ptr} = $shadow ? \%copy : {};
+ my $watch_obj = Tie::Watch->base_watch(%args);
+ print "WatchHash new: $variable created, \@_=", join(',', @_), "!\n"
+ if $watch_obj->{-debug};
+ bless $watch_obj, $class;
+
+} # end TIEHASH
+
+sub Info {$_[0]->SUPER::Info('-ptr', @Tie::Watch::hash_callbacks)}
+
+# Default hash callbacks.
+
+sub Clear {%{$_[0]->{-ptr}} = ()}  # empty the shadow hash in place
+sub Delete {delete $_[0]->{-ptr}->{$_[1]}}
+sub Destroy {undef %{$_[0]}}
+sub Exists {exists $_[0]->{-ptr}->{$_[1]}}
+sub Fetch {$_[0]->{-ptr}->{$_[1]}}
+sub Firstkey {my $c = keys %{$_[0]->{-ptr}}; each %{$_[0]->{-ptr}}}
+sub Nextkey {each %{$_[0]->{-ptr}}}
+sub Store {$_[0]->{-ptr}->{$_[1]} = $_[2]}
+
+# Hash access methods.
+
+sub CLEAR {$_[0]->callback('-clear')}
+sub DELETE {$_[0]->callback('-delete', $_[1])}
+sub DESTROY {$_[0]->callback('-destroy')}
+sub EXISTS {$_[0]->callback('-exists', $_[1])}
+sub FETCH {$_[0]->callback('-fetch', $_[1])}
+sub FIRSTKEY {$_[0]->callback('-firstkey')}
+sub NEXTKEY {$_[0]->callback('-nextkey')}
+sub STORE {$_[0]->callback('-store', $_[1], $_[2])}
+
+1;