author | Norbert Preining <preining@logic.at> | 2010-05-12 16:54:37 +0000
---|---|---
committer | Norbert Preining <preining@logic.at> | 2010-05-12 16:54:37 +0000
commit | 661c41a09e39a182865e0b51e34cc995a0dc96e8 (patch) |
tree | 2f79bb1406e22fdcb2587be8ffda6c0c609d7932 /Master/tlpkg/tlperl/lib/DBD |
parent | b645030efc22e13c2498a1522083634ab91b2de1 (diff) |
move tlperl.straw to tlperl
git-svn-id: svn://tug.org/texlive/trunk@18210 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/tlpkg/tlperl/lib/DBD')
24 files changed, 19630 insertions, 0 deletions
diff --git a/Master/tlpkg/tlperl/lib/DBD/DBM.pm b/Master/tlpkg/tlperl/lib/DBD/DBM.pm new file mode 100755 index 00000000000..1dea6b54ad9 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/DBM.pm @@ -0,0 +1,975 @@ +####################################################################### +# +# DBD::DBM - a DBI driver for DBM files +# +# Copyright (c) 2004 by Jeff Zucker < jzucker AT cpan.org > +# +# All rights reserved. +# +# You may freely distribute and/or modify this module under the terms +# of either the GNU General Public License (GPL) or the Artistic License, +# as specified in the Perl README file. +# +# USERS - see the pod at the bottom of this file +# +# DBD AUTHORS - see the comments in the code +# +####################################################################### +require 5.005_03; +use strict; + +################# +package DBD::DBM; +################# +use base qw( DBD::File ); +use vars qw($VERSION $ATTRIBUTION $drh $methods_already_installed); +$VERSION = '0.03'; +$ATTRIBUTION = 'DBD::DBM by Jeff Zucker'; + +# no need to have driver() unless you need private methods +# +sub driver ($;$) { + my($class, $attr) = @_; + return $drh if $drh; + + # do the real work in DBD::File + # + $attr->{Attribution} = 'DBD::DBM by Jeff Zucker'; + my $this = $class->SUPER::driver($attr); + + # install private methods + # + # this requires that dbm_ (or foo_) be a registered prefix + # but you can write private methods before official registration + # by hacking the $dbd_prefix_registry in a private copy of DBI.pm + # + if ( $DBI::VERSION >= 1.37 and !$methods_already_installed++ ) { + DBD::DBM::db->install_method('dbm_versions'); + DBD::DBM::st->install_method('dbm_schema'); + } + + $this; +} + +sub CLONE { + undef $drh; +} + +##################### +package DBD::DBM::dr; +##################### +$DBD::DBM::dr::imp_data_size = 0; +@DBD::DBM::dr::ISA = qw(DBD::File::dr); + +# you can get by without connect() if you don't have to check private +# attributes, DBD::File will gather the connection string arguements for you +# +sub connect ($$;$$$) { + my($drh, $dbname, $user, $auth, $attr)= @_; + + # create a 'blank' dbh + my $this = DBI::_new_dbh($drh, { + Name => $dbname, + }); + + # parse the connection string for name=value pairs + if ($this) { + + # define valid private attributes + # + # attempts to set non-valid attrs in connect() or + # with $dbh->{attr} will throw errors + # + # the attrs here *must* start with dbm_ or foo_ + # + # see the STORE methods below for how to check these attrs + # + $this->{dbm_valid_attrs} = { + dbm_tables => 1 # per-table information + , dbm_type => 1 # the global DBM type e.g. SDBM_File + , dbm_mldbm => 1 # the global MLDBM serializer + , dbm_cols => 1 # the global column names + , dbm_version => 1 # verbose DBD::DBM version + , dbm_ext => 1 # file extension + , dbm_lockfile => 1 # lockfile extension + , dbm_store_metadata => 1 # column names, etc. + , dbm_berkeley_flags => 1 # for BerkeleyDB + }; + + my($var, $val); + $this->{f_dir} = $DBD::File::haveFileSpec ? File::Spec->curdir() : '.'; + while (length($dbname)) { + if ($dbname =~ s/^((?:[^\\;]|\\.)*?);//s) { + $var = $1; + } else { + $var = $dbname; + $dbname = ''; + } + if ($var =~ /^(.+?)=(.*)/s) { + $var = $1; + ($val = $2) =~ s/\\(.)/$1/g; + + # in the connect string the attr names + # can either have dbm_ (or foo_) prepended or not + # this will add the prefix if it's missing + # + $var = 'dbm_' . 
$var unless $var =~ /^dbm_/ + or $var eq 'f_dir'; + # XXX should pass back to DBI via $attr for connect() to STORE + $this->{$var} = $val; + } + } + $this->{f_version} = $DBD::File::VERSION; + $this->{dbm_version} = $DBD::DBM::VERSION; + for (qw( nano_version statement_version)) { + $this->{'sql_'.$_} = $DBI::SQL::Nano::versions->{$_}||''; + } + $this->{sql_handler} = ($this->{sql_statement_version}) + ? 'SQL::Statement' + : 'DBI::SQL::Nano'; + } + $this->STORE('Active',1); + return $this; +} + +# you could put some :dr private methods here + +# you may need to over-ride some DBD::File::dr methods here +# but you can probably get away with just letting it do the work +# in most cases + +##################### +package DBD::DBM::db; +##################### +$DBD::DBM::db::imp_data_size = 0; +@DBD::DBM::db::ISA = qw(DBD::File::db); + +# the ::db::STORE method is what gets called when you set +# a lower-cased database handle attribute such as $dbh->{somekey}=$someval; +# +# STORE should check to make sure that "somekey" is a valid attribute name +# but only if it is really one of our attributes (starts with dbm_ or foo_) +# You can also check for valid values for the attributes if needed +# and/or perform other operations +# +sub STORE ($$$) { + my ($dbh, $attrib, $value) = @_; + + # use DBD::File's STORE unless its one of our own attributes + # + return $dbh->SUPER::STORE($attrib,$value) unless $attrib =~ /^dbm_/; + + # throw an error if it has our prefix but isn't a valid attr name + # + if ( $attrib ne 'dbm_valid_attrs' # gotta start somewhere :-) + and !$dbh->{dbm_valid_attrs}->{$attrib} ) { + return $dbh->set_err( $DBI::stderr,"Invalid attribute '$attrib'!"); + } + else { + + # check here if you need to validate values + # or conceivably do other things as well + # + $dbh->{$attrib} = $value; + return 1; + } +} + +# and FETCH is done similar to STORE +# +sub FETCH ($$) { + my ($dbh, $attrib) = @_; + + return $dbh->SUPER::FETCH($attrib) unless $attrib =~ /^dbm_/; + + # throw an error if it has our prefix but isn't a valid attr name + # + if ( $attrib ne 'dbm_valid_attrs' # gotta start somewhere :-) + and !$dbh->{dbm_valid_attrs}->{$attrib} ) { + return $dbh->set_err( $DBI::stderr,"Invalid attribute '$attrib'"); + } + else { + + # check here if you need to validate values + # or conceivably do other things as well + # + return $dbh->{$attrib}; + } +} + + +# this is an example of a private method +# these used to be done with $dbh->func(...) +# see above in the driver() sub for how to install the method +# +sub dbm_versions { + my $dbh = shift; + my $table = shift || ''; + my $dtype = $dbh->{dbm_tables}->{$table}->{type} + || $dbh->{dbm_type} + || 'SDBM_File'; + my $mldbm = $dbh->{dbm_tables}->{$table}->{mldbm} + || $dbh->{dbm_mldbm} + || ''; + $dtype .= ' + MLDBM + ' . $mldbm if $mldbm; + + my %version = ( DBI => $DBI::VERSION ); + $version{"DBI::PurePerl"} = $DBI::PurePerl::VERSION if $DBI::PurePerl; + $version{OS} = "$^O ($Config::Config{osvers})"; + $version{Perl} = "$] ($Config::Config{archname})"; + my $str = sprintf "%-16s %s\n%-16s %s\n%-16s %s\n", + 'DBD::DBM' , $dbh->{Driver}->{Version} . 
" using $dtype" + , ' DBD::File' , $dbh->{f_version} + , ' DBI::SQL::Nano' , $dbh->{sql_nano_version} + ; + $str .= sprintf "%-16s %s\n", + , ' SQL::Statement' , $dbh->{sql_statement_version} + if $dbh->{sql_handler} eq 'SQL::Statement'; + for (sort keys %version) { + $str .= sprintf "%-16s %s\n", $_, $version{$_}; + } + return "$str\n"; +} + +# you may need to over-ride some DBD::File::db methods here +# but you can probably get away with just letting it do the work +# in most cases + +##################### +package DBD::DBM::st; +##################### +$DBD::DBM::st::imp_data_size = 0; +@DBD::DBM::st::ISA = qw(DBD::File::st); + +sub dbm_schema { + my($sth,$tname)=@_; + return $sth->set_err($DBI::stderr,'No table name supplied!') unless $tname; + return $sth->set_err($DBI::stderr,"Unknown table '$tname'!") + unless $sth->{Database}->{dbm_tables} + and $sth->{Database}->{dbm_tables}->{$tname}; + return $sth->{Database}->{dbm_tables}->{$tname}->{schema}; +} +# you could put some :st private methods here + +# you may need to over-ride some DBD::File::st methods here +# but you can probably get away with just letting it do the work +# in most cases + +############################ +package DBD::DBM::Statement; +############################ +use base qw( DBD::File::Statement ); +use IO::File; # for locking only +use Fcntl; + +my $HAS_FLOCK = eval { flock STDOUT, 0; 1 }; + +# you must define open_table; +# it is done at the start of all executes; +# it doesn't necessarily have to "open" anything; +# you must define the $tbl and at least the col_names and col_nums; +# anything else you put in depends on what you need in your +# ::Table methods below; you must bless the $tbl into the +# appropriate class as shown +# +# see also the comments inside open_table() showing the difference +# between global, per-table, and default settings +# +sub open_table ($$$$$) { + my($self, $data, $table, $createMode, $lockMode) = @_; + my $dbh = $data->{Database}; + + my $tname = $table || $self->{tables}->[0]->{name}; + my $file; + ($table,$file) = $self->get_file_name($data,$tname); + + # note the use of three levels of attribute settings below + # first it looks for a per-table setting + # if none is found, it looks for a global setting + # if none is found, it sets a default + # + # your DBD may not need this, gloabls and defaults may be enough + # + my $dbm_type = $dbh->{dbm_tables}->{$tname}->{type} + || $dbh->{dbm_type} + || 'SDBM_File'; + $dbh->{dbm_tables}->{$tname}->{type} = $dbm_type; + + my $serializer = $dbh->{dbm_tables}->{$tname}->{mldbm} + || $dbh->{dbm_mldbm} + || ''; + $dbh->{dbm_tables}->{$tname}->{mldbm} = $serializer if $serializer; + + my $ext = '' if $dbm_type eq 'GDBM_File' + or $dbm_type eq 'DB_File' + or $dbm_type eq 'BerkeleyDB'; + # XXX NDBM_File on FreeBSD (and elsewhere?) may actually be Berkeley + # behind the scenes and so create a single .db file. 
+ $ext = '.pag' if $dbm_type eq 'NDBM_File' + or $dbm_type eq 'SDBM_File' + or $dbm_type eq 'ODBM_File'; + $ext = $dbh->{dbm_ext} if defined $dbh->{dbm_ext}; + $ext = $dbh->{dbm_tables}->{$tname}->{ext} + if defined $dbh->{dbm_tables}->{$tname}->{ext}; + $ext = '' unless defined $ext; + + my $open_mode = O_RDONLY; + $open_mode = O_RDWR if $lockMode; + $open_mode = O_RDWR|O_CREAT|O_TRUNC if $createMode; + + my($tie_type); + + if ( $serializer ) { + require 'MLDBM.pm'; + $MLDBM::UseDB = $dbm_type; + $MLDBM::UseDB = 'BerkeleyDB::Hash' if $dbm_type eq 'BerkeleyDB'; + $MLDBM::Serializer = $serializer; + $tie_type = 'MLDBM'; + } + else { + require "$dbm_type.pm"; + $tie_type = $dbm_type; + } + + # Second-guessing the file extension isn't great here (or in general) + # could replace this by trying to open the file in non-create mode + # first and dieing if that succeeds. + # Currently this test doesn't work where NDBM is actually Berkeley (.db) + die "Cannot CREATE '$file$ext' because it already exists" + if $createMode and (-e "$file$ext"); + + # LOCKING + # + my($nolock,$lockext,$lock_table); + $lockext = $dbh->{dbm_tables}->{$tname}->{lockfile}; + $lockext = $dbh->{dbm_lockfile} if !defined $lockext; + if ( (defined $lockext and $lockext == 0) or !$HAS_FLOCK + ) { + undef $lockext; + $nolock = 1; + } + else { + $lockext ||= '.lck'; + } + # open and flock the lockfile, creating it if necessary + # + if (!$nolock) { + $lock_table = $self->SUPER::open_table( + $data, "$table$lockext", $createMode, $lockMode + ); + } + + # TIEING + # + # allow users to pass in a pre-created tied object + # + my @tie_args; + if ($dbm_type eq 'BerkeleyDB') { + my $DB_CREATE = 1; # but import constants if supplied + my $DB_RDONLY = 16; # + my %flags; + if (my $f = $dbh->{dbm_berkeley_flags}) { + $DB_CREATE = $f->{DB_CREATE} if $f->{DB_CREATE}; + $DB_RDONLY = $f->{DB_RDONLY} if $f->{DB_RDONLY}; + delete $f->{DB_CREATE}; + delete $f->{DB_RDONLY}; + %flags = %$f; + } + $flags{'-Flags'} = $DB_RDONLY; + $flags{'-Flags'} = $DB_CREATE if $lockMode or $createMode; + my $t = 'BerkeleyDB::Hash'; + $t = 'MLDBM' if $serializer; + @tie_args = ($t, -Filename=>$file, %flags); + } + else { + @tie_args = ($tie_type, $file, $open_mode, 0666); + } + my %h; + if ( $self->{command} ne 'DROP') { + my $tie_class = shift @tie_args; + eval { tie %h, $tie_class, @tie_args }; + die "Cannot tie(%h $tie_class @tie_args): $@" if $@; + } + + + # COLUMN NAMES + # + my $store = $dbh->{dbm_tables}->{$tname}->{store_metadata}; + $store = $dbh->{dbm_store_metadata} unless defined $store; + $store = 1 unless defined $store; + $dbh->{dbm_tables}->{$tname}->{store_metadata} = $store; + + my($meta_data,$schema,$col_names); + $meta_data = $col_names = $h{"_metadata \0"} if $store; + if ($meta_data and $meta_data =~ m~<dbd_metadata>(.+)</dbd_metadata>~is) { + $schema = $col_names = $1; + $schema =~ s~.*<schema>(.+)</schema>.*~$1~is; + $col_names =~ s~.*<col_names>(.+)</col_names>.*~$1~is; + } + $col_names ||= $dbh->{dbm_tables}->{$tname}->{c_cols} + || $dbh->{dbm_tables}->{$tname}->{cols} + || $dbh->{dbm_cols} + || ['k','v']; + $col_names = [split /,/,$col_names] if (ref $col_names ne 'ARRAY'); + $dbh->{dbm_tables}->{$tname}->{cols} = $col_names; + $dbh->{dbm_tables}->{$tname}->{schema} = $schema; + + my $i; + my %col_nums = map { $_ => $i++ } @$col_names; + + my $tbl = { + table_name => $tname, + file => $file, + ext => $ext, + hash => \%h, + dbm_type => $dbm_type, + store_metadata => $store, + mldbm => $serializer, + lock_fh => $lock_table->{fh}, + 
lock_ext => $lockext, + nolock => $nolock, + col_nums => \%col_nums, + col_names => $col_names + }; + + my $class = ref($self); + $class =~ s/::Statement/::Table/; + bless($tbl, $class); + $tbl; +} + +######################## +package DBD::DBM::Table; +######################## +use base qw( DBD::File::Table ); + +# you must define drop +# it is called from execute of a SQL DROP statement +# +sub drop ($$) { + my($self,$data) = @_; + untie %{$self->{hash}} if $self->{hash}; + my $ext = $self->{ext}; + unlink $self->{file}.$ext if -f $self->{file}.$ext; + unlink $self->{file}.'.dir' if -f $self->{file}.'.dir' + and $ext eq '.pag'; + if (!$self->{nolock}) { + $self->{lock_fh}->close if $self->{lock_fh}; + unlink $self->{file}.$self->{lock_ext} + if -f $self->{file}.$self->{lock_ext}; + } + return 1; +} + +# you must define fetch_row, it is called on all fetches; +# it MUST return undef when no rows are left to fetch; +# checking for $ary[0] is specific to hashes so you'll +# probably need some other kind of check for nothing-left. +# as Janis might say: "undef's just another word for +# nothing left to fetch" :-) +# +sub fetch_row ($$$) { + my($self, $data, $row) = @_; + # fetch with %each + # + my @ary = each %{$self->{hash}}; + @ary = each %{$self->{hash}} if $self->{store_metadata} + and $ary[0] + and $ary[0] eq "_metadata \0"; + + my($key,$val) = @ary; + return undef unless $key; + my @row = (ref($val) eq 'ARRAY') ? ($key,@$val) : ($key,$val); + return (@row) if wantarray; + return \@row; + + # fetch without %each + # + # $self->{keys} = [sort keys %{$self->{hash}}] unless $self->{keys}; + # my $key = shift @{$self->{keys}}; + # $key = shift @{$self->{keys}} if $self->{store_metadata} + # and $key + # and $key eq "_metadata \0"; + # return undef unless defined $key; + # my @ary; + # $row = $self->{hash}->{$key}; + # if (ref $row eq 'ARRAY') { + # @ary = ( $key, @{$row} ); + # } + # else { + # @ary = ($key,$row); + # } + # return (@ary) if wantarray; + # return \@ary; +} + +# you must define push_row +# it is called on inserts and updates +# +sub push_row ($$$) { + my($self, $data, $row_aryref) = @_; + my $key = shift @$row_aryref; + if ( $self->{mldbm} ) { + $self->{hash}->{$key}= $row_aryref; + } + else { + $self->{hash}->{$key}=$row_aryref->[0]; + } + 1; +} + +# this is where you grab the column names from a CREATE statement +# if you don't need to do that, it must be defined but can be empty +# +sub push_names ($$$) { + my($self, $data, $row_aryref) = @_; + $data->{Database}->{dbm_tables}->{$self->{table_name}}->{c_cols} + = $row_aryref; + return unless $self->{store_metadata}; + my $stmt = $data->{f_stmt}; + my $col_names = join ',', @{$row_aryref}; + my $schema = $data->{Database}->{Statement}; + $schema =~ s/^[^\(]+\((.+)\)$/$1/s; + $schema = $stmt->schema_str if $stmt->can('schema_str'); + $self->{hash}->{"_metadata \0"} = "<dbd_metadata>" + . "<schema>$schema</schema>" + . "<col_names>$col_names</col_names>" + . "</dbd_metadata>" + ; +} + +# fetch_one_row, delete_one_row, update_one_row +# are optimized for hash-style lookup without looping; +# if you don't need them, omit them, they're optional +# but, in that case you may need to define +# truncate() and seek(), see below +# +sub fetch_one_row ($$;$) { + my($self,$key_only,$key) = @_; + return $self->{col_names}->[0] if $key_only; + return undef unless exists $self->{hash}->{$key}; + my $val = $self->{hash}->{$key}; + $val = (ref($val)eq'ARRAY') ? 
$val : [$val];
+    my $row = [$key, @$val];
+    return @$row if wantarray;
+    return $row;
+}
+sub delete_one_row ($$$) {
+    my($self,$data,$aryref) = @_;
+    delete $self->{hash}->{$aryref->[0]};
+}
+sub update_one_row ($$$) {
+    my($self,$data,$aryref) = @_;
+    my $key = shift @$aryref;
+    return undef unless defined $key;
+    my $row = (ref($aryref)eq'ARRAY') ? $aryref : [$aryref];
+    if ( $self->{mldbm} ) {
+        $self->{hash}->{$key}= $row;
+    }
+    else {
+        $self->{hash}->{$key}=$row->[0];
+    }
+}
+
+# you may not need to explicitly DESTROY the ::Table
+# put cleanup code to run when the execute is done
+#
+sub DESTROY ($) {
+    my $self=shift;
+    untie %{$self->{hash}} if $self->{hash};
+    # release the flock on the lock file
+    $self->{lock_fh}->close if !$self->{nolock} and $self->{lock_fh};
+}
+
+# truncate() and seek() must be defined to satisfy DBI::SQL::Nano
+# *IF* you define the *_one_row methods above, truncate() and
+# seek() can be empty or you can use them without actually
+# truncating or seeking anything but if you don't define the
+# *_one_row methods, you may need to define these
+
+# if you need to do something after a series of
+# deletes or updates, you can put it in truncate()
+# which is called at the end of executing
+#
+sub truncate ($$) {
+    my($self,$data) = @_;
+    1;
+}
+
+# seek() is only needed if you use IO::File
+# though it could be used for other non-file operations
+# that you need to do before "writes" or truncate()
+#
+sub seek ($$$$) {
+    my($self, $data, $pos, $whence) = @_;
+}
+
+# Th, th, th, that's all folks!  See DBD::File and DBD::CSV for other
+# examples of creating pure perl DBDs.  I hope this helped.
+# Now it's time to go forth and create your own DBD!
+# Remember to check in with dbi-dev@perl.org before you get too far.
+# We may be able to make suggestions or point you to other related
+# projects.
+
+1;
+__END__
+
+=pod
+
+=head1 NAME
+
+DBD::DBM - a DBI driver for DBM & MLDBM files
+
+=head1 SYNOPSIS
+
+ use DBI;
+ $dbh = DBI->connect('dbi:DBM:');                # defaults to SDBM_File
+ $dbh = DBI->connect('DBI:DBM(RaiseError=1):');  # defaults to SDBM_File
+ $dbh = DBI->connect('dbi:DBM:type=GDBM_File');  # defaults to GDBM_File
+ $dbh = DBI->connect('dbi:DBM:mldbm=Storable');  # MLDBM with SDBM_File
+                                                 # and Storable
+
+or
+
+ $dbh = DBI->connect('dbi:DBM:', undef, undef);
+ $dbh = DBI->connect('dbi:DBM:', undef, undef, { dbm_type => 'ODBM_File' });
+
+and other variations on connect() as shown in the DBI docs and with
+the dbm_ attributes shown below
+
+... and then use standard DBI prepare, execute, fetch, placeholders, etc.,
+see L<QUICK START> for an example
+
+=head1 DESCRIPTION
+
+DBD::DBM is a database management system that can work right out of the box. If you have a standard installation of Perl and a standard installation of DBI, you can begin creating, accessing, and modifying database tables without any further installation. You can also add some other modules to it for more robust capabilities if you wish.
+
+The module uses a DBM file storage layer. DBM file storage is common on many platforms and files can be created with it in many languages. That means that, in addition to creating files with DBI/SQL, you can also use DBI/SQL to access and modify files created by other DBM modules and programs. You can also use those programs to access files created with DBD::DBM.
+
+DBM files are stored in binary format optimized for quick retrieval when using a key field. That optimization can be used advantageously to make DBD::DBM SQL operations that use key fields very fast.
There are several different "flavors" of DBM - different storage formats supported by different sorts of perl modules such as SDBM_File and MLDBM. This module supports all of the flavors that perl supports and, when used with MLDBM, supports tables with any number of columns and insertion of Perl objects into tables.
+
+DBD::DBM has been tested with the following DBM types: SDBM_File, NDBM_File, ODBM_File, GDBM_File, DB_File, BerkeleyDB. Each type was tested both with and without MLDBM.
+
+=head1 QUICK START
+
+DBD::DBM operates like all other DBD drivers - its basic syntax and operation are specified by DBI. If you're not familiar with DBI, you should start by reading L<DBI> and the documents it points to and then come back and read this file. If you are familiar with DBI, you already know most of what you need to know to operate this module. Just jump in and create a test script something like the one shown below.
+
+You should be aware that there are several options for the SQL engine underlying DBD::DBM, see L<Supported SQL syntax>. There are also many options for DBM support, see especially the section on L<Adding multi-column support with MLDBM>.
+
+But here's a sample to get you started.
+
+ use DBI;
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{RaiseError} = 1;
+ for my $sql( split /;\n+/,"
+     CREATE TABLE user ( user_name TEXT, phone TEXT );
+     INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+     INSERT INTO user VALUES ('Sanjay Patel','777-3333');
+     INSERT INTO user VALUES ('Junk','xxx-xxxx');
+     DELETE FROM user WHERE user_name = 'Junk';
+     UPDATE user SET phone = '999-4444' WHERE user_name = 'Sanjay Patel';
+     SELECT * FROM user
+ "){
+     my $sth = $dbh->prepare($sql);
+     $sth->execute;
+     $sth->dump_results if $sth->{NUM_OF_FIELDS};
+ }
+ $dbh->disconnect;
+
+=head1 USAGE
+
+=head2 Specifying Files and Directories
+
+DBD::DBM will automatically supply an appropriate file extension for the type of DBM you are using. For example, if you use SDBM_File, a table called "fruit" will be stored in two files called "fruit.pag" and "fruit.dir". You should I<never> specify the file extensions in your SQL statements.
+
+However, I am not aware (and therefore DBD::DBM is not aware) of all possible extensions for various DBM types. If your DBM type uses an extension other than .pag and .dir, you should set the I<dbm_ext> attribute to the extension. B<And> you should write me with the name of the implementation and extension so I can add it to DBD::DBM! Thanks in advance for that :-).
+
+ $dbh = DBI->connect('dbi:DBM:ext=.db');  # .db extension is used
+ $dbh = DBI->connect('dbi:DBM:ext=');     # no extension is used
+
+or
+
+ $dbh->{dbm_ext}='.db';                      # global setting
+ $dbh->{dbm_tables}->{'qux'}->{ext}='.db';   # setting for table 'qux'
+
+By default files are assumed to be in the current working directory. To have the module look in a different directory, specify the I<f_dir> attribute in either the connect string or by setting the database handle attribute.
+
+For example, this will look for the file /foo/bar/fruit (or /foo/bar/fruit.pag for DBM types that use that extension)
+
+ my $dbh = DBI->connect('dbi:DBM:f_dir=/foo/bar');
+ my $ary = $dbh->selectall_arrayref(q{ SELECT * FROM fruit });
+
+And this will too:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->{f_dir} = '/foo/bar';
+ my $ary = $dbh->selectall_arrayref(q{ SELECT x FROM fruit });
+
+You can also use delimited identifiers to specify paths directly in SQL statements.
This looks in the same place as the two examples above but without setting I<f_dir>:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+     SELECT x FROM "/foo/bar/fruit"
+ });
+
+If you have SQL::Statement installed, you can use table aliases:
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ my $ary = $dbh->selectall_arrayref(q{
+     SELECT f.x FROM "/foo/bar/fruit" AS f
+ });
+
+See L<GOTCHAS AND WARNINGS> for using DROP on tables.
+
+=head2 Table locking and flock()
+
+Table locking is accomplished using a lockfile which has the same name as the table's file but with the file extension '.lck' (or a lockfile extension that you supply, see below). This file is created along with the table during a CREATE and removed during a DROP. Every time the table itself is opened, the lockfile is flocked(). For SELECT, this is a shared lock. For all other operations, it is an exclusive lock.
+
+Since the locking depends on flock(), it only works on operating systems that support flock(). In cases where flock() is not implemented, DBD::DBM will not complain, it will simply behave as if the flock() had occurred although no actual locking will happen. Read the documentation for flock() if you need to understand this.
+
+Even on those systems that do support flock(), the locking is only advisory - as is always the case with flock(). This means that if some other program tries to access the table while DBD::DBM has the table locked, that other program will *succeed* at opening the table. DBD::DBM's locking only applies to DBD::DBM. An exception to this would be the situation in which you use a lockfile with the other program that has the same name as the lockfile used in DBD::DBM and that program also uses flock() on that lockfile. In that case, DBD::DBM and your other program will respect each other's locks.
+
+If you wish to use a lockfile extension other than '.lck', simply specify the dbm_lockfile attribute:
+
+ $dbh = DBI->connect('dbi:DBM:lockfile=.foo');
+ $dbh->{dbm_lockfile} = '.foo';
+ $dbh->{dbm_tables}->{qux}->{lockfile} = '.foo';
+
+If you wish to disable locking, set the dbm_lockfile equal to 0.
+
+ $dbh = DBI->connect('dbi:DBM:lockfile=0');
+ $dbh->{dbm_lockfile} = 0;
+ $dbh->{dbm_tables}->{qux}->{lockfile} = 0;
+
+=head2 Specifying the DBM type
+
+Each "flavor" of DBM stores its files in a different format and has different capabilities and different limitations. See L<AnyDBM_File> for a comparison of DBM types.
+
+By default, DBD::DBM uses the SDBM_File type of storage since SDBM_File comes with Perl itself. But if you have other types of DBM storage available, you can use any of them with DBD::DBM also.
+
+You can specify the DBM type using the "dbm_type" attribute which can be set in the connection string or with the $dbh->{dbm_type} attribute for global settings or with the $dbh->{dbm_tables}->{$table_name}->{type} attribute for per-table settings in cases where a single script is accessing more than one kind of DBM file.
+
+In the connection string, just set type=TYPENAME where TYPENAME is any DBM type such as GDBM_File, DB_File, etc. Do I<not> use MLDBM as your dbm_type, that is set differently, see below.
+
+ my $dbh=DBI->connect('dbi:DBM:');                # uses the default SDBM_File
+ my $dbh=DBI->connect('dbi:DBM:type=GDBM_File');  # uses GDBM_File
+
+You can also use $dbh->{dbm_type} to set the global DBM type:
+
+ $dbh->{dbm_type} = 'GDBM_File';  # set the global DBM type
+ print $dbh->{dbm_type};          # display the global DBM type
+
+If you are going to have several tables in your script that come from different DBM types, you can use the $dbh->{dbm_tables} hash to store different settings for the various tables. You can even use this to perform joins on files that have completely different storage mechanisms, as sketched after these examples.
+
+ my $dbh = DBI->connect('dbi:DBM:type=GDBM_File');
+ #
+ # sets global default of GDBM_File
+
+ $dbh->{dbm_tables}->{foo}->{type} = 'DB_File';
+ #
+ # over-rides the global setting, but only for the table called "foo"
+
+ print $dbh->{dbm_tables}->{foo}->{type};
+ #
+ # prints the dbm_type for the table "foo"
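+
+Putting those pieces together, a cross-type join might look something like the following. This is a hypothetical sketch: the table names "foo" and "bar" and the default column names "k" and "v" are illustrative, and joins require SQL::Statement rather than DBI::SQL::Nano.
+
+ my $dbh = DBI->connect('dbi:DBM:type=GDBM_File');
+ $dbh->{dbm_tables}->{foo}->{type} = 'DB_File';   # "foo" is stored with DB_File,
+                                                  # "bar" uses the GDBM_File default
+ my $rows = $dbh->selectall_arrayref(q{
+     SELECT bar.k, bar.v, foo.v FROM bar, foo WHERE bar.k = foo.k
+ });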
+
+=head2 Adding multi-column support with MLDBM
+
+Most of the DBM types only support two columns. However, a CPAN module called MLDBM overcomes this limitation by allowing more than two columns. It does this by serializing the data - basically it puts a reference to an array into the second column. It can also put almost any kind of Perl object or even Perl coderefs into columns.
+
+If you want more than two columns, you must install MLDBM. It's available for many platforms and is easy to install.
+
+MLDBM can use three different modules to serialize the column - Data::Dumper, Storable, and FreezeThaw. Data::Dumper is the default, Storable is the fastest. MLDBM can also make use of user-defined serialization methods. All of this is available to you through DBD::DBM with just one attribute setting.
+
+To use MLDBM with DBD::DBM, you need to set the dbm_mldbm attribute to the name of the serialization module.
+
+Some examples:
+
+ $dbh=DBI->connect('dbi:DBM:mldbm=Storable');   # use MLDBM with Storable
+ $dbh=DBI->connect(
+    'dbi:DBM:mldbm=MySerializer'                # use MLDBM with a user defined module
+ );
+ $dbh->{dbm_mldbm} = 'MySerializer';            # same as above
+ print $dbh->{dbm_mldbm};                       # show the MLDBM serializer
+ $dbh->{dbm_tables}->{foo}->{mldbm}='Data::Dumper';  # set Data::Dumper for table "foo"
+ print $dbh->{dbm_tables}->{foo}->{mldbm};      # show serializer for table "foo"
+
+MLDBM works on top of other DBM modules so you can also set a DBM type along with setting dbm_mldbm. The examples above would default to using SDBM_File with MLDBM. If you wanted GDBM_File instead, here's how:
+
+ $dbh = DBI->connect('dbi:DBM:type=GDBM_File;mldbm=Storable');
+ #
+ # uses GDBM_File with MLDBM and Storable
+
+SDBM_File, the default file type, is quite limited, so if you are going to use MLDBM, you should probably use a different type, see L<AnyDBM_File>.
+
+See below for some L<GOTCHAS AND WARNINGS> about MLDBM.
+
+=head2 Support for Berkeley DB
+
+The Berkeley DB storage type is supported through two different Perl modules - DB_File (which supports only features in old versions of Berkeley DB) and BerkeleyDB (which supports all versions). DBD::DBM supports specifying either "DB_File" or "BerkeleyDB" as a I<dbm_type>, with or without MLDBM support.
+
+The "BerkeleyDB" dbm_type is experimental and its interface is likely to change. It currently defaults to BerkeleyDB::Hash and does not currently support ::Btree or ::Recno.
+
+With BerkeleyDB, you can specify initialization flags by setting them in your script like this:
+
+ my $dbh = DBI->connect('dbi:DBM:type=BerkeleyDB;mldbm=Storable');
+ use BerkeleyDB;
+ my $env = new BerkeleyDB::Env -Home => $dir;  # and/or other Env flags
+ $dbh->{dbm_berkeley_flags} = {
+      'DB_CREATE'  => DB_CREATE   # pass in constants
+    , 'DB_RDONLY'  => DB_RDONLY   # pass in constants
+    , '-Cachesize' => 1000        # set a ::Hash flag
+    , '-Env'       => $env        # pass in an environment
+ };
+
+Do I<not> set the -Flags or -Filename flags, those are determined by the SQL (e.g. -Flags => DB_RDONLY is set automatically when you issue a SELECT statement).
+
+Time has not permitted me to provide support in this release of DBD::DBM for further Berkeley DB features such as transactions, concurrency, locking, etc. I will be working on these in the future and would value suggestions, patches, etc.
+
+See L<DB_File> and L<BerkeleyDB> for further details.
+
+=head2 Supported SQL syntax
+
+DBD::DBM uses a subset of SQL. The robustness of that subset depends on what other modules you have installed. Both options support basic SQL operations including CREATE TABLE, DROP TABLE, INSERT, DELETE, UPDATE, and SELECT.
+
+B<Option #1:> By default, this module inherits its SQL support from DBI::SQL::Nano that comes with DBI. Nano is, as its name implies, a *very* small SQL engine. Although limited in scope, it is faster than option #2 for some operations. See L<DBI::SQL::Nano> for a description of the SQL it supports and comparisons of it with option #2.
+
+B<Option #2:> If you install the pure Perl CPAN module SQL::Statement, DBD::DBM will use it instead of Nano. This adds support for table aliases, for functions, for joins, and much more. If you're going to use DBD::DBM for anything other than very simple tables and queries, you should install SQL::Statement. You don't have to change DBD::DBM or your scripts in any way, simply installing SQL::Statement will give you the more robust SQL capabilities without breaking scripts written for DBI::SQL::Nano. See L<SQL::Statement> for a description of the SQL it supports.
+
+To find out which SQL module is working in a given script, you can use the dbm_versions() method or, if you don't need the full output and version numbers, just do this:
+
+ print $dbh->{sql_handler};
+
+That will print out either "SQL::Statement" or "DBI::SQL::Nano".
+
+=head2 Optimizing use of key fields
+
+Most "flavors" of DBM have only two physical columns (but can contain multiple logical columns as explained above in L<Adding multi-column support with MLDBM>). They work similarly to a Perl hash with the first column serving as the key. Like a Perl hash, DBM files permit you to do quick lookups by specifying the key and thus avoid looping through all records. Also like a Perl hash, the keys must be unique. It is impossible to create two records with the same key. To put this all more simply and in SQL terms, the key column functions as the PRIMARY KEY.
+
+In DBD::DBM, you can take advantage of the speed of keyed lookups by using a WHERE clause with a single equal comparison on the key field. For example, the following SQL statements are optimized for keyed lookup:
+
+ CREATE TABLE user ( user_name TEXT, phone TEXT);
+ INSERT INTO user VALUES ('Fred Bloggs','233-7777');
+ # ... many more inserts
+ SELECT phone FROM user WHERE user_name='Fred Bloggs';
+
+The "user_name" column is the key column since it is the first column. The SELECT statement uses the key column in a single equal comparison - "user_name='Fred Bloggs'" - so the search will find it very quickly without having to loop through however many names were inserted into the table.
+
+In contrast, these searches on the same table are not optimized:
+
+ 1. SELECT phone FROM user WHERE user_name < 'Fred';
+ 2. SELECT user_name FROM user WHERE phone = '233-7777';
+
+In #1, the operation uses a less-than (<) comparison rather than an equals comparison, so it will not be optimized for key searching. In #2, the key field "user_name" is not specified in the WHERE clause, and therefore the search will need to loop through all rows to find the desired result.
+
+=head2 Specifying Column Names
+
+DBM files don't have a standard way to store column names. DBD::DBM gets around this issue with a DBD::DBM specific way of storing the column names. B<If you are working only with DBD::DBM and not using files created by or accessed with other DBM programs, you can ignore this section.>
+
+DBD::DBM stores column names as a row in the file with the key I<_metadata \0>. So this code
+
+ my $dbh = DBI->connect('dbi:DBM:');
+ $dbh->do("CREATE TABLE baz (foo CHAR(10), bar INTEGER)");
+ $dbh->do("INSERT INTO baz (foo,bar) VALUES ('zippy',1)");
+
+will create a file that has a structure something like this:
+
+  _metadata \0 | foo,bar
+  zippy        | 1
+
+The next time you access this table with DBD::DBM, it will treat the _metadata row as a header rather than as data and will pull the column names from there. However, if you access the file with something other than DBD::DBM, the row will be treated as a regular data row.
+
+If you do not want the column names stored as a data row in the table you can set the I<dbm_store_metadata> attribute to 0.
+
+ my $dbh = DBI->connect('dbi:DBM:store_metadata=0');
+
+or
+
+ $dbh->{dbm_store_metadata} = 0;
+
+or, for per-table setting
+
+ $dbh->{dbm_tables}->{qux}->{store_metadata} = 0;
+
+By default, DBD::DBM assumes that you have two columns named "k" and "v" (short for "key" and "value"). So if you have I<dbm_store_metadata> set to 0 and you want to use alternate column names, you need to specify the column names like this:
+
+ my $dbh = DBI->connect('dbi:DBM:store_metadata=0;cols=foo,bar');
+
+or
+
+ $dbh->{dbm_store_metadata} = 0;
+ $dbh->{dbm_cols} = 'foo,bar';
+
+To set the column names on a per-table basis, do this:
+
+ $dbh->{dbm_tables}->{qux}->{store_metadata} = 0;
+ $dbh->{dbm_tables}->{qux}->{cols} = 'foo,bar';
+ #
+ # sets the column names only for table "qux"
+
+If you have a file that was created by another DBM program or created with I<dbm_store_metadata> set to zero and you want to convert it to using DBD::DBM's column name storage, just use one of the methods above to name the columns but *without* specifying I<dbm_store_metadata> as zero. You only have to do that once - thereafter you can get by without setting either I<dbm_store_metadata> or setting I<dbm_cols> because the names will be stored in the file.
+
+=head2 Statement handle ($sth) attributes and methods
+
+Most statement handle attributes such as NAME, NUM_OF_FIELDS, etc. are available only after an execute. The same is true of $sth->rows which is available after the execute but does I<not> require a fetch.
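+
+For example, a minimal sketch using the "user" table from above:
+
+ my $sth = $dbh->prepare('SELECT * FROM user');
+ $sth->execute;
+ print "@{ $sth->{NAME} }\n";   # column names - available only after execute
+ print $sth->rows, "\n";        # row count - available without a fetch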
+
+=head2 The $dbh->dbm_versions() method
+
+The private method dbm_versions() presents a summary of what other modules are being used at any given time. DBD::DBM can work with or without many other modules - it can use either SQL::Statement or DBI::SQL::Nano as its SQL engine, it can be run with DBI or DBI::PurePerl, it can use many kinds of DBM modules, and many kinds of serializers when run with MLDBM. The dbm_versions() method reports on all of that and more.
+
+ print $dbh->dbm_versions;               # displays global settings
+ print $dbh->dbm_versions($table_name);  # displays per table settings
+
+An important thing to note about this method is that when called with no arguments, it displays the *global* settings. If you over-ride these by setting per-table attributes, these will I<not> be shown unless you specify a table name as an argument to the method call.
+
+=head2 Storing Objects
+
+If you are using MLDBM, you can use DBD::DBM to take advantage of its serializing abilities to serialize any Perl object that MLDBM can handle. To store objects in columns, you should (but don't absolutely need to) declare the column as type BLOB (the type is *currently* ignored by the SQL engine, but heh, it's good form).
+
+You *must* use placeholders to insert or refer to the data.
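+
+For instance, something like the following sketch should work. It is hypothetical - the table "objects", its columns, and the hashref being stored are purely illustrative, and MLDBM must be enabled with a serializer such as Storable:
+
+ my $dbh = DBI->connect('dbi:DBM:mldbm=Storable');
+ $dbh->do('CREATE TABLE objects (id TEXT, obj BLOB)');
+ my $sth = $dbh->prepare('INSERT INTO objects VALUES (?, ?)');
+ $sth->execute('fred', { name => 'Fred Bloggs', phone => '233-7777' });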
+
+=head1 GOTCHAS AND WARNINGS
+
+Using the SQL DROP command will remove any file that has the name specified in the command with either '.pag' or '.dir' or your {dbm_ext} appended to it. So this can be dangerous if you aren't sure what file it refers to:
+
+ $dbh->do(qq{DROP TABLE "/path/to/any/file"});
+
+Each DBM type has limitations. SDBM_File, for example, can only store values of less than 1,000 characters. *You* as the script author must ensure that you don't exceed those bounds. If you try to insert a value that is bigger than the DBM can store, the results will be unpredictable. See the documentation for whatever DBM you are using for details.
+
+Different DBM implementations return records in different orders. That means that you can I<not> depend on the order of records unless you use an ORDER BY statement. DBI::SQL::Nano does not currently support ORDER BY (though it may soon) so if you need ordering, you'll have to install SQL::Statement.
+
+DBM data files are platform-specific. To move them from one platform to another, you'll need to do something along the lines of dumping your data to CSV on platform #1 and then dumping from CSV to DBM on platform #2. DBD::AnyData and DBD::CSV can help with that. There may also be DBM conversion tools for your platforms which would probably be quickest.
+
+When using MLDBM, there is a very powerful serializer - it will allow you to store Perl code or objects in database columns. When these get de-serialized, they may be evaled - in other words MLDBM (or actually Data::Dumper when used by MLDBM) may take the values and try to execute them in Perl. Obviously, this can present dangers, so if you don't know what's in a file, be careful before you access it with MLDBM turned on!
+
+See the entire section on L<Table locking and flock()> for gotchas and warnings about the use of flock().
+
+=head1 GETTING HELP, MAKING SUGGESTIONS, AND REPORTING BUGS
+
+If you need help installing or using DBD::DBM, please write to the DBI users mailing list at dbi-users@perl.org or to the comp.lang.perl.modules newsgroup on usenet. I'm afraid I can't always answer these kinds of questions quickly and there are many on the mailing list or in the newsgroup who can.
+
+If you have suggestions, ideas for improvements, or bugs to report, please write me directly at the email shown below.
+ +When reporting bugs, please send the output of $dbh->dbm_versions($table) for a table that exhibits the bug and, if possible, as small a sample as you can make of the code that produces the bug. And of course, patches are welcome too :-). + +=head1 ACKNOWLEDGEMENTS + +Many, many thanks to Tim Bunce for prodding me to write this, and for copious, wise, and patient suggestions all along the way. + +=head1 AUTHOR AND COPYRIGHT + +This module is written and maintained by + +Jeff Zucker < jzucker AT cpan.org > + +Copyright (c) 2004 by Jeff Zucker, all rights reserved. + +You may freely distribute and/or modify this module under the terms of either the GNU General Public License (GPL) or the Artistic License, as specified in the Perl README file. + +=head1 SEE ALSO + +L<DBI>, L<SQL::Statement>, L<DBI::SQL::Nano>, L<AnyDBM_File>, L<MLDBM> + +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/ExampleP.pm b/Master/tlpkg/tlperl/lib/DBD/ExampleP.pm new file mode 100755 index 00000000000..a4159d7df4b --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/ExampleP.pm @@ -0,0 +1,430 @@ +{ + package DBD::ExampleP; + + use Symbol; + + use DBI qw(:sql_types); + + @EXPORT = qw(); # Do NOT @EXPORT anything. + $VERSION = sprintf("12.%06d", q$Revision: 10007 $ =~ /(\d+)/o); + + +# $Id: ExampleP.pm 10007 2007-09-27 20:53:04Z timbo $ +# +# Copyright (c) 1994,1997,1998 Tim Bunce +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + + @statnames = qw(dev ino mode nlink + uid gid rdev size + atime mtime ctime + blksize blocks name); + @statnames{@statnames} = (0 .. @statnames-1); + + @stattypes = (SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, + SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, + SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, + SQL_INTEGER, SQL_INTEGER, SQL_VARCHAR); + @stattypes{@statnames} = @stattypes; + @statprec = ((10) x (@statnames-1), 1024); + @statprec{@statnames} = @statprec; + die unless @statnames == @stattypes; + die unless @statprec == @stattypes; + + $drh = undef; # holds driver handle once initialised + #$gensym = "SYM000"; # used by st::execute() for filehandles + + sub driver{ + return $drh if $drh; + my($class, $attr) = @_; + $class .= "::dr"; + ($drh) = DBI::_new_drh($class, { + 'Name' => 'ExampleP', + 'Version' => $VERSION, + 'Attribution' => 'DBD Example Perl stub by Tim Bunce', + }, ['example implementors private data '.__PACKAGE__]); + $drh; + } + + sub CLONE { + undef $drh; + } +} + + +{ package DBD::ExampleP::dr; # ====== DRIVER ====== + $imp_data_size = 0; + use strict; + + sub connect { # normally overridden, but a handy default + my($drh, $dbname, $user, $auth)= @_; + my ($outer, $dbh) = DBI::_new_dbh($drh, { + Name => $dbname, + examplep_private_dbh_attrib => 42, # an example, for testing + }); + $dbh->{examplep_get_info} = { + 29 => '"', # SQL_IDENTIFIER_QUOTE_CHAR + 41 => '.', # SQL_CATALOG_NAME_SEPARATOR + 114 => 1, # SQL_CATALOG_LOCATION + }; + #$dbh->{Name} = $dbname; + $dbh->STORE('Active', 1); + return $outer; + } + + sub data_sources { + return ("dbi:ExampleP:dir=."); # possibly usefully meaningless + } + +} + + +{ package DBD::ExampleP::db; # ====== DATABASE ====== + $imp_data_size = 0; + use strict; + + sub prepare { + my($dbh, $statement)= @_; + my @fields; + my($fields, $dir) = $statement =~ m/^\s*select\s+(.*?)\s+from\s+(\S*)/i; + + if (defined $fields and defined $dir) { + @fields = ($fields eq '*') + ? 
keys %DBD::ExampleP::statnames + : split(/\s*,\s*/, $fields); + } + else { + return $dbh->set_err($DBI::stderr, "Syntax error in select statement (\"$statement\")") + unless $statement =~ m/^\s*set\s+/; + # the SET syntax is just a hack so the ExampleP driver can + # be used to test non-select statements. + # Now we have DBI::DBM etc., ExampleP should be deprecated + } + + my ($outer, $sth) = DBI::_new_sth($dbh, { + 'Statement' => $statement, + examplep_private_sth_attrib => 24, # an example, for testing + }, ['example implementors private data '.__PACKAGE__]); + + my @bad = map { + defined $DBD::ExampleP::statnames{$_} ? () : $_ + } @fields; + return $dbh->set_err($DBI::stderr, "Unknown field names: @bad") + if @bad; + + $outer->STORE('NUM_OF_FIELDS' => scalar(@fields)); + + $sth->{examplep_ex_dir} = $dir if defined($dir) && $dir !~ /\?/; + $outer->STORE('NUM_OF_PARAMS' => ($dir) ? $dir =~ tr/?/?/ : 0); + + if (@fields) { + $outer->STORE('NAME' => \@fields); + $outer->STORE('NULLABLE' => [ (0) x @fields ]); + $outer->STORE('SCALE' => [ (0) x @fields ]); + } + + $outer; + } + + + sub table_info { + my $dbh = shift; + my ($catalog, $schema, $table, $type) = @_; + + my @types = split(/["']*,["']/, $type || 'TABLE'); + my %types = map { $_=>$_ } @types; + + # Return a list of all subdirectories + my $dh = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym; + my $haveFileSpec = eval { require File::Spec }; + my $dir = $catalog || ($haveFileSpec ? File::Spec->curdir() : "."); + my @list; + if ($types{VIEW}) { # for use by test harness + push @list, [ undef, "schema", "table", 'VIEW', undef ]; + push @list, [ undef, "sch-ema", "table", 'VIEW', undef ]; + push @list, [ undef, "schema", "ta-ble", 'VIEW', undef ]; + push @list, [ undef, "sch ema", "table", 'VIEW', undef ]; + push @list, [ undef, "schema", "ta ble", 'VIEW', undef ]; + } + if ($types{TABLE}) { + no strict 'refs'; + opendir($dh, $dir) + or return $dbh->set_err(int($!), "Failed to open directory $dir: $!"); + while (defined(my $item = readdir($dh))) { + if ($^O eq 'VMS') { + # if on VMS then avoid warnings from catdir if you use a file + # (not a dir) as the item below + next if $item !~ /\.dir$/oi; + } + my $file = ($haveFileSpec) ? File::Spec->catdir($dir,$item) : $item; + next unless -d $file; + my($dev, $ino, $mode, $nlink, $uid) = lstat($file); + my $pwnam = undef; # eval { scalar(getpwnam($uid)) } || $uid; + push @list, [ $dir, $pwnam, $item, 'TABLE', undef ]; + } + close($dh); + } + # We would like to simply do a DBI->connect() here. However, + # this is wrong if we are in a subclass like DBI::ProxyServer. 
+ $dbh->{'dbd_sponge_dbh'} ||= DBI->connect("DBI:Sponge:", '','') + or return $dbh->set_err($DBI::err, + "Failed to connect to DBI::Sponge: $DBI::errstr"); + + my $attr = { + 'rows' => \@list, + 'NUM_OF_FIELDS' => 5, + 'NAME' => ['TABLE_CAT', 'TABLE_SCHEM', 'TABLE_NAME', + 'TABLE_TYPE', 'REMARKS'], + 'TYPE' => [DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(), + DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR() ], + 'NULLABLE' => [1, 1, 1, 1, 1] + }; + my $sdbh = $dbh->{'dbd_sponge_dbh'}; + my $sth = $sdbh->prepare("SHOW TABLES FROM $dir", $attr) + or return $dbh->set_err($sdbh->err(), $sdbh->errstr()); + $sth; + } + + + sub type_info_all { + my ($dbh) = @_; + my $ti = [ + { TYPE_NAME => 0, + DATA_TYPE => 1, + COLUMN_SIZE => 2, + LITERAL_PREFIX => 3, + LITERAL_SUFFIX => 4, + CREATE_PARAMS => 5, + NULLABLE => 6, + CASE_SENSITIVE => 7, + SEARCHABLE => 8, + UNSIGNED_ATTRIBUTE=> 9, + FIXED_PREC_SCALE=> 10, + AUTO_UNIQUE_VALUE => 11, + LOCAL_TYPE_NAME => 12, + MINIMUM_SCALE => 13, + MAXIMUM_SCALE => 14, + }, + [ 'VARCHAR', DBI::SQL_VARCHAR, 1024, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ], + [ 'INTEGER', DBI::SQL_INTEGER, 10, "","", undef, 0, 0, 1, 0, 0,0,undef,0,0 ], + ]; + return $ti; + } + + + sub ping { + (shift->FETCH('Active')) ? 2 : 0; # the value 2 is checked for by t/80proxy.t + } + + + sub disconnect { + shift->STORE(Active => 0); + return 1; + } + + + sub get_info { + my ($dbh, $info_type) = @_; + return $dbh->{examplep_get_info}->{$info_type}; + } + + + sub FETCH { + my ($dbh, $attrib) = @_; + # In reality this would interrogate the database engine to + # either return dynamic values that cannot be precomputed + # or fetch and cache attribute values too expensive to prefetch. + # else pass up to DBI to handle + return $INC{"DBD/ExampleP.pm"} if $attrib eq 'example_driver_path'; + return $dbh->SUPER::FETCH($attrib); + } + + + sub STORE { + my ($dbh, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + if ($attrib eq 'AutoCommit') { + # convert AutoCommit values to magic ones to let DBI + # know that the driver has 'handled' the AutoCommit attribute + $value = ($value) ? -901 : -900; + } + return $dbh->{$attrib} = $value if $attrib =~ /^examplep_/; + return $dbh->SUPER::STORE($attrib, $value); + } + + sub DESTROY { + my $dbh = shift; + $dbh->disconnect if $dbh->FETCH('Active'); + undef + } + + + # This is an example to demonstrate the use of driver-specific + # methods via $dbh->func(). + # Use it as follows: + # my @tables = $dbh->func($re, 'examplep_tables'); + # + # Returns all the tables that match the regular expression $re. 
+ sub examplep_tables { + my $dbh = shift; my $re = shift; + grep { $_ =~ /$re/ } $dbh->tables(); + } + + sub parse_trace_flag { + my ($h, $name) = @_; + return 0x01000000 if $name eq 'foo'; + return 0x02000000 if $name eq 'bar'; + return 0x04000000 if $name eq 'baz'; + return 0x08000000 if $name eq 'boo'; + return 0x10000000 if $name eq 'bop'; + return $h->SUPER::parse_trace_flag($name); + } + + sub private_attribute_info { + return { example_driver_path => undef }; + } +} + + +{ package DBD::ExampleP::st; # ====== STATEMENT ====== + $imp_data_size = 0; + use strict; no strict 'refs'; # cause problems with filehandles + + my $haveFileSpec = eval { require File::Spec }; + + sub bind_param { + my($sth, $param, $value, $attribs) = @_; + $sth->{'dbd_param'}->[$param-1] = $value; + return 1; + } + + + sub execute { + my($sth, @dir) = @_; + my $dir; + + if (@dir) { + $sth->bind_param($_, $dir[$_-1]) or return + foreach (1..@dir); + } + + my $dbd_param = $sth->{'dbd_param'} || []; + return $sth->set_err(2, @$dbd_param." values bound when $sth->{NUM_OF_PARAMS} expected") + unless @$dbd_param == $sth->{NUM_OF_PARAMS}; + + return 0 unless $sth->{NUM_OF_FIELDS}; # not a select + + $dir = $dbd_param->[0] || $sth->{examplep_ex_dir}; + return $sth->set_err(2, "No bind parameter supplied") + unless defined $dir; + + $sth->finish; + + # + # If the users asks for directory "long_list_4532", then we fake a + # directory with files "file4351", "file4350", ..., "file0". + # This is a special case used for testing, especially DBD::Proxy. + # + if ($dir =~ /^long_list_(\d+)$/) { + $sth->{dbd_dir} = [ $1 ]; # array ref indicates special mode + $sth->{dbd_datahandle} = undef; + } + else { + $sth->{dbd_dir} = $dir; + my $sym = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym; + opendir($sym, $dir) + or return $sth->set_err(2, "opendir($dir): $!"); + $sth->{dbd_datahandle} = $sym; + } + $sth->STORE(Active => 1); + return 1; + } + + + sub fetch { + my $sth = shift; + my $dir = $sth->{dbd_dir}; + my %s; + + if (ref $dir) { # special fake-data test mode + my $num = $dir->[0]--; + unless ($num > 0) { + $sth->finish(); + return; + } + my $time = time; + @s{@DBD::ExampleP::statnames} = + ( 2051, 1000+$num, 0644, 2, $>, $), 0, 1024, + $time, $time, $time, 512, 2, "file$num") + } + else { # normal mode + my $dh = $sth->{dbd_datahandle} + or return $sth->set_err($DBI::stderr, "fetch without successful execute"); + my $f = readdir($dh); + unless ($f) { + $sth->finish; + return; + } + # untaint $f so that we can use this for DBI taint tests + ($f) = ($f =~ m/^(.*)$/); + my $file = $haveFileSpec + ? File::Spec->catfile($dir, $f) : "$dir/$f"; + # put in all the data fields + @s{ @DBD::ExampleP::statnames } = (lstat($file), $f); + } + + # return just what fields the query asks for + my @new = @s{ @{$sth->{NAME}} }; + + return $sth->_set_fbav(\@new); + } + *fetchrow_arrayref = \&fetch; + + + sub finish { + my $sth = shift; + closedir($sth->{dbd_datahandle}) if $sth->{dbd_datahandle}; + $sth->{dbd_datahandle} = undef; + $sth->{dbd_dir} = undef; + $sth->SUPER::finish(); + return 1; + } + + + sub FETCH { + my ($sth, $attrib) = @_; + # In reality this would interrogate the database engine to + # either return dynamic values that cannot be precomputed + # or fetch and cache attribute values too expensive to prefetch. 
+ if ($attrib eq 'TYPE'){ + return [ @DBD::ExampleP::stattypes{ @{ $sth->FETCH(q{NAME_lc}) } } ]; + } + elsif ($attrib eq 'PRECISION'){ + return [ @DBD::ExampleP::statprec{ @{ $sth->FETCH(q{NAME_lc}) } } ]; + } + elsif ($attrib eq 'ParamValues') { + my $dbd_param = $sth->{dbd_param} || []; + my %pv = map { $_ => $dbd_param->[$_-1] } 1..@$dbd_param; + return \%pv; + } + # else pass up to DBI to handle + return $sth->SUPER::FETCH($attrib); + } + + + sub STORE { + my ($sth, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + return $sth->{$attrib} = $value + if $attrib eq 'NAME' or $attrib eq 'NULLABLE' or $attrib eq 'SCALE' or $attrib eq 'PRECISION'; + return $sth->SUPER::STORE($attrib, $value); + } + + *parse_trace_flag = \&DBD::ExampleP::db::parse_trace_flag; +} + +1; +# vim: sw=4:ts=8 diff --git a/Master/tlpkg/tlperl/lib/DBD/File.pm b/Master/tlpkg/tlperl/lib/DBD/File.pm new file mode 100755 index 00000000000..fe72e8812d4 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/File.pm @@ -0,0 +1,989 @@ +# -*- perl -*- +# +# DBD::File - A base class for implementing DBI drivers that +# act on plain files +# +# This module is currently maintained by +# +# H.Merijn Brand & Jens Rehsack +# +# The original author is Jochen Wiedmann. +# +# Copyright (C) 2009 by H.Merijn Brand & Jens Rehsack +# Copyright (C) 2004 by Jeff Zucker +# Copyright (C) 1998 by Jochen Wiedmann +# +# All rights reserved. +# +# You may distribute this module under the terms of either the GNU +# General Public License or the Artistic License, as specified in +# the Perl README file. + +require 5.005; + +use strict; + +use DBI (); +require DBI::SQL::Nano; +require File::Spec; + +package DBD::File; + +use strict; + +use vars qw( @ISA $VERSION $drh $valid_attrs ); + +$VERSION = "0.37"; + +$drh = undef; # holds driver handle(s) once initialised + +sub driver ($;$) +{ + my ($class, $attr) = @_; + + # Drivers typically use a singleton object for the $drh + # We use a hash here to have one singleton per subclass. + # (Otherwise DBD::CSV and DBD::DBM, for example, would + # share the same driver object which would cause problems.) + # An alternative would be not not cache the $drh here at all + # and require that subclasses do that. Subclasses should do + # their own caching, so caching here just provides extra safety. + $drh->{$class} and return $drh->{$class}; + + DBI->setup_driver ("DBD::File"); # only needed once but harmless to repeat + $attr ||= {}; + { no strict "refs"; + unless ($attr->{Attribution}) { + $class eq "DBD::File" and $attr->{Attribution} = "$class by Jeff Zucker"; + $attr->{Attribution} ||= ${$class . "::ATTRIBUTION"} || + "oops the author of $class forgot to define this"; + } + $attr->{Version} ||= ${$class . "::VERSION"}; + $attr->{Name} or ($attr->{Name} = $class) =~ s/^DBD\:\://; + } + + $drh->{$class} = DBI::_new_drh ($class . "::dr", $attr); + $drh->{$class}->STORE (ShowErrorStatement => 1); + return $drh->{$class}; + } # driver + +sub CLONE +{ + undef $drh; + } # CLONE + +sub file2table +{ + my ($data, $dir, $file, $file_is_tab, $quoted) = @_; + + $file eq "." || $file eq ".." 
and return; + + my ($ext, $req) = ("", 0, 0); + if ($data->{f_ext}) { + ($ext, my $opt) = split m/\//, $data->{f_ext}; + if ($ext && $opt) { + $opt =~ m/r/i and $req = 1; + } + } + + (my $tbl = $file) =~ s/$ext$//i; + $file_is_tab and $file = "$tbl$ext"; + + # Fully Qualified File Name + my $fqfn; + unless ($quoted) { # table names are case insensitive in SQL + local *DIR; + opendir DIR, $dir; + my @f = grep { lc $_ eq lc $file } readdir DIR; + @f == 1 and $file = $f[0]; + } + $fqfn = File::Spec->catfile ($dir, $file); + + $file = $fqfn; + if ($ext) { + if ($req) { + # File extension required + $file =~ s/$ext$//i or return; + } + else { + # File extension optional, skip if file with extension exists + grep m/$ext$/i, glob "$fqfn.*" and return; + $file =~ s/$ext$//i; + } + } + + $data->{f_map}{$tbl} = $fqfn; + return $tbl; + } # file2table + +# ====== DRIVER ================================================================ + +package DBD::File::dr; + +use strict; + +$DBD::File::dr::imp_data_size = 0; + +sub connect ($$;$$$) +{ + my ($drh, $dbname, $user, $auth, $attr)= @_; + + # create a 'blank' dbh + my $this = DBI::_new_dbh ($drh, { + Name => $dbname, + USER => $user, + CURRENT_USER => $user, + }); + + if ($this) { + my ($var, $val); + $this->{f_dir} = File::Spec->curdir (); + $this->{f_ext} = ""; + $this->{f_map} = {}; + while (length $dbname) { + if ($dbname =~ s/^((?:[^\\;]|\\.)*?);//s) { + $var = $1; + } + else { + $var = $dbname; + $dbname = ""; + } + if ($var =~ m/^(.+?)=(.*)/s) { + $var = $1; + ($val = $2) =~ s/\\(.)/$1/g; + $this->{$var} = $val; + } + } + $this->{f_valid_attrs} = { + f_version => 1, # DBD::File version + f_dir => 1, # base directory + f_ext => 1, # file extension + f_schema => 1, # schema name + f_tables => 1, # base directory + }; + $this->{sql_valid_attrs} = { + sql_handler => 1, # Nano or S:S + sql_nano_version => 1, # Nano version + sql_statement_version => 1, # S:S version + }; + } + $this->STORE ("Active", 1); + return set_versions ($this); + } # connect + +sub set_versions +{ + my $this = shift; + $this->{f_version} = $DBD::File::VERSION; + for (qw( nano_version statement_version)) { + $this->{"sql_$_"} = $DBI::SQL::Nano::versions->{$_} || ""; + } + $this->{sql_handler} = $this->{sql_statement_version} + ? "SQL::Statement" + : "DBI::SQL::Nano"; + return $this; + } # set_versions + +sub data_sources ($;$) +{ + my ($drh, $attr) = @_; + my $dir = $attr && exists $attr->{f_dir} + ? $attr->{f_dir} + : File::Spec->curdir (); + my ($dirh) = Symbol::gensym (); + unless (opendir ($dirh, $dir)) { + $drh->set_err ($DBI::stderr, "Cannot open directory $dir: $!"); + return; + } + + my ($file, @dsns, %names, $driver); + if ($drh->{ImplementorClass} =~ m/^dbd\:\:([^\:]+)\:\:/i) { + $driver = $1; + } + else { + $driver = "File"; + } + + while (defined ($file = readdir ($dirh))) { + if ($^O eq "VMS") { + # if on VMS then avoid warnings from catdir if you use a file + # (not a dir) as the file below + $file !~ m/\.dir$/oi and next; + } + my $d = File::Spec->catdir ($dir, $file); + # allow current dir ... it can be a data_source too + $file ne File::Spec->updir () && -d $d and + push @dsns, "DBI:$driver:f_dir=$d"; + } + @dsns; + } # data_sources + +sub disconnect_all +{ + } # disconnect_all + +sub DESTROY +{ + undef; + } # DESTROY + +# ====== DATABASE ============================================================== + +package DBD::File::db; + +use strict; +use Carp; + +$DBD::File::db::imp_data_size = 0; + +sub ping +{ + return (shift->FETCH ("Active")) ? 
1 : 0; + } # ping + +sub prepare ($$;@) +{ + my ($dbh, $statement, @attribs) = @_; + + # create a 'blank' sth + my $sth = DBI::_new_sth ($dbh, {Statement => $statement}); + + if ($sth) { + my $class = $sth->FETCH ("ImplementorClass"); + $class =~ s/::st$/::Statement/; + my $stmt; + + # if using SQL::Statement version > 1 + # cache the parser object if the DBD supports parser caching + # SQL::Nano and older SQL::Statements don't support this + + if ( $dbh->{sql_handler} eq "SQL::Statement" and + $dbh->{sql_statement_version} > 1) { + my $parser = $dbh->{csv_sql_parser_object}; + $parser ||= eval { $dbh->func ("csv_cache_sql_parser_object") }; + if ($@) { + $stmt = eval { $class->new ($statement) }; + } + else { + $stmt = eval { $class->new ($statement, $parser) }; + } + } + else { + $stmt = eval { $class->new ($statement) }; + } + if ($@) { + $dbh->set_err ($DBI::stderr, $@); + undef $sth; + } + else { + $sth->STORE ("f_stmt", $stmt); + $sth->STORE ("f_params", []); + $sth->STORE ("NUM_OF_PARAMS", scalar ($stmt->params ())); + } + } + $sth; + } # prepare + +sub csv_cache_sql_parser_object +{ + my $dbh = shift; + my $parser = { + dialect => "CSV", + RaiseError => $dbh->FETCH ("RaiseError"), + PrintError => $dbh->FETCH ("PrintError"), + }; + my $sql_flags = $dbh->FETCH ("sql_flags") || {}; + %$parser = (%$parser, %$sql_flags); + $parser = SQL::Parser->new ($parser->{dialect}, $parser); + $dbh->{csv_sql_parser_object} = $parser; + return $parser; + } # csv_cache_sql_parser_object + +sub disconnect ($) +{ + shift->STORE ("Active", 0); + 1; + } # disconnect + +sub FETCH ($$) +{ + my ($dbh, $attrib) = @_; + $attrib eq "AutoCommit" and + return 1; + + if ($attrib eq (lc $attrib)) { + # Driver private attributes are lower cased + + # Error-check for valid attributes + # not implemented yet, see STORE + # + return $dbh->{$attrib}; + } + # else pass up to DBI to handle + return $dbh->SUPER::FETCH ($attrib); + } # FETCH + +sub STORE ($$$) +{ + my ($dbh, $attrib, $value) = @_; + + if ($attrib eq "AutoCommit") { + $value and return 1; # is already set + croak "Can't disable AutoCommit"; + } + + if ($attrib eq lc $attrib) { + # Driver private attributes are lower cased + + # I'm not implementing this yet because other drivers may be + # setting f_ and sql_ attrs I don't know about + # I'll investigate and publicize warnings to DBD authors, + # then implement this + + # return to implementor if not f_ or sql_ + # not implemented yet + # my $class = $dbh->FETCH ("ImplementorClass"); + # + # !$dbh->{f_valid_attrs}->{$attrib} && !$dbh->{sql_valid_attrs}->{$attrib} and + # return $dbh->set_err ($DBI::stderr, "Invalid attribute '$attrib'"); + # $dbh->{$attrib} = $value; + + if ($attrib eq "f_dir") { + -d $value or + return $dbh->set_err ($DBI::stderr, "No such directory '$value'") + } + if ($attrib eq "f_ext") { + $value eq "" || $value =~ m{^\.\w+(?:/[rR]*)?$} + or carp "'$value' doesn't look like a valid file extension attribute\n"; + } + $dbh->{$attrib} = $value; + return 1; + } + return $dbh->SUPER::STORE ($attrib, $value); + } # STORE + +sub DESTROY ($) +{ + my $dbh = shift; + $dbh->SUPER::FETCH ("Active") and $dbh->disconnect ; + } # DESTROY + +sub type_info_all ($) +{ + [ { TYPE_NAME => 0, + DATA_TYPE => 1, + PRECISION => 2, + LITERAL_PREFIX => 3, + LITERAL_SUFFIX => 4, + CREATE_PARAMS => 5, + NULLABLE => 6, + CASE_SENSITIVE => 7, + SEARCHABLE => 8, + UNSIGNED_ATTRIBUTE => 9, + MONEY => 10, + AUTO_INCREMENT => 11, + LOCAL_TYPE_NAME => 12, + MINIMUM_SCALE => 13, + MAXIMUM_SCALE => 14, + }, + [ "VARCHAR",
DBI::SQL_VARCHAR (), + undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, + ], + [ "CHAR", DBI::SQL_CHAR (), + undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, + ], + [ "INTEGER", DBI::SQL_INTEGER (), + undef, "", "", undef, 0, 0, 1, 0, 0, 0, undef, 0, 0, + ], + [ "REAL", DBI::SQL_REAL (), + undef, "", "", undef, 0, 0, 1, 0, 0, 0, undef, 0, 0, + ], + [ "BLOB", DBI::SQL_LONGVARBINARY (), + undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, + ], + [ "BLOB", DBI::SQL_LONGVARBINARY (), + undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, + ], + [ "TEXT", DBI::SQL_LONGVARCHAR (), + undef, "'", "'", undef, 0, 1, 1, 0, 0, 0, undef, 1, 999999, + ]]; + } # type_info_all + +{ my $names = [ + qw( TABLE_QUALIFIER TABLE_OWNER TABLE_NAME TABLE_TYPE REMARKS )]; + + sub table_info ($) + { + my $dbh = shift; + my $dir = $dbh->{f_dir}; + my $dirh = Symbol::gensym (); + + unless (opendir $dirh, $dir) { + $dbh->set_err ($DBI::stderr, "Cannot open directory $dir: $!"); + return; + } + + my ($file, @tables, %names); + my $schema = exists $dbh->{f_schema} + ? $dbh->{f_schema} + : eval { getpwuid ((stat $dir)[4]) }; + while (defined ($file = readdir ($dirh))) { + my $tbl = DBD::File::file2table ($dbh, $dir, $file, 0, 0) or next; + push @tables, [ undef, $schema, $tbl, "TABLE", undef ]; + } + unless (closedir $dirh) { + $dbh->set_err ($DBI::stderr, "Cannot close directory $dir: $!"); + return; + } + + my $dbh2 = $dbh->{csv_sponge_driver}; + unless ($dbh2) { + $dbh2 = $dbh->{csv_sponge_driver} = DBI->connect ("DBI:Sponge:"); + unless ($dbh2) { + $dbh->set_err ($DBI::stderr, $DBI::errstr); + return; + } + } + + # Temporary kludge: DBD::Sponge dies if @tables is empty. :-( + @tables or return; + + my $sth = $dbh2->prepare ("TABLE_INFO", { + rows => \@tables, + NAMES => $names, + }); + $sth or $dbh->set_err ($DBI::stderr, $dbh2->errstr); + $sth; + } # table_info + } + +sub list_tables ($) +{ + my $dbh = shift; + my ($sth, @tables); + $sth = $dbh->table_info () or return; + while (my $ref = $sth->fetchrow_arrayref ()) { + push @tables, $ref->[2]; + } + @tables; + } # list_tables + +sub quote ($$;$) +{ + my ($self, $str, $type) = @_; + defined $str or return "NULL"; + defined $type && ( + $type == DBI::SQL_NUMERIC () + || $type == DBI::SQL_DECIMAL () + || $type == DBI::SQL_INTEGER () + || $type == DBI::SQL_SMALLINT () + || $type == DBI::SQL_FLOAT () + || $type == DBI::SQL_REAL () + || $type == DBI::SQL_DOUBLE () + || $type == DBI::SQL_TINYINT ()) + and return $str; + + $str =~ s/\\/\\\\/sg; + $str =~ s/\0/\\0/sg; + $str =~ s/\'/\\\'/sg; + $str =~ s/\n/\\n/sg; + $str =~ s/\r/\\r/sg; + "'$str'"; + } # quote + +sub commit ($) +{ + my $dbh = shift; + $dbh->FETCH ("Warn") and + carp "Commit ineffective while AutoCommit is on", -1; + 1; + } # commit + +sub rollback ($) +{ + my $dbh = shift; + $dbh->FETCH ("Warn") and + carp "Rollback ineffective while AutoCommit is on", -1; + 0; + } # rollback + +# ====== STATEMENT ============================================================= + +package DBD::File::st; + +use strict; + +$DBD::File::st::imp_data_size = 0; + +sub bind_param ($$$;$) +{ + my ($sth, $pNum, $val, $attr) = @_; + if ($attr && defined $val) { + my $type = ref $attr eq "HASH" ? 
$attr->{TYPE} : $attr; + if ( $type == DBI::SQL_BIGINT () + || $type == DBI::SQL_INTEGER () + || $type == DBI::SQL_SMALLINT () + || $type == DBI::SQL_TINYINT () + ) { + $val += 0; + } + elsif ($type == DBI::SQL_DECIMAL () + || $type == DBI::SQL_DOUBLE () + || $type == DBI::SQL_FLOAT () + || $type == DBI::SQL_NUMERIC () + || $type == DBI::SQL_REAL () + ) { + $val += 0.; + } + else { + $val = "$val"; + } + } + $sth->{f_params}[$pNum - 1] = $val; + 1; + } # bind_param + +sub execute +{ + my $sth = shift; + my $params = @_ ? ($sth->{f_params} = [ @_ ]) : $sth->{f_params}; + + $sth->finish; + my $stmt = $sth->{f_stmt}; + unless ($sth->{f_params_checked}++) { + # bug in SQL::Statement 1.20 and below causes breakage + # on all but the first call + unless ((my $req_prm = $stmt->params ()) == (my $nparm = @$params)) { + my $msg = "You passed $nparm parameters where $req_prm required"; + $sth->set_err ($DBI::stderr, $msg); + return; + } + } + my @err; + my $result = eval { + local $SIG{__WARN__} = sub { push @err, @_ }; + $stmt->execute ($sth, $params); + }; + if ($@ || @err) { + $sth->set_err ($DBI::stderr, $@ || $err[0]); + return undef; + } + + if ($stmt->{NUM_OF_FIELDS}) { # is a SELECT statement + $sth->STORE (Active => 1); + $sth->FETCH ("NUM_OF_FIELDS") or + $sth->STORE ("NUM_OF_FIELDS", $stmt->{NUM_OF_FIELDS}) + } + return $result; + } # execute + +sub finish +{ + my $sth = shift; + $sth->SUPER::STORE (Active => 0); + delete $sth->{f_stmt}->{data}; + return 1; + } # finish + +sub fetch ($) +{ + my $sth = shift; + my $data = $sth->{f_stmt}->{data}; + if (!$data || ref $data ne "ARRAY") { + $sth->set_err ($DBI::stderr, + "Attempt to fetch row without a preceding execute () call or from a non-SELECT statement" + ); + return + } + my $dav = shift @$data; + unless ($dav) { + $sth->finish; + return + } + if ($sth->FETCH ("ChopBlanks")) { + $_ && $_ =~ s/\s+$// for @$dav; + } + $sth->_set_fbav ($dav); + } # fetch +*fetchrow_arrayref = \&fetch; + +my %unsupported_attrib = map { $_ => 1 } qw( TYPE PRECISION ); + +sub FETCH ($$) +{ + my ($sth, $attrib) = @_; + exists $unsupported_attrib{$attrib} + and return undef; # Workaround for a bug in DBI 0.93 + $attrib eq "NAME" and + return $sth->FETCH ("f_stmt")->{NAME}; + if ($attrib eq "NULLABLE") { + my ($meta) = $sth->FETCH ("f_stmt")->{NAME}; # Intentional !
+ $meta or return undef; + return [ (1) x @$meta ]; + } + if ($attrib eq lc $attrib) { + # Private driver attributes are lower cased + return $sth->{$attrib}; + } + # else pass up to DBI to handle + return $sth->SUPER::FETCH ($attrib); + } # FETCH + +sub STORE ($$$) +{ + my ($sth, $attrib, $value) = @_; + exists $unsupported_attrib{$attrib} + and return; # Workaround for a bug in DBI 0.93 + if ($attrib eq lc $attrib) { + # Private driver attributes are lower cased + $sth->{$attrib} = $value; + return 1; + } + return $sth->SUPER::STORE ($attrib, $value); + } # STORE + +sub DESTROY ($) +{ + my $sth = shift; + $sth->SUPER::FETCH ("Active") and $sth->finish; + } # DESTROY + +sub rows ($) +{ + shift->{f_stmt}->{NUM_OF_ROWS}; + } # rows + +package DBD::File::Statement; + +use strict; +use Carp; + +# We may have a working flock () built-in but that doesn't mean that locking +# will work on NFS (flock () may hang hard) +my $locking = eval { flock STDOUT, 0; 1 }; + +# Jochen's old check for flock () +# +# my $locking = $^O ne "MacOS" && +# ($^O ne "MSWin32" || !Win32::IsWin95 ()) && +# $^O ne "VMS"; + +@DBD::File::Statement::ISA = qw( DBI::SQL::Nano::Statement ); + +my $open_table_re = sprintf "(?:%s|%s|%s)", + quotemeta (File::Spec->curdir ()), + quotemeta (File::Spec->updir ()), + quotemeta (File::Spec->rootdir ()); + +sub get_file_name ($$$) +{ + my ($self, $data, $table) = @_; + my $quoted = 0; + $table =~ s/^\"// and $quoted = 1; # handle quoted identifiers + $table =~ s/\"$//; + my $file = $table; + if ( $file !~ m/^$open_table_re/o + and $file !~ m{^[/\\]} # root + and $file !~ m{^[a-z]\:} # drive letter + ) { + exists $data->{Database}{f_map}{$table} or + DBD::File::file2table ($data->{Database}, + $data->{Database}{f_dir}, $file, 1, $quoted); + $file = $data->{Database}{f_map}{$table} || undef; + } + return ($table, $file); + } # get_file_name + +sub open_table ($$$$$) +{ + my ($self, $data, $table, $createMode, $lockMode) = @_; + my $file; + ($table, $file) = $self->get_file_name ($data, $table); + defined $file && $file ne "" or croak "No filename given"; + require IO::File; + my $fh; + my $safe_drop = $self->{ignore_missing_table} ? 1 : 0; + if ($createMode) { + -f $file and + croak "Cannot create table $table: Already exists"; + $fh = IO::File->new ($file, "a+") or + croak "Cannot open $file for writing: $!"; + $fh->seek (0, 0) or + croak "Error while seeking back: $!"; + } + else { + unless ($fh = IO::File->new ($file, ($lockMode ? "r+" : "r"))) { + $safe_drop or croak "Cannot open $file: $!"; + } + } + $fh and binmode $fh; + if ($locking and $fh) { + if ($lockMode) { + flock $fh, 2 or croak "Cannot obtain exclusive lock on $file: $!"; + } + else { + flock $fh, 1 or croak "Cannot obtain shared lock on $file: $!"; + } + } + my $columns = {}; + my $array = []; + my $pos = $fh ? $fh->tell () : undef; + my $tbl = { + file => $file, + fh => $fh, + col_nums => $columns, + col_names => $array, + first_row_pos => $pos, + }; + my $class = ref $self; + $class =~ s/::Statement/::Table/; + bless $tbl, $class; + $tbl; + } # open_table + +package DBD::File::Table; + +use strict; +use Carp; + +@DBD::File::Table::ISA = qw(DBI::SQL::Nano::Table); + +sub drop ($) +{ + my $self = shift; + # We have to close the file before unlinking it: Some OS'es will + # refuse the unlink otherwise. 
+ $self->{fh} and $self->{fh}->close (); + unlink $self->{file}; + return 1; + } # drop + +sub seek ($$$$) +{ + my ($self, $data, $pos, $whence) = @_; + if ($whence == 0 && $pos == 0) { + $pos = $self->{first_row_pos}; + } + elsif ($whence != 2 || $pos != 0) { + croak "Illegal seek position: pos = $pos, whence = $whence"; + } + + $self->{fh}->seek ($pos, $whence) or + croak "Error while seeking in " . $self->{file} . ": $!"; + } # seek + +sub truncate ($$) +{ + my ($self, $data) = @_; + $self->{fh}->truncate ($self->{fh}->tell ()) or + croak "Error while truncating " . $self->{file} . ": $!"; + 1; + } # truncate + +1; + +__END__ + +=head1 NAME + +DBD::File - Base class for writing DBI drivers + +=head1 SYNOPSIS + + This module is a base class for writing other DBDs. + It is not intended to function as a DBD itself. + If you want to access flat files, use DBD::AnyData or DBD::CSV + (both of which are subclasses of DBD::File). + +=head1 DESCRIPTION + +The DBD::File module is not a true DBI driver, but an abstract +base class for deriving concrete DBI drivers from it. The implication is +that these drivers work with plain files, for example CSV files or +INI files. The module is based on the SQL::Statement module, a simple +SQL engine. + +See L<DBI> for details on DBI, L<SQL::Statement> for details on +SQL::Statement and L<DBD::CSV> or L<DBD::IniFile> for example +drivers. + + +=head2 Metadata + +The following attributes are handled by DBI itself and not by DBD::File, +so they all work as expected: + + Active + ActiveKids + CachedKids + CompatMode (Not used) + InactiveDestroy + Kids + PrintError + RaiseError + Warn (Not used) + +The following DBI attributes are handled by DBD::File: + +=over 4 + +=item AutoCommit + +Always on + +=item ChopBlanks + +Works + +=item NUM_OF_FIELDS + +Valid after C<$sth->execute> + +=item NUM_OF_PARAMS + +Valid after C<$sth->prepare> + +=item NAME + +Valid after C<$sth->execute>; undef for non-SELECT statements. + +=item NULLABLE + +Not really working; always returns an array ref of ones, as DBD::CSV +doesn't verify input data. Valid after C<$sth->execute>; undef for +non-SELECT statements. + +=back + +These attributes and methods are not supported: + + bind_param_inout + CursorName + LongReadLen + LongTruncOk + +In addition to the DBI attributes, you can use the following dbh +attributes: + +=over 4 + +=item f_dir + +This attribute is used for setting the directory where CSV files are +opened. Usually you set it on the dbh; it defaults to the current +directory ("."). However, it can be overridden in the statement handles. + +=item f_ext + +This attribute is used for setting the file extension of the (CSV) files +being opened. There are several possibilities. + + DBI:CSV:f_dir=data;f_ext=.csv + +In this case, DBD::File will open only C<table.csv> if both C<table.csv> and +C<table> exist in the datadir. The table will still be named C<table>. If +your datadir has files with extensions, and you do not pass this attribute, +your table is named C<table.csv>, which is probably not what you wanted. The +extension is always case-insensitive. The table names are not. + + DBI:CSV:f_dir=data;f_ext=.csv/r + +In this case the extension is required, and all filenames that do not match +are ignored. + +=item f_schema + +This will set the schema name. The default is the owner of the folder in +which the table file resides. C<undef> is allowed.
+ + my $dbh = DBI->connect ("dbi:CSV:", "", "", { + f_schema => undef, + f_dir => "data", + f_ext => ".csv/r", + }) or die $DBI::errstr; + +The effect is that when you get table names from DBI, you can force all +tables into the same (or no) schema: + + my @tables = $dbh->tables (); + + # no f_schema + "merijn".foo + "merijn".bar + + # f_schema => "dbi" + "dbi".foo + "dbi".bar + + # f_schema => undef + foo + bar + +=back + +=head2 Driver private methods + +=over 4 + +=item data_sources + +The C<data_sources> method returns a list of subdirectories of the current +directory in the form "DBI:CSV:f_dir=$dirname". + +If you want to read the subdirectories of another directory, use + + my ($drh) = DBI->install_driver ("CSV"); + my (@list) = $drh->data_sources (f_dir => "/usr/local/csv_data" ); + +=item list_tables + +This method returns a list of file names inside $dbh->{f_dir}. +Example: + + my ($dbh) = DBI->connect ("DBI:CSV:f_dir=/usr/local/csv_data"); + my (@list) = $dbh->func ("list_tables"); + +Note that the list includes all files contained in the directory, even +those whose names are not valid table names from SQL's point of view. + +=back + +=head1 KNOWN BUGS + +=over 8 + +=item * + +The module uses flock () internally. However, this function is not +available on all platforms. Locking is disabled on MacOS and +Windows 95: there's no locking at all (perhaps not so important on +MacOS and Windows 95, as they are single-user anyway). + +=back + +=head1 AUTHOR + +This module is currently maintained by + +H.Merijn Brand < h.m.brand at xs4all.nl > and +Jens Rehsack < rehsack at googlemail.com > + +The original author is Jochen Wiedmann. + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by H.Merijn Brand & Jens Rehsack +Copyright (C) 2004 by Jeff Zucker +Copyright (C) 1998 by Jochen Wiedmann + +All rights reserved. + +You may freely distribute and/or modify this module under the terms of +either the GNU General Public License (GPL) or the Artistic License, as +specified in the Perl README file. + +=head1 SEE ALSO + +L<DBI>, L<Text::CSV>, L<Text::CSV_XS>, L<SQL::Statement> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer.pm new file mode 100755 index 00000000000..aa78557afe4 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer.pm @@ -0,0 +1,1284 @@ +{ + package DBD::Gofer; + + use strict; + + require DBI; + require DBI::Gofer::Request; + require DBI::Gofer::Response; + require Carp; + + our $VERSION = sprintf("0.%06d", q$Revision: 11565 $ =~ /(\d+)/o); + +# $Id: Gofer.pm 11565 2008-07-22 20:17:33Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file.
+ + + + # attributes we'll allow local STORE + our %xxh_local_store_attrib = map { $_=>1 } qw( + Active + CachedKids + Callbacks + DbTypeSubclass + ErrCount Executed + FetchHashKeyName + HandleError HandleSetErr + InactiveDestroy + PrintError PrintWarn + Profile + RaiseError + RootClass + ShowErrorStatement + Taint TaintIn TaintOut + TraceLevel + Warn + dbi_quote_identifier_cache + dbi_connect_closure + dbi_go_execute_unique + ); + our %xxh_local_store_attrib_if_same_value = map { $_=>1 } qw( + Username + dbi_connect_method + ); + + our $drh = undef; # holds driver handle once initialised + our $methods_already_installed; + + sub driver{ + return $drh if $drh; + + DBI->setup_driver('DBD::Gofer'); + + unless ($methods_already_installed++) { + my $opts = { O=> 0x0004 }; # IMA_KEEP_ERR + DBD::Gofer::db->install_method('go_dbh_method', $opts); + DBD::Gofer::st->install_method('go_sth_method', $opts); + DBD::Gofer::st->install_method('go_clone_sth', $opts); + DBD::Gofer::db->install_method('go_cache', $opts); + DBD::Gofer::st->install_method('go_cache', $opts); + } + + my($class, $attr) = @_; + $class .= "::dr"; + ($drh) = DBI::_new_drh($class, { + 'Name' => 'Gofer', + 'Version' => $VERSION, + 'Attribution' => 'DBD Gofer by Tim Bunce', + }); + + $drh; + } + + + sub CLONE { + undef $drh; + } + + + sub go_cache { + my $h = shift; + $h->{go_cache} = shift if @_; + # return handle's override go_cache, if it has one + return $h->{go_cache} if defined $h->{go_cache}; + # or else the transport's default go_cache + return $h->{go_transport}->{go_cache}; + } + + + sub set_err_from_response { # set error/warn/info and propagate warnings + my $h = shift; + my $response = shift; + if (my $warnings = $response->warnings) { + warn $_ for @$warnings; + } + return $h->set_err($response->err_errstr_state); + } + + + sub install_methods_proxy { + my ($installed_methods) = @_; + while ( my ($full_method, $attr) = each %$installed_methods ) { + # need to install both a DBI dispatch stub and a proxy stub + # (the dispatch stub may be already here due to local driver use) + + DBI->_install_method($full_method, "", $attr||{}) + unless defined &{$full_method}; + + # now install proxy stubs on the driver side + $full_method =~ m/^DBI::(\w\w)::(\w+)$/ + or die "Invalid method name '$full_method' for install_method"; + my ($type, $method) = ($1, $2); + my $driver_method = "DBD::Gofer::${type}::${method}"; + next if defined &{$driver_method}; + my $sub; + if ($type eq 'db') { + $sub = sub { return shift->go_dbh_method(undef, $method, @_) }; + } + else { + $sub = sub { shift->set_err($DBI::stderr, "Can't call \$${type}h->$method when using DBD::Gofer"); return; }; + } + no strict 'refs'; + *$driver_method = $sub; + } + } +} + + +{ package DBD::Gofer::dr; # ====== DRIVER ====== + + $imp_data_size = 0; + use strict; + + sub connect_cached { + my ($drh, $dsn, $user, $auth, $attr)= @_; + $attr ||= {}; + return $drh->SUPER::connect_cached($dsn, $user, $auth, { + (%$attr), + go_connect_method => $attr->{go_connect_method} || 'connect_cached', + }); + } + + + sub connect { + my($drh, $dsn, $user, $auth, $attr)= @_; + my $orig_dsn = $dsn; + + # first remove dsn= and everything after it + my $remote_dsn = ($dsn =~ s/;?\bdsn=(.*)$// && $1) + or return $drh->set_err($DBI::stderr, "No dsn= argument in '$orig_dsn'"); + + if ($attr->{go_bypass}) { # don't use DBD::Gofer for this connection + # useful for testing with DBI_AUTOPROXY, e.g., t/03handle.t + return DBI->connect($remote_dsn, $user, $auth, $attr); + } + + my %go_attr; + #
extract any go_ attributes from the connect() attr arg + for my $k (grep { /^go_/ } keys %$attr) { + $go_attr{$k} = delete $attr->{$k}; + } + # then override those with any attributes embedded in our dsn (not remote_dsn) + for my $kv (grep /=/, split /;/, $dsn, -1) { + my ($k, $v) = split /=/, $kv, 2; + $go_attr{ "go_$k" } = $v; + } + + if (not ref $go_attr{go_policy}) { # if not a policy object already + my $policy_class = $go_attr{go_policy} || 'classic'; + $policy_class = "DBD::Gofer::Policy::$policy_class" + unless $policy_class =~ /::/; + _load_class($policy_class) + or return $drh->set_err($DBI::stderr, "Can't load $policy_class: $@"); + # replace policy name in %go_attr with policy object + $go_attr{go_policy} = eval { $policy_class->new(\%go_attr) } + or return $drh->set_err($DBI::stderr, "Can't instantiate $policy_class: $@"); + } + # policy object is left in $go_attr{go_policy} so transport can see it + my $go_policy = $go_attr{go_policy}; + + if ($go_attr{go_cache} and not ref $go_attr{go_cache}) { # if not a cache object already + my $cache_class = $go_attr{go_cache}; + $cache_class = "DBI::Util::CacheMemory" if $cache_class eq '1'; + _load_class($cache_class) + or return $drh->set_err($DBI::stderr, "Can't load $cache_class: $@"); + $go_attr{go_cache} = eval { $cache_class->new() } + or $drh->set_err(0, "Can't instantiate $cache_class: $@"); # warning + } + + # delete any other attributes that don't apply to transport + my $go_connect_method = delete $go_attr{go_connect_method}; + + my $transport_class = delete $go_attr{go_transport} + or return $drh->set_err($DBI::stderr, "No transport= argument in '$orig_dsn'"); + $transport_class = "DBD::Gofer::Transport::$transport_class" + unless $transport_class =~ /::/; + _load_class($transport_class) + or return $drh->set_err($DBI::stderr, "Can't load $transport_class: $@"); + my $go_transport = eval { $transport_class->new(\%go_attr) } + or return $drh->set_err($DBI::stderr, "Can't instantiate $transport_class: $@"); + + my $request_class = "DBI::Gofer::Request"; + my $go_request = eval { + my $go_attr = { %$attr }; + # XXX user/pass of fwd server vs db server ? also impact of autoproxy + if ($user) { + $go_attr->{Username} = $user; + $go_attr->{Password} = $auth; + } + # delete any attributes we can't serialize (or don't want to) + delete @{$go_attr}{qw(Profile HandleError HandleSetErr Callbacks)}; + # delete any attributes that should only apply to the client-side + delete @{$go_attr}{qw(RootClass DbTypeSubclass)}; + + $go_connect_method ||= $go_policy->connect_method($remote_dsn, $go_attr) || 'connect'; + $request_class->new({ + dbh_connect_call => [ $go_connect_method, $remote_dsn, $user, $auth, $go_attr ], + }) + } or return $drh->set_err($DBI::stderr, "Can't instantiate $request_class: $@"); + + my ($dbh, $dbh_inner) = DBI::_new_dbh($drh, { + 'Name' => $dsn, + 'USER' => $user, + go_transport => $go_transport, + go_request => $go_request, + go_policy => $go_policy, + }); + + # mark as inactive temporarily for STORE. Active not set until connected() called.
+ $dbh->STORE(Active => 0); + + # should we ping to check the connection + # and fetch dbh attributes + my $skip_connect_check = $go_policy->skip_connect_check($attr, $dbh); + if (not $skip_connect_check) { + if (not $dbh->go_dbh_method(undef, 'ping')) { + return undef if $dbh->err; # error already recorded, typically + return $dbh->set_err($DBI::stderr, "ping failed"); + } + } + + return $dbh; + } + + sub _load_class { # return true or false+$@ + my $class = shift; + (my $pm = $class) =~ s{::}{/}g; + $pm .= ".pm"; + return 1 if eval { require $pm }; + delete $INC{$pm}; # shouldn't be needed (perl bug?) and assigning undef isn't enough + undef; # error in $@ + } + +} + + +{ package DBD::Gofer::db; # ====== DATABASE ====== + $imp_data_size = 0; + use strict; + use Carp qw(carp croak); + + my %dbh_local_store_attrib = %DBD::Gofer::xxh_local_store_attrib; + + sub connected { + shift->STORE(Active => 1); + } + + sub go_dbh_method { + my $dbh = shift; + my $meta = shift; + # @_ now contains ($method_name, @args) + + my $request = $dbh->{go_request}; + $request->init_request([ wantarray, @_ ], $dbh); + ++$dbh->{go_request_count}; + + my $go_policy = $dbh->{go_policy}; + my $dbh_attribute_update = $go_policy->dbh_attribute_update(); + $request->dbh_attributes( $go_policy->dbh_attribute_list() ) + if $dbh_attribute_update eq 'every' + or $dbh->{go_request_count}==1; + + $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args}) + if $meta->{go_last_insert_id_args}; + + my $transport = $dbh->{go_transport} + or return $dbh->set_err($DBI::stderr, "Not connected (no transport)"); + + local $transport->{go_cache} = $dbh->{go_cache} + if defined $dbh->{go_cache}; + + my ($response, $retransmit_sub) = $transport->transmit_request($request); + $response ||= $transport->receive_response($request, $retransmit_sub); + $dbh->{go_response} = $response + or die "No response object returned by $transport"; + + die "response '$response' returned by $transport is not a response object" + unless UNIVERSAL::isa($response,"DBI::Gofer::Response"); + + if (my $dbh_attributes = $response->dbh_attributes) { + + # XXX installed_methods piggybacks on dbh_attributes for now + if (my $installed_methods = delete $dbh_attributes->{dbi_installed_methods}) { + DBD::Gofer::install_methods_proxy($installed_methods) + if $dbh->{go_request_count}==1; + } + + # XXX we don't STORE here, we just stuff the value into the attribute cache + $dbh->{$_} = $dbh_attributes->{$_} + for keys %$dbh_attributes; + } + + my $rv = $response->rv; + if (my $resultset_list = $response->sth_resultsets) { + # dbh method call returned one or more resultsets + # (was probably a metadata method like table_info) + # + # setup an sth but don't execute/forward it + my $sth = $dbh->prepare(undef, { go_skip_prepare_check => 1 }); + # set the sth response to our dbh response + (tied %$sth)->{go_response} = $response; + # setup the sth with the results in our response + $sth->more_results; + # and return that new sth as if it came from original request + $rv = [ $sth ]; + } + elsif (!$rv) { # should only occur for major transport-level error + #carp("no rv in response { @{[ %$response ]} }"); + $rv = [ ]; + } + + DBD::Gofer::set_err_from_response($dbh, $response); + + return (wantarray) ?
@$rv : $rv->[0]; + } + + + # Methods that should be forwarded but can be cached + for my $method (qw( + tables table_info column_info primary_key_info foreign_key_info statistics_info + data_sources type_info_all get_info + parse_trace_flags parse_trace_flag + func + )) { + my $policy_name = "cache_$method"; + my $super_name = "SUPER::$method"; + my $sub = sub { + my $dbh = shift; + my $rv; + + # if we know the remote side doesn't override the DBI's default method + # then we might as well just call the DBI's default method on the client + # (which may, in turn, call other methods that are forwarded, like get_info) + if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) { + $dbh->trace_msg(" !! $method: using local default as remote method is also default\n"); + return $dbh->$super_name(@_); + } + + my $cache; + my $cache_key; + if (my $cache_it = $dbh->{go_policy}->$policy_name(undef, $dbh, @_)) { + $cache = $dbh->{go_meta_cache} ||= {}; # keep separate from go_cache + $cache_key = sprintf "%s_wa%d(%s)", $policy_name, wantarray||0, + join(",\t", map { # XXX basic but sufficient for now + !ref($_) ? DBI::neat($_,1e6) + : ref($_) eq 'ARRAY' ? DBI::neat_list($_,1e6,",\001") + : ref($_) eq 'HASH' ? do { my @k = sort keys %$_; DBI::neat_list([@k,@{$_}{@k}],1e6,",\002") } + : do { warn "unhandled argument type ($_)"; $_ } + } @_); + if ($rv = $cache->{$cache_key}) { + $dbh->trace_msg("$method(@_) returning previously cached value ($cache_key)\n",4); + my @cache_rv = @$rv; + # if it's an sth we have to clone it + $cache_rv[0] = $cache_rv[0]->go_clone_sth if UNIVERSAL::isa($cache_rv[0],'DBI::st'); + return (wantarray) ? @cache_rv : $cache_rv[0]; + } + } + + $rv = [ (wantarray) + ? ($dbh->go_dbh_method(undef, $method, @_)) + : scalar $dbh->go_dbh_method(undef, $method, @_) + ]; + + if ($cache) { + $dbh->trace_msg("$method(@_) caching return value ($cache_key)\n",4); + my @cache_rv = @$rv; + # if it's an sth we have to clone it + #$cache_rv[0] = $cache_rv[0]->go_clone_sth + # if UNIVERSAL::isa($cache_rv[0],'DBI::st'); + $cache->{$cache_key} = \@cache_rv + unless UNIVERSAL::isa($cache_rv[0],'DBI::st'); # XXX cloning sth not yet done + } + + return (wantarray) ? @$rv : $rv->[0]; + }; + no strict 'refs'; + *$method = $sub; + } + + + # Methods that can use the DBI defaults for some situations/drivers + for my $method (qw( + quote quote_identifier + )) { # XXX keep DBD::Gofer::Policy::Base in sync + my $policy_name = "locally_$method"; + my $super_name = "SUPER::$method"; + my $sub = sub { + my $dbh = shift; + + # if we know the remote side doesn't override the DBI's default method + # then we might as well just call the DBI's default method on the client + # (which may, in turn, call other methods that are forwarded, like get_info) + if ($dbh->{dbi_default_methods}{$method} && $dbh->{go_policy}->skip_default_methods()) { + $dbh->trace_msg(" !! 
$method: using local default as remote method is also default\n"); + return $dbh->$super_name(@_); + } + + # false: use remote gofer + # 1: use local DBI default method + # code ref: use the code ref + my $locally = $dbh->{go_policy}->$policy_name($dbh, @_); + if ($locally) { + return $locally->($dbh, @_) if ref $locally eq 'CODE'; + return $dbh->$super_name(@_); + } + return $dbh->go_dbh_method(undef, $method, @_); # propagate context + }; + no strict 'refs'; + *$method = $sub; + } + + + # Methods that should always fail + for my $method (qw( + begin_work commit rollback + )) { + no strict 'refs'; + *$method = sub { return shift->set_err($DBI::stderr, "$method not available with DBD::Gofer") } + } + + + sub do { + my ($dbh, $sql, $attr, @args) = @_; + delete $dbh->{Statement}; # avoid "Modification of non-creatable hash value attempted" + $dbh->{Statement} = $sql; # for profiling and ShowErrorStatement + my $meta = { go_last_insert_id_args => $attr->{go_last_insert_id_args} }; + return $dbh->go_dbh_method($meta, 'do', $sql, $attr, @args); + } + + sub ping { + my $dbh = shift; + return $dbh->set_err(0, "can't ping while not connected") # warning + unless $dbh->SUPER::FETCH('Active'); + my $skip_ping = $dbh->{go_policy}->skip_ping(); + return ($skip_ping) ? 1 : $dbh->go_dbh_method(undef, 'ping', @_); + } + + sub last_insert_id { + my $dbh = shift; + my $response = $dbh->{go_response} or return undef; + return $response->last_insert_id; + } + + sub FETCH { + my ($dbh, $attrib) = @_; + + # FETCH is effectively already cached because the DBI checks the + # attribute cache in the handle before calling FETCH + # and this FETCH copies the value into the attribute cache + + # forward driver-private attributes (except ours) + if ($attrib =~ m/^[a-z]/ && $attrib !~ /^go_/) { + my $value = $dbh->go_dbh_method(undef, 'FETCH', $attrib); + $dbh->{$attrib} = $value; # XXX forces caching by DBI + return $dbh->{$attrib} = $value; + } + + # else pass up to DBI to handle + return $dbh->SUPER::FETCH($attrib); + } + + sub STORE { + my ($dbh, $attrib, $value) = @_; + if ($attrib eq 'AutoCommit') { + croak "Can't enable transactions when using DBD::Gofer" if !$value; + return $dbh->SUPER::STORE($attrib => ($value) ? -901 : -900); + } + return $dbh->SUPER::STORE($attrib => $value) + # we handle this attribute locally + if $dbh_local_store_attrib{$attrib} + # or it's a private_ (application) attribute + or $attrib =~ /^private_/ + # or not yet connected (ie being called by DBI->connect) + or not $dbh->FETCH('Active'); + + return $dbh->SUPER::STORE($attrib => $value) + if $DBD::Gofer::xxh_local_store_attrib_if_same_value{$attrib} + && do { # values are the same + my $crnt = $dbh->FETCH($attrib); + local $^W; + (defined($value) ^ defined($crnt)) + ? 
0 # definedness differs + : $value eq $crnt; + }; + + # dbh attributes are set at connect-time - see connect() + carp("Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer") if $dbh->FETCH('Warn'); + return $dbh->set_err($DBI::stderr, "Can't alter \$dbh->{$attrib} after handle created with DBD::Gofer"); + } + + sub disconnect { + my $dbh = shift; + $dbh->{go_transport} = undef; + $dbh->STORE(Active => 0); + } + + sub prepare { + my ($dbh, $statement, $attr)= @_; + + return $dbh->set_err($DBI::stderr, "Can't prepare when disconnected") + unless $dbh->FETCH('Active'); + + $attr = { %$attr } if $attr; # copy so we can edit + + my $policy = delete($attr->{go_policy}) || $dbh->{go_policy}; + my $lii_args = delete $attr->{go_last_insert_id_args}; + my $go_prepare = delete($attr->{go_prepare_method}) + || $dbh->{go_prepare_method} + || $policy->prepare_method($dbh, $statement, $attr) + || 'prepare'; # e.g. for code not using placeholders + my $go_cache = delete $attr->{go_cache}; + # set to undef if there are no attributes left for the actual prepare call + $attr = undef if $attr and not %$attr; + + my ($sth, $sth_inner) = DBI::_new_sth($dbh, { + Statement => $statement, + go_prepare_call => [ 0, $go_prepare, $statement, $attr ], + # go_method_calls => [], # autovivs if needed + go_request => $dbh->{go_request}, + go_transport => $dbh->{go_transport}, + go_policy => $policy, + go_last_insert_id_args => $lii_args, + go_cache => $go_cache, + }); + $sth->STORE(Active => 0); + + my $skip_prepare_check = $policy->skip_prepare_check($attr, $dbh, $statement, $attr, $sth); + if (not $skip_prepare_check) { + $sth->go_sth_method() or return undef; + } + + return $sth; + } + + sub prepare_cached { + my ($dbh, $sql, $attr, $if_active)= @_; + $attr ||= {}; + return $dbh->SUPER::prepare_cached($sql, { + %$attr, + go_prepare_method => $attr->{go_prepare_method} || 'prepare_cached', + }, $if_active); + } + + *go_cache = \&DBD::Gofer::go_cache; +} + + +{ package DBD::Gofer::st; # ====== STATEMENT ====== + $imp_data_size = 0; + use strict; + + my %sth_local_store_attrib = (%DBD::Gofer::xxh_local_store_attrib, NUM_OF_FIELDS => 1); + + sub go_sth_method { + my ($sth, $meta) = @_; + + if (my $ParamValues = $sth->{ParamValues}) { + my $ParamAttr = $sth->{ParamAttr}; + # XXX the sort here is a hack to work around a DBD::Sybase bug + # but only works properly for params 1..9 + # (reverse because of the unshift) + my @params = reverse sort keys %$ParamValues; + if (@params > 9 && ($sth->{Database}{go_dsn}||'') =~ /dbi:Sybase/) { + # if more than 9 then we need to do a proper numeric sort + # also warn to alert user of this issue + warn "Sybase param binding order hack in use"; + @params = sort { $b <=> $a } @params; + } + for my $p (@params) { + # unshift to put binds before execute call + unshift @{ $sth->{go_method_calls} }, + [ 'bind_param', $p, $ParamValues->{$p}, $ParamAttr->{$p} ]; + } + } + + my $dbh = $sth->{Database} or die "panic"; + ++$dbh->{go_request_count}; + + my $request = $sth->{go_request}; + $request->init_request($sth->{go_prepare_call}, $sth); + $request->sth_method_calls(delete $sth->{go_method_calls}) + if $sth->{go_method_calls}; + $request->sth_result_attr({}); # (currently) also indicates this is an sth request + + $request->dbh_last_insert_id_args($meta->{go_last_insert_id_args}) + if $meta->{go_last_insert_id_args}; + + my $go_policy = $sth->{go_policy}; + my $dbh_attribute_update = $go_policy->dbh_attribute_update(); + $request->dbh_attributes( $go_policy->dbh_attribute_list() ) 
+ if $dbh_attribute_update eq 'every' + or $dbh->{go_request_count}==1; + + my $transport = $sth->{go_transport} + or return $sth->set_err($DBI::stderr, "Not connected (no transport)"); + + local $transport->{go_cache} = $sth->{go_cache} + if defined $sth->{go_cache}; + + my ($response, $retransmit_sub) = $transport->transmit_request($request); + $response ||= $transport->receive_response($request, $retransmit_sub); + $sth->{go_response} = $response + or die "No response object returned by $transport"; + $dbh->{go_response} = $response; # mainly for last_insert_id + + if (my $dbh_attributes = $response->dbh_attributes) { + # XXX we don't STORE here, we just stuff the value into the attribute cache + $dbh->{$_} = $dbh_attributes->{$_} + for keys %$dbh_attributes; + # record the values returned, so we know which attribute values + # we have already fetched (see the dbh FETCH method) + $dbh->{go_dbh_attributes_fetched} = $dbh_attributes; + } + + my $rv = $response->rv; # may be undef on error + if ($response->sth_resultsets) { + # setup first resultset - including sth attributes + $sth->more_results; + } + else { + $sth->STORE(Active => 0); + $sth->{go_rows} = $rv; + } + # set error/warn/info (after more_results as that'll clear err) + DBD::Gofer::set_err_from_response($sth, $response); + + return $rv; + } + + + sub bind_param { + my ($sth, $param, $value, $attr) = @_; + $sth->{ParamValues}{$param} = $value; + $sth->{ParamAttr}{$param} = $attr + if defined $attr; # attr is sticky if not explicitly set + return 1; + } + + + sub execute { + my $sth = shift; + $sth->bind_param($_, $_[$_-1]) for (1..@_); + push @{ $sth->{go_method_calls} }, [ 'execute' ]; + my $meta = { go_last_insert_id_args => $sth->{go_last_insert_id_args} }; + return $sth->go_sth_method($meta); + } + + + sub more_results { + my $sth = shift; + + $sth->finish; + + my $response = $sth->{go_response} or do { + # e.g., we haven't sent a request yet (ie prepare then more_results) + $sth->trace_msg(" No response object present", 3); + return; + }; + + my $resultset_list = $response->sth_resultsets + or return $sth->set_err($DBI::stderr, "No sth_resultsets"); + + my $meta = shift @$resultset_list + or return undef; # no more result sets + #warn "more_results: ".Data::Dumper::Dumper($meta); + + # pull out the special non-attributes first + my ($rowset, $err, $errstr, $state) + = delete @{$meta}{qw(rowset err errstr state)}; + + # copy meta attributes into attribute cache + my $NUM_OF_FIELDS = delete $meta->{NUM_OF_FIELDS}; + $sth->STORE('NUM_OF_FIELDS', $NUM_OF_FIELDS); + # XXX need to use STORE for some? + $sth->{$_} = $meta->{$_} for keys %$meta; + + if (($NUM_OF_FIELDS||0) > 0) { + $sth->{go_rows} = ($rowset) ? @$rowset : -1; + $sth->{go_current_rowset} = $rowset; + $sth->{go_current_rowset_err} = [ $err, $errstr, $state ] + if defined $err; + $sth->STORE(Active => 1) if $rowset; + } + + return $sth; + } + + + sub go_clone_sth { + my ($sth1) = @_; + # clone an (un-fetched-from) sth - effectively undoes the initial more_results + # not 100% so just for use in caching returned sth e.g.
table_info + my $sth2 = $sth1->{Database}->prepare($sth1->{Statement}, { go_skip_prepare_check => 1 }); + $sth2->STORE($_, $sth1->{$_}) for qw(NUM_OF_FIELDS Active); + my $sth2_inner = tied %$sth2; + $sth2_inner->{$_} = $sth1->{$_} for qw(NUM_OF_PARAMS FetchHashKeyName); + die "not fully implemented yet"; + return $sth2; + } + + + sub fetchrow_arrayref { + my ($sth) = @_; + my $resultset = $sth->{go_current_rowset} || do { + # should only happen if fetch called after execute failed + my $rowset_err = $sth->{go_current_rowset_err} + || [ 1, 'no result set (did execute fail)' ]; + return $sth->set_err( @$rowset_err ); + }; + return $sth->_set_fbav(shift @$resultset) if @$resultset; + $sth->finish; # no more data so finish + return undef; + } + *fetch = \&fetchrow_arrayref; # alias + + + sub fetchall_arrayref { + my ($sth, $slice, $max_rows) = @_; + my $resultset = $sth->{go_current_rowset} || do { + # should only happen if fetch called after execute failed + my $rowset_err = $sth->{go_current_rowset_err} + || [ 1, 'no result set (did execute fail)' ]; + return $sth->set_err( @$rowset_err ); + }; + my $mode = ref($slice) || 'ARRAY'; + return $sth->SUPER::fetchall_arrayref($slice, $max_rows) + if ref($slice) or defined $max_rows; + $sth->finish; # no more data after this so finish + return $resultset; + } + + + sub rows { + return shift->{go_rows}; + } + + + sub STORE { + my ($sth, $attrib, $value) = @_; + + return $sth->SUPER::STORE($attrib => $value) + if $sth_local_store_attrib{$attrib} # handle locally + # or it's a private_ (application) attribute + or $attrib =~ /^private_/; + + # otherwise warn but do it anyway + # this will probably need refining later + my $msg = "Altering \$sth->{$attrib} won't affect proxied handle"; + Carp::carp($msg) if $sth->FETCH('Warn'); + + # XXX could perhaps do + # push @{ $sth->{go_method_calls} }, [ 'STORE', $attrib, $value ] + # if not $sth->FETCH('Executed'); + # but how to handle repeat executions? How do we know when an + # attribute is being set to affect the current resultset or the + # next execution? + # Could just always use go_method_calls I guess. + + # do the store locally anyway, just in case + $sth->SUPER::STORE($attrib => $value); + + return $sth->set_err($DBI::stderr, $msg); + } + + # sub bind_param_array + # we use DBI's default, which sets $sth->{ParamArrays}{$param} = $value + # and calls bind_param($param, undef, $attr) if $attr. + + sub execute_array { + my $sth = shift; + my $attr = shift; + $sth->bind_param_array($_, $_[$_-1]) for (1..@_); + push @{ $sth->{go_method_calls} }, [ 'execute_array', $attr ]; + return $sth->go_sth_method($attr); + } + + *go_cache = \&DBD::Gofer::go_cache; +} + +1; + +__END__ + +=head1 NAME + +DBD::Gofer - A stateless-proxy driver for communicating with a remote DBI + +=head1 SYNOPSIS + + use DBI; + + $original_dsn = "dbi:..."; # your original DBI Data Source Name + + $dbh = DBI->connect("dbi:Gofer:transport=$transport;...;dsn=$original_dsn", + $user, $passwd, \%attributes); + + ... use $dbh as if it was connected to $original_dsn ... + + +The C<transport=$transport> part specifies the name of the module to use to +transport the requests to the remote DBI. If $transport doesn't contain any +double colons then it's prefixed with C<DBD::Gofer::Transport::>. + +The C<dsn=$original_dsn> part I<must be the last element> of the DSN because +everything after C<dsn=> is assumed to be the DSN that the remote DBI should +use.
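+For example, a complete connect call using the C<null> transport described below might look like this (a minimal sketch; the inner SQLite DSN is only a placeholder for whatever DSN you normally use): + + # hedged example: "dbi:SQLite:dbname=test.db" stands in for your real DSN + my $dbh = DBI->connect( + "dbi:Gofer:transport=null;dsn=dbi:SQLite:dbname=test.db", + $user, $passwd, { RaiseError => 1 }, + );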
+ +The C<...> represents attributes that influence the operation of the Gofer +driver or transport. These are described below or in the documentation of the +transport module being used. + +=head1 DESCRIPTION + +DBD::Gofer is a DBI database driver that forwards requests to another DBI +driver, usually in a separate process, often on a separate machine. It tries to +be as transparent as possible so it appears that you are using the remote +driver directly. + +DBD::Gofer is very similar to DBD::Proxy. The major difference is that with +DBD::Gofer no state is maintained on the remote end. That means every +request contains all the information needed to create the required state. (So, +for example, every request includes the DSN to connect to.) Each request can be +sent to any available server. The server executes the request and returns a +single response that includes all the data. + +This is very similar to the way HTTP works as a stateless protocol for the web. +Each request from your web browser can be handled by a different web server process. + +=head2 Use Cases + +This may seem like pointless overhead but there are situations where this is a +very good thing. Let's consider a specific case. + +Imagine using DBD::Gofer with an http transport. Your application calls +connect(), prepare("select * from table where foo=?"), bind_param(), and execute(). +At this point DBD::Gofer builds a request containing all the information +about the method calls. It then uses the httpd transport to send that request +to an Apache web server. + +This 'dbi execute' web server executes the request (using DBI::Gofer::Execute +and related modules) and builds a response that contains all the rows of data, +if the statement returned any, along with all the attributes that describe the +results, such as $sth->{NAME}. This response is sent back to DBD::Gofer which +unpacks it and presents it to the application as if it had executed the +statement itself. + +=head2 Advantages + +Okay, but you still don't see the point? Well, let's consider what we've gained: + +=head3 Connection Pooling and Throttling + +The 'dbi execute' web server leverages all the functionality of web +infrastructure in terms of load balancing, high availability, firewalls, access +management, proxying, and caching. + +At its most basic level you get a configurable pool of persistent database connections. + +=head3 Simple Scaling + +Got thousands of processes all trying to connect to the database? You can use +DBD::Gofer to connect them to your smaller pool of 'dbi execute' web servers instead. + +=head3 Caching + +Client-side caching is as simple as adding "C<cache=1>" to the DSN. +This feature alone can be worth using DBD::Gofer for. + +=head3 Fewer Network Round-trips + +DBD::Gofer sends as few requests as possible (dependent on the policy being used). + +=head3 Thin Clients / Unsupported Platforms + +You no longer need drivers for your database on every system. DBD::Gofer is pure Perl. + +=head1 CONSTRAINTS + +There are some natural constraints imposed by the DBD::Gofer 'stateless' approach. +But not many: + +=head2 You can't change database handle attributes after connect() + +You can't change database handle attributes after you've connected. +Use the connect() call to specify all the attribute settings you want, +as sketched below. + +This is because it's critical that when a request is complete the database +handle is left in the same state it was when first connected.
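+A short illustration (the attribute values here are assumptions for the example, not prescribed by DBD::Gofer) of passing everything in the connect() call: + + # set all handle attributes up front; they are fixed for the life of the handle + my $dbh = DBI->connect($gofer_dsn, $user, $passwd, { + RaiseError => 1, + FetchHashKeyName => "NAME_lc", + });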
+ +An exception is made for attributes with names starting "C<private_>": +They can be set after connect() but the change is only applied locally. + +=head2 You can't change statement handle attributes after prepare() + +You can't change statement handle attributes after prepare(). + +An exception is made for attributes with names starting "C<private_>": +They can be set after prepare() but the change is only applied locally. + +=head2 You can't use transactions + +AutoCommit only. Transactions aren't supported. + +(In theory transactions could be supported when using a transport that +maintains a connection, like C<stream> does. If you're interested in this +please get in touch via dbi-dev@perl.org) + +=head2 You can't call driver-private sth methods + +But that's rarely needed anyway. + +=head1 GENERAL CAVEATS + +A few important things to keep in mind when using DBD::Gofer: + +=head2 Temporary tables, locks, and other per-connection persistent state + +You shouldn't expect any per-session state to persist between requests. +This includes locks and temporary tables. + +Because the server side may execute your requests via different +database connections, you can't rely on any per-connection persistent state, +such as temporary tables, being available from one request to the next. + +This is an easy trap to fall into. A good way to check for this is to test your +code with a Gofer policy package that sets the C<connect_method> policy to +'connect' to force a new connection for each request. The C<pedantic> policy does this. + +=head2 Driver-private Database Handle Attributes + +Some driver-private dbh attributes may not be available if the driver has not +implemented the private_attribute_info() method (added in DBI 1.54). + +=head2 Driver-private Statement Handle Attributes + +Driver-private sth attributes can be set in the prepare() call. TODO + +Some driver-private dbh attributes may not be available if the driver has not +implemented the private_attribute_info() method (added in DBI 1.54). + +=head2 Multiple Resultsets + +Multiple resultsets are supported only if the driver supports the more_results() method +(an exception is made for DBD::Sybase). + +=head2 Statement activity that also updates dbh attributes + +Some drivers may update one or more dbh attributes after performing activity on +a child sth. For example, DBD::mysql provides $dbh->{mysql_insertid} in addition to +$sth->{mysql_insertid}. Currently mysql_insertid is supported via a hack but a +more general mechanism is needed for other drivers to use. + +=head2 Methods that report an error always return undef + +With DBD::Gofer, a method that sets an error always returns undef or an empty list. +That shouldn't be a problem in practice because the DBI doesn't define any +methods that return meaningful values while also reporting an error. + +=head2 Subclassing only applies to client-side + +The RootClass and DbTypeSubclass attributes are not passed to the Gofer server. + +=head1 CAVEATS FOR SPECIFIC METHODS + +=head2 last_insert_id + +To enable use of last_insert_id you need to indicate to DBD::Gofer that you'd +like to use it. You do that by adding a C<go_last_insert_id_args> attribute to +the do() or prepare() method calls. For example: + + $dbh->do($sql, { go_last_insert_id_args => [...] }); + +or + + $sth = $dbh->prepare($sql, { go_last_insert_id_args => [...] }); + +The array reference should contain the arguments that you want passed to the +last_insert_id() method.
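+A sketch of the whole flow (the table and column names here are invented purely for illustration): + + $dbh->do("INSERT INTO t (name) VALUES (?)", + { go_last_insert_id_args => [ undef, undef, "t", "id" ] }, "foo"); + my $id = $dbh->last_insert_id(undef, undef, "t", "id");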
+ +=head2 execute_for_fetch + +The array methods bind_param_array() and execute_array() are supported. +When execute_array() is called the data is serialized and executed in a single +round-trip to the Gofer server. This makes it very fast, but requires enough +memory to store all the serialized data. + +The execute_for_fetch() method currently isn't optimised; it uses the DBI +fallback behaviour of executing each tuple individually. +(It could be implemented as a wrapper for execute_array() - patches welcome.) + +=head1 TRANSPORTS + +DBD::Gofer doesn't concern itself with transporting requests and responses to and fro. +For that it uses special Gofer transport modules. + +Gofer transport modules usually come in pairs: one for the 'client' DBD::Gofer +driver to use and one for the remote 'server' end. They have very similar names: + + DBD::Gofer::Transport::<foo> + DBI::Gofer::Transport::<foo> + +Sometimes the transports on the DBD and DBI sides may have different names. For +example DBD::Gofer::Transport::http is typically used with DBI::Gofer::Transport::mod_perl +(DBD::Gofer::Transport::http and DBI::Gofer::Transport::mod_perl modules are +part of the GoferTransport-http distribution). + +=head2 Bundled Transports + +Several transport modules are provided with DBD::Gofer: + +=head3 null + +The null transport is the simplest of them all. It doesn't actually transport the request anywhere. +It just serializes (freezes) the request into a string, then thaws it back into +a data structure before passing it to DBI::Gofer::Execute to execute. The same +freeze and thaw is applied to the results. + +The null transport is the best way to test if your application will work with Gofer. +Just set the DBI_AUTOPROXY environment variable to "C<dbi:Gofer:transport=null;policy=pedantic>" +(see L</Using DBI_AUTOPROXY> below) and run your application, or ideally its test suite, as usual. + +It doesn't take any parameters. + +=head3 pipeone + +The pipeone transport launches a subprocess for each request. It passes in the +request and reads the response. + +The fact that a new subprocess is started for each request ensures that the +server side is truly stateless. While this does make the transport I<very> slow, +it is useful as a way to test that your application doesn't depend on +per-connection state, such as temporary tables, persisting between requests. + +It's also useful both as a proof of concept and as a base class for the stream +driver. + +=head3 stream + +The stream driver also launches a subprocess and writes requests and reads +responses, like the pipeone transport. In this case, however, the subprocess +is expected to handle more than one request. (Though it will be automatically +restarted if it exits.) + +This is the first transport that is truly useful because it can launch the +subprocess on a remote machine using C<ssh>. This means you can now use DBD::Gofer +to easily access any database that's accessible from any system you can log in to. +You also get all the benefits of ssh, including encryption and optional compression. + +See L</Using DBI_AUTOPROXY> below for an example. + +=head2 Other Transports + +Implementing a Gofer transport is I<very> simple, and more transports are very welcome. +Just take a look at any existing transports that are similar to your needs; +a rough skeleton follows.
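+As a hedged sketch only (the package name is invented, and this assumes the client-side base class plus the two methods that DBD::Gofer itself calls, transmit_request() and receive_response(), as seen in the driver code above): + + package DBD::Gofer::Transport::mytransport; # hypothetical name + use strict; + use base 'DBD::Gofer::Transport::Base'; + + sub transmit_request { # send a frozen request; may return a response directly + my ($self, $request) = @_; + die "stub - real transport logic goes here"; + } + + sub receive_response { # wait for, and thaw, the response + my ($self, $request, $retransmit_sub) = @_; + die "stub - real transport logic goes here"; + } + + 1;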
+ +=head3 http + +See the GoferTransport-http distribution on CPAN: http://search.cpan.org/dist/GoferTransport-http/ + +=head3 Gearman + +I know Ask Bjørn Hansen has implemented a transport for the C<gearman> distributed +job system, though it's not on CPAN at the time of writing this. + +=head1 CONNECTING + +Simply prefix your existing DSN with "C<dbi:Gofer:transport=$transport;dsn=>" +where $transport is the name of the Gofer transport you want to use (see L</TRANSPORTS>). +The C<transport> and C<dsn> attributes must be specified and the C<dsn> attribute must be last. + +Other attributes can be specified in the DSN to configure DBD::Gofer and/or the +Gofer transport module being used. The main attributes after C<transport> are +C<url> and C<policy>. These and other attributes are described below. + +=head2 Using DBI_AUTOPROXY + +The simplest way to try out DBD::Gofer is to set the DBI_AUTOPROXY environment variable. +In this case you don't include the C<dsn=> part. For example: + + export DBI_AUTOPROXY="dbi:Gofer:transport=null" + +or, for a more useful example, try: + + export DBI_AUTOPROXY="dbi:Gofer:transport=stream;url=ssh:user@example.com" + +=head2 Connection Attributes + +These attributes can be specified in the DSN. They can also be passed in the +\%attr parameter of the DBI connect method by adding a "C<go_>" prefix to the name. + +=head3 transport + +Specifies the Gofer transport class to use. Required. See L</TRANSPORTS> above. + +If the value does not include C<::> then "C<DBD::Gofer::Transport::>" is prefixed. + +The transport object can be accessed via $h->{go_transport}. + +=head3 dsn + +Specifies the DSN for the remote side to connect to. Required, and must be last. + +=head3 url + +Used to tell the transport where to connect to. The exact form of the value depends on the transport used. + +=head3 policy + +Specifies the policy to use. See L</CONFIGURING BEHAVIOUR POLICY>. + +If the value does not include C<::> then "C<DBD::Gofer::Policy>" is prefixed. + +The policy object can be accessed via $h->{go_policy}. + +=head3 timeout + +Specifies a timeout, in seconds, to use when waiting for responses from the server side. + +=head3 retry_limit + +Specifies the number of times a failed request will be retried. Default is 0. + +=head3 retry_hook + +Specifies a code reference to be called to decide if a failed request should be retried. +The code reference is called like this: + + $transport = $h->{go_transport}; + $retry = $transport->go_retry_hook->($request, $response, $transport); + +If it returns true then the request will be retried, up to the C<retry_limit>. +If it returns a false but defined value then the request will not be retried. +If it returns undef then the default behaviour will be used, as if C<retry_hook> +had not been specified. + +The default behaviour is to retry requests where $request->is_idempotent is true, +or the error message matches C</induced by DBI_GOFER_RANDOM/>. + +=head3 cache + +Specifies that client-side caching should be performed. The value is the name +of a cache class to use. + +Any class implementing get($key) and set($key, $value) methods can be used. +That includes a great many powerful caching classes on CPAN, including the +Cache and Cache::Cache distributions. + +You can use "C<cache=1>" as a shortcut for "C<cache=DBI::Util::CacheMemory>". +See L<DBI::Util::CacheMemory> for a description of this simple fast default cache. + +The cache object can be accessed via $h->go_cache.
The cache object can be accessed via $h->go_cache. For example:

    $dbh->go_cache->clear; # free up memory being used by the cache

The cache keys are the frozen (serialized) requests, and the values are the
frozen responses.

The default behaviour is to only use the cache for requests where
$request->is_idempotent is true (i.e., the dbh has the ReadOnly attribute set
or the SQL statement is obviously a SELECT without a FOR UPDATE clause).

For even more control you can use the C<go_cache> attribute to pass in an
instantiated cache object. Individual methods, including prepare(), can also
specify alternative caches via the C<go_cache> attribute. For example, to
specify no caching for a particular query, you could use

    $sth = $dbh->prepare( $sql, { go_cache => 0 } );

This can be used to implement different caching policies for different statements.

It's interesting to note that DBD::Gofer can be used to add client-side caching
to any (gofer compatible) application, with no code changes and no need for a
gofer server. Just set the DBI_AUTOPROXY environment variable like this:

    DBI_AUTOPROXY='dbi:Gofer:transport=null;cache=1'

=head1 CONFIGURING BEHAVIOUR POLICY

DBD::Gofer supports a 'policy' mechanism that allows you to fine-tune the number of round-trips to the Gofer server.
The policies are grouped into classes (which may be subclassed) and referenced by the name of the class.

The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
packages and describes all the available policies.

Three policy packages are supplied with DBD::Gofer:

L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
makes more round-trips to the Gofer server.

L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.

L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.

Generally the default C<classic> policy is fine. When first testing an existing
application with Gofer it is a good idea to start with the C<pedantic> policy
and then switch to C<classic> or a custom policy for final testing.


=head1 AUTHOR

Tim Bunce, L<http://www.tim.bunce.name>

=head1 LICENCE AND COPYRIGHT

Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.

This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.

=head1 ACKNOWLEDGEMENTS

The development of DBD::Gofer and related modules was sponsored by
Shopzilla.com (L<http://Shopzilla.com>), where I currently work.

=head1 SEE ALSO

L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.

L<DBI::Gofer::Transport::Base>, L<DBD::Gofer::Policy::Base>.

L<DBI>

=head1 Caveats for specific drivers

This section aims to record issues to be aware of when using Gofer with specific drivers.
It usually only documents issues that are not natural consequences of the limitations
of the Gofer approach - as documented above.

=head1 TODO

This is just a random brain dump... (There's more in the source of the Changes file, not the pod)

Document policy mechanism

Add mechanism for transports to list config params and for Gofer to apply any that match (and warn if any left over?)

Driver-private sth attributes - set via prepare() - change DBI spec

add hooks into transport base class for checking & updating a result set cache
   ie via a standard cache interface such as:
   http://search.cpan.org/~robm/Cache-FastMmap/FastMmap.pm
   http://search.cpan.org/~bradfitz/Cache-Memcached/lib/Cache/Memcached.pm
   http://search.cpan.org/~dclinton/Cache-Cache/
   http://search.cpan.org/~cleishman/Cache/
Also caching instructions could be passed through the httpd transport layer
in such a way that appropriate http cache headers are added to the results
so that web caches (squid etc) could be used to implement the caching.
(MUST require the use of GET rather than POST requests.)

Rework handling of installed_methods to not piggyback on dbh_attributes?

Perhaps support transactions for transports where it's possible (ie null and stream)?
Would make stream transport (ie ssh) more useful to more people.

Make sth_result_attr more like dbh_attributes (using '*' etc)

Add @val = FETCH_many(@names) to DBI in C and use in Gofer/Execute?

Implement _new_sth in C.

=cut
diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/Base.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/Base.pm
new file mode 100755
index 00000000000..1725b0316ad
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/Base.pm
@@ -0,0 +1,162 @@
package DBD::Gofer::Policy::Base;

# $Id: Base.pm 10087 2007-10-16 12:42:37Z timbo $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.

use strict;
use warnings;
use Carp;

our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o);
our $AUTOLOAD;

my %policy_defaults = (
    # force connect method (unless overridden by go_connect_method=>'...' attribute)
    # if false: call same method on client as on server
    connect_method => 'connect',
    # force prepare method (unless overridden by go_prepare_method=>'...' attribute)
    # if false: call same method on client as on server
    prepare_method => 'prepare',
    skip_connect_check => 0,
    skip_default_methods => 0,
    skip_prepare_check => 0,
    skip_ping => 0,
    dbh_attribute_update => 'every',
    dbh_attribute_list => ['*'],
    locally_quote => 0,
    locally_quote_identifier => 0,
    cache_parse_trace_flags => 1,
    cache_parse_trace_flag => 1,
    cache_data_sources => 1,
    cache_type_info_all => 1,
    cache_tables => 0,
    cache_table_info => 0,
    cache_column_info => 0,
    cache_primary_key_info => 0,
    cache_foreign_key_info => 0,
    cache_statistics_info => 0,
    cache_get_info => 0,
    cache_func => 0,
);

my $base_policy_file = $INC{"DBD/Gofer/Policy/Base.pm"};

__PACKAGE__->create_policy_subs(\%policy_defaults);

sub create_policy_subs {
    my ($class, $policy_defaults) = @_;

    while ( my ($policy_name, $policy_default) = each %$policy_defaults) {
        my $policy_attr_name = "go_$policy_name";
        my $sub = sub {
            # $policy->foo($attr, ...)
            #carp "$policy_name($_[1],...)";
            # return the policy default value unless an attribute overrides it
            return (ref $_[1] && exists $_[1]->{$policy_attr_name})
                ? $_[1]->{$policy_attr_name}
                : $policy_default;
        };
        no strict 'refs';
        *{$class . '::' . 
$policy_name} = $sub;
    }
}

sub AUTOLOAD {
    carp "Unknown policy name $AUTOLOAD used";
    # only warn once
    no strict 'refs';
    *$AUTOLOAD = sub { undef };
    return undef;
}

sub new {
    my ($class, $args) = @_;
    my $policy = {};
    bless $policy, $class;
}

sub DESTROY { };

1;

=head1 NAME

DBD::Gofer::Policy::Base - Base class for DBD::Gofer policies

=head1 SYNOPSIS

  $dbh = DBI->connect("dbi:Gofer:transport=...;policy=...", ...)

=head1 DESCRIPTION

DBD::Gofer can be configured via a 'policy' mechanism that allows you to
fine-tune the number of round-trips to the Gofer server. The policies are
grouped into classes (which may be subclassed) and referenced by the name of
the class.

The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
classes and describes all the individual policy items.

The Base policy is not used directly. You should use a policy class derived from it.

=head1 POLICY CLASSES

Three policy classes are supplied with DBD::Gofer:

L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
makes more round-trips to the Gofer server.

L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.

L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.

Generally the default C<classic> policy is fine. When first testing an existing
application with Gofer it is a good idea to start with the C<pedantic> policy
and then switch to C<classic> or a custom policy for final testing.

=head1 POLICY ITEMS

These are temporary docs: See the source code for the list of policies and their defaults.

In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.

See the source code to this module for more details.

=head1 POLICY CUSTOMIZATION

XXX This area of DBD::Gofer is subject to change.

There are three ways to customize policies:

Policy classes are designed to influence the overall behaviour of DBD::Gofer
with existing, unaltered programs, so they work in a reasonably optimal way
without requiring code changes. You can implement new policy classes as
subclasses of existing policies (see the sketch in L</EXAMPLE> below).

In many cases individual policy items can be overridden on a case-by-case basis
within your application code. You do this by passing a corresponding
C<<go_<policy_name>>> attribute into individual DBI method calls.
This lets you fine-tune the behaviour for special cases.

The policy items are implemented as methods. In many cases the methods are
passed parameters relating to the DBD::Gofer code being executed. This means
the policy can implement dynamic behaviour that varies depending on the
particular circumstances, such as the particular statement being executed.

=head1 AUTHOR

Tim Bunce, L<http://www.tim.bunce.name>

=head1 LICENCE AND COPYRIGHT

Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.

This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
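
=head1 EXAMPLE

What follows is a minimal, hypothetical sketch (the package name is invented)
of a custom policy class, built as a subclass of the default C<classic> policy.
It overrides just a couple of policy items using the same create_policy_subs()
mechanism the bundled policies use; see the source above for the full list of
items and their defaults.

    package My::Gofer::Policy;
    use strict;
    use warnings;
    use base qw(DBD::Gofer::Policy::classic);

    # override only the items that should differ from the classic policy;
    # here we assume table metadata on the server changes rarely
    __PACKAGE__->create_policy_subs({
        cache_tables     => 1,
        cache_table_info => 1,
    });

    1;

Because the class name contains C<::> it is used as-is, so it can be selected
with C<policy=My::Gofer::Policy> in the DSN.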
+ +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/classic.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/classic.pm new file mode 100755 index 00000000000..8f828f0ddba --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/classic.pm @@ -0,0 +1,79 @@ +package DBD::Gofer::Policy::classic; + +# $Id: classic.pm 10087 2007-10-16 12:42:37Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o); + +use base qw(DBD::Gofer::Policy::Base); + +__PACKAGE__->create_policy_subs({ + + # always use connect_cached on server + connect_method => 'connect_cached', + + # use same methods on server as is called on client + prepare_method => '', + + # don't skip the connect check since that also sets dbh attributes + # although this makes connect more expensive, that's partly offset + # by skip_ping=>1 below, which makes connect_cached very fast. + skip_connect_check => 0, + + # most code doesn't rely on sth attributes being set after prepare + skip_prepare_check => 1, + + # we're happy to use local method if that's the same as the remote + skip_default_methods => 1, + + # ping is not important for DBD::Gofer and most transports + skip_ping => 1, + + # only update dbh attributes on first contact with server + dbh_attribute_update => 'first', + + # we'd like to set locally_* but can't because drivers differ + + # get_info results usually don't change + cache_get_info => 1, +}); + + +1; + +=head1 NAME + +DBD::Gofer::Policy::classic - The 'classic' policy for DBD::Gofer + +=head1 SYNOPSIS + + $dbh = DBI->connect("dbi:Gofer:transport=...;policy=classic", ...) + +The C<classic> policy is the default DBD::Gofer policy, so need not be included in the DSN. + +=head1 DESCRIPTION + +Temporary docs: See the source code for list of policies and their defaults. + +In a future version the policies and their defaults will be defined in the pod and parsed out at load-time. + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. + +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/pedantic.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/pedantic.pm new file mode 100755 index 00000000000..6829beafe3d --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/pedantic.pm @@ -0,0 +1,53 @@ +package DBD::Gofer::Policy::pedantic; + +# $Id: pedantic.pm 10087 2007-10-16 12:42:37Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o); + +use base qw(DBD::Gofer::Policy::Base); + +# the 'pedantic' policy is the same as the Base policy + +1; + +=head1 NAME + +DBD::Gofer::Policy::pedantic - The 'pedantic' policy for DBD::Gofer + +=head1 SYNOPSIS + + $dbh = DBI->connect("dbi:Gofer:transport=...;policy=pedantic", ...) + +=head1 DESCRIPTION + +The C<pedantic> policy tries to be as transparent as possible. To do this it +makes round-trips to the server for almost every DBI method call. 
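
For example, to exercise an existing application, unchanged, under this policy
via the bundled null transport (see L<DBD::Gofer>):

    export DBI_AUTOPROXY="dbi:Gofer:transport=null;policy=pedantic"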
+ +This is the best policy to use when first testing existing code with Gofer. +Once it's working well you should consider moving to the C<classic> policy or defining your own policy class. + +Temporary docs: See the source code for list of policies and their defaults. + +In a future version the policies and their defaults will be defined in the pod and parsed out at load-time. + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. + +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/rush.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/rush.pm new file mode 100755 index 00000000000..9cfd5826829 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Policy/rush.pm @@ -0,0 +1,90 @@ +package DBD::Gofer::Policy::rush; + +# $Id: rush.pm 10087 2007-10-16 12:42:37Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o); + +use base qw(DBD::Gofer::Policy::Base); + +__PACKAGE__->create_policy_subs({ + + # always use connect_cached on server + connect_method => 'connect_cached', + + # use same methods on server as is called on client + # (because code not using placeholders would bloat the sth cache) + prepare_method => '', + + # Skipping the connect check is fast, but it also skips + # fetching the remote dbh attributes! + # Make sure that your application doesn't need access to dbh attributes. + skip_connect_check => 1, + + # most code doesn't rely on sth attributes being set after prepare + skip_prepare_check => 1, + + # we're happy to use local method if that's the same as the remote + skip_default_methods => 1, + + # ping is almost meaningless for DBD::Gofer and most transports anyway + skip_ping => 1, + + # don't update dbh attributes at all + # XXX actually we currently need dbh_attribute_update for skip_default_methods to work + # and skip_default_methods is more valuable to us than the cost of dbh_attribute_update + dbh_attribute_update => 'none', # actually means 'first' currently + #dbh_attribute_list => undef, + + # we'd like to set locally_* but can't because drivers differ + + # in a rush assume metadata doesn't change + cache_tables => 1, + cache_table_info => 1, + cache_column_info => 1, + cache_primary_key_info => 1, + cache_foreign_key_info => 1, + cache_statistics_info => 1, + cache_get_info => 1, +}); + + +1; + +=head1 NAME + +DBD::Gofer::Policy::rush - The 'rush' policy for DBD::Gofer + +=head1 SYNOPSIS + + $dbh = DBI->connect("dbi:Gofer:transport=...;policy=rush", ...) + +=head1 DESCRIPTION + +The C<rush> policy tries to make as few round-trips as possible. +It's the opposite end of the policy spectrum to the C<pedantic> policy. + +Temporary docs: See the source code for list of policies and their defaults. + +In a future version the policies and their defaults will be defined in the pod and parsed out at load-time. + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. 
+ +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/Base.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/Base.pm new file mode 100755 index 00000000000..fa9a24b727f --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/Base.pm @@ -0,0 +1,410 @@ +package DBD::Gofer::Transport::Base; + +# $Id: Base.pm 12536 2009-02-24 22:37:09Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +use base qw(DBI::Gofer::Transport::Base); + +our $VERSION = sprintf("0.%06d", q$Revision: 12536 $ =~ /(\d+)/o); + +__PACKAGE__->mk_accessors(qw( + trace + go_dsn + go_url + go_policy + go_timeout + go_retry_hook + go_retry_limit + go_cache + cache_hit + cache_miss + cache_store +)); +__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw( + meta +)); + + +sub new { + my ($class, $args) = @_; + $args->{$_} = 0 for (qw(cache_hit cache_miss cache_store)); + $args->{keep_meta_frozen} ||= 1 if $args->{go_cache}; + #warn "args @{[ %$args ]}\n"; + return $class->SUPER::new($args); +} + + +sub _init_trace { $ENV{DBD_GOFER_TRACE} || 0 } + + +sub new_response { + my $self = shift; + return DBI::Gofer::Response->new(@_); +} + + +sub transmit_request { + my ($self, $request) = @_; + my $trace = $self->trace; + my $response; + + my ($go_cache, $request_cache_key); + if ($go_cache = $self->{go_cache}) { + $request_cache_key + = $request->{meta}{request_cache_key} + = $self->get_cache_key_for_request($request); + if ($request_cache_key) { + my $frozen_response = eval { $go_cache->get($request_cache_key) }; + if ($frozen_response) { + $self->_dump("cached response found for ".ref($request), $request) + if $trace; + $response = $self->thaw_response($frozen_response); + $self->trace_msg("transmit_request is returning a response from cache $go_cache\n") + if $trace; + ++$self->{cache_hit}; + return $response; + } + warn $@ if $@; + ++$self->{cache_miss}; + $self->trace_msg("transmit_request cache miss\n") + if $trace; + } + } + + my $to = $self->go_timeout; + my $transmit_sub = sub { + $self->trace_msg("transmit_request\n") if $trace; + local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to; + + my $response = eval { + local $SIG{PIPE} = sub { + my $extra = ($! eq "Broken pipe") ? 
"" : " ($!)"; + die "Unable to send request: Broken pipe$extra\n"; + }; + alarm($to) if $to; + $self->transmit_request_by_transport($request); + }; + alarm(0) if $to; + + if ($@) { + return $self->transport_timedout("transmit_request", $to) + if $@ eq "TIMEOUT\n"; + return $self->new_response({ err => 1, errstr => $@ }); + } + + return $response; + }; + + $response = $self->_transmit_request_with_retries($request, $transmit_sub); + + if ($response) { + my $frozen_response = delete $response->{meta}{frozen}; + $self->_store_response_in_cache($frozen_response, $request_cache_key) + if $request_cache_key; + } + + $self->trace_msg("transmit_request is returning a response itself\n") + if $trace && $response; + + return $response unless wantarray; + return ($response, $transmit_sub); +} + + +sub _transmit_request_with_retries { + my ($self, $request, $transmit_sub) = @_; + my $response; + do { + $response = $transmit_sub->(); + } while ( $response && $self->response_needs_retransmit($request, $response) ); + return $response; +} + + +sub receive_response { + my ($self, $request, $retransmit_sub) = @_; + my $to = $self->go_timeout; + + my $receive_sub = sub { + $self->trace_msg("receive_response\n"); + local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to; + + my $response = eval { + alarm($to) if $to; + $self->receive_response_by_transport($request); + }; + alarm(0) if $to; + + if ($@) { + return $self->transport_timedout("receive_response", $to) + if $@ eq "TIMEOUT\n"; + return $self->new_response({ err => 1, errstr => $@ }); + } + return $response; + }; + + my $response; + do { + $response = $receive_sub->(); + if ($self->response_needs_retransmit($request, $response)) { + $response = $self->_transmit_request_with_retries($request, $retransmit_sub); + $response ||= $receive_sub->(); + } + } while ( $self->response_needs_retransmit($request, $response) ); + + if ($response) { + my $frozen_response = delete $response->{meta}{frozen}; + my $request_cache_key = $request->{meta}{request_cache_key}; + $self->_store_response_in_cache($frozen_response, $request_cache_key) + if $request_cache_key && $self->{go_cache}; + } + + return $response; +} + + +sub response_retry_preference { + my ($self, $request, $response) = @_; + + # give the user a chance to express a preference (or undef for default) + if (my $go_retry_hook = $self->go_retry_hook) { + my $retry = $go_retry_hook->($request, $response, $self); + $self->trace_msg(sprintf "go_retry_hook returned %s\n", + (defined $retry) ? $retry : 'undef'); + return $retry if defined $retry; + } + + # This is the main decision point. We don't retry requests that got + # as far as executing because the error is probably from the database + # (not transport) so retrying is unlikely to help. But note that any + # severe transport error occuring after execute is likely to return + # a new response object that doesn't have the execute flag set. Beware! + return 0 if $response->executed_flag_set; + + return 1 if ($response->errstr || '') =~ m/induced by DBI_GOFER_RANDOM/; + + return 1 if $request->is_idempotent; # i.e. 
is SELECT or ReadOnly was set + + return undef; # we couldn't make up our mind +} + + +sub response_needs_retransmit { + my ($self, $request, $response) = @_; + + my $err = $response->err + or return 0; # nothing went wrong + + my $retry = $self->response_retry_preference($request, $response); + + if (!$retry) { # false or undef + $self->trace_msg("response_needs_retransmit: response not suitable for retry\n"); + return 0; + } + + # we'd like to retry but have we retried too much already? + + my $retry_limit = $self->go_retry_limit; + if (!$retry_limit) { + $self->trace_msg("response_needs_retransmit: retries disabled (retry_limit not set)\n"); + return 0; + } + + my $request_meta = $request->meta; + my $retry_count = $request_meta->{retry_count} || 0; + if ($retry_count >= $retry_limit) { + $self->trace_msg("response_needs_retransmit: $retry_count is too many retries\n"); + # XXX should be possible to disable altering the err + $response->errstr(sprintf "%s (after %d retries by gofer)", $response->errstr, $retry_count); + return 0; + } + + # will retry now, do the admin + ++$retry_count; + $self->trace_msg("response_needs_retransmit: retry $retry_count\n"); + + # hook so response_retry_preference can defer some code execution + # until we've checked retry_count and retry_limit. + if (ref $retry eq 'CODE') { + $retry->($retry_count, $retry_limit) + and warn "should return false"; # protect future use + } + + ++$request_meta->{retry_count}; # update count for this request object + ++$self->meta->{request_retry_count}; # update cumulative transport stats + + return 1; +} + + +sub transport_timedout { + my ($self, $method, $timeout) = @_; + $timeout ||= $self->go_timeout; + return $self->new_response({ err => 1, errstr => "DBD::Gofer $method timed-out after $timeout seconds" }); +} + + +# return undef if we don't want to cache this request +# subclasses may use more specialized rules +sub get_cache_key_for_request { + my ($self, $request) = @_; + + # we only want to cache idempotent requests + # is_idempotent() is true if GOf_REQUEST_IDEMPOTENT or GOf_REQUEST_READONLY set + return undef if not $request->is_idempotent; + + # XXX would be nice to avoid the extra freeze here + my $key = $self->freeze_request($request, undef, 1); + + #use Digest::MD5; warn "get_cache_key_for_request: ".Digest::MD5::md5_base64($key)."\n"; + + return $key; +} + + +sub _store_response_in_cache { + my ($self, $frozen_response, $request_cache_key) = @_; + my $go_cache = $self->{go_cache} + or return; + + # new() ensures that enabling go_cache also enables keep_meta_frozen + warn "No meta frozen in response" if !$frozen_response; + warn "No request_cache_key" if !$request_cache_key; + + if ($frozen_response && $request_cache_key) { + $self->trace_msg("receive_response added response to cache $go_cache\n"); + eval { $go_cache->set($request_cache_key, $frozen_response) }; + warn $@ if $@; + ++$self->{cache_store}; + } +} + +1; + +__END__ + +=head1 NAME + +DBD::Gofer::Transport::Base - base class for DBD::Gofer client transports + +=head1 SYNOPSIS + + my $remote_dsn = "..." + DBI->connect("dbi:Gofer:transport=...;url=...;timeout=...;retry_limit=...;dsn=$remote_dsn",...) + +or, enable by setting the DBI_AUTOPROXY environment variable: + + export DBI_AUTOPROXY='dbi:Gofer:transport=...;url=...' + +which will force I<all> DBI connections to be made via that Gofer server. + +=head1 DESCRIPTION + +This is the base class for all DBD::Gofer client transports. 
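
The transport object also keeps simple counters of client-side cache activity
via the cache_hit, cache_miss and cache_store accessors created above. A small
sketch, assuming a connection made with caching enabled:

    my $transport = $dbh->{go_transport};
    printf "gofer cache: %d hits, %d misses, %d stores\n",
        $transport->cache_hit, $transport->cache_miss, $transport->cache_store;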

=head1 ATTRIBUTES

Gofer transport attributes can be specified either in the attributes parameter
of the connect() method call, or in the DSN string. When used in the DSN
string, attribute names don't have the C<go_> prefix.

=head2 go_dsn

The full DBI DSN that the Gofer server should connect to on your behalf.

When used in the DSN it must be the last element in the DSN string.

=head2 go_timeout

A time limit for sending a request and receiving a response. Some drivers may
implement sending and receiving as separate steps, in which case (currently)
the timeout applies to each separately.

If a request needs to be resent then the timeout is restarted for each sending
of a request and receiving of a response.

=head2 go_retry_limit

The maximum number of times a request may be retried. The default is 2.

=head2 go_retry_hook

This subroutine reference is called, if defined, for each response received where $response->err is true.

The subroutine is passed three parameters: the request object, the response object, and the transport object.

If it returns an undefined value then the default retry behaviour is used. See L</RETRY ON ERROR> below.

If it returns a defined but false value then the request is not resent.

If it returns a true value then the request is resent, so long as the number of retries does not exceed C<go_retry_limit>.

=head1 RETRY ON ERROR

The default retry on error behaviour is:

 - Retry if the error was due to DBI_GOFER_RANDOM. See L<DBI::Gofer::Execute>.

 - Retry if $request->is_idempotent returns true. See L<DBI::Gofer::Request>.

A retry won't be allowed if the number of previous retries has reached C<go_retry_limit>.

=head1 TRACING

Tracing of gofer requests and responses can be enabled by setting the
C<DBD_GOFER_TRACE> environment variable. A value of 1 gives a reasonably
compact summary of each request and response. A value of 2 or more gives a
detailed, and voluminous, dump.

The trace is written using DBI->trace_msg() and so is written to the default
DBI trace output, which is usually STDERR.

=head1 METHODS

I<This section is currently far from complete.>

=head2 response_retry_preference

  $retry = $transport->response_retry_preference($request, $response);

The response_retry_preference is called by DBD::Gofer when considering if a
request should be retried after an error.

Returns true (would like to retry), false (must not retry), undef (no preference).

If a true value is returned in the form of a CODE ref then, if DBD::Gofer does
decide to retry the request, it calls the code ref passing $retry_count, $retry_limit.
Can be used for logging and/or to implement exponential backoff behaviour.
Currently the called code must return using C<return;> to allow for future extensions.

=head1 AUTHOR

Tim Bunce, L<http://www.tim.bunce.name>

=head1 LICENCE AND COPYRIGHT

Copyright (c) 2007-2008, Tim Bunce, Ireland. All rights reserved.

This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.

=head1 SEE ALSO

L<DBD::Gofer>, L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
+ +and some example transports: + +L<DBD::Gofer::Transport::stream> + +L<DBD::Gofer::Transport::http> + +L<DBI::Gofer::Transport::mod_perl> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/null.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/null.pm new file mode 100755 index 00000000000..4b8d86c6271 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/null.pm @@ -0,0 +1,111 @@ +package DBD::Gofer::Transport::null; + +# $Id: null.pm 10087 2007-10-16 12:42:37Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +use base qw(DBD::Gofer::Transport::Base); + +use DBI::Gofer::Execute; + +our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o); + +__PACKAGE__->mk_accessors(qw( + pending_response + transmit_count +)); + +my $executor = DBI::Gofer::Execute->new(); + + +sub transmit_request_by_transport { + my ($self, $request) = @_; + $self->transmit_count( ($self->transmit_count()||0) + 1 ); # just for tests + + my $frozen_request = $self->freeze_request($request); + + # ... + # the request is magically transported over to ... ourselves + # ... + + my $response = $executor->execute_request( $self->thaw_request($frozen_request, undef, 1) ); + + # put response 'on the shelf' ready for receive_response() + $self->pending_response( $response ); + + return undef; +} + + +sub receive_response_by_transport { + my $self = shift; + + my $response = $self->pending_response; + + my $frozen_response = $self->freeze_response($response, undef, 1); + + # ... + # the response is magically transported back to ... ourselves + # ... + + return $self->thaw_response($frozen_response); +} + + +1; +__END__ + +=head1 NAME + +DBD::Gofer::Transport::null - DBD::Gofer client transport for testing + +=head1 SYNOPSIS + + my $original_dsn = "..." + DBI->connect("dbi:Gofer:transport=null;dsn=$original_dsn",...) + +or, enable by setting the DBI_AUTOPROXY environment variable: + + export DBI_AUTOPROXY="dbi:Gofer:transport=null" + +=head1 DESCRIPTION + +Connect via DBD::Gofer but execute the requests within the same process. + +This is a quick and simple way to test applications for compatibility with the +(few) restrictions that DBD::Gofer imposes. + +It also provides a simple, portable way for the DBI test suite to be used to +test DBD::Gofer on all platforms with no setup. + +Also, by measuring the difference in performance between normal connections and +connections via C<dbi:Gofer:transport=null> the basic cost of using DBD::Gofer +can be measured. Furthermore, the additional cost of more advanced transports can be +isolated by comparing their performance with the null transport. + +The C<t/85gofer.t> script in the DBI distribution includes a comparative benchmark. + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. 
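
=head1 EXAMPLE

The DESCRIPTION above suggests measuring the basic cost of using DBD::Gofer by
comparing direct connections with connections via the null transport. A rough,
hypothetical sketch of such a comparison (the DSN is invented; any local DSN
will do):

    use strict;
    use warnings;
    use DBI;
    use Benchmark qw(cmpthese);

    my $dsn = "dbi:SQLite:dbname=bench.db";   # hypothetical local DSN

    my $direct = DBI->connect($dsn, undef, undef, { RaiseError => 1 });
    my $gofer  = DBI->connect("dbi:Gofer:transport=null;dsn=$dsn",
                              undef, undef, { RaiseError => 1 });

    cmpthese(-2, {
        direct => sub { $direct->selectrow_array("SELECT 1") },
        gofer  => sub { $gofer->selectrow_array("SELECT 1") },
    });

The C<t/85gofer.t> script mentioned above does this more thoroughly.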
+ +=head1 SEE ALSO + +L<DBD::Gofer::Transport::Base> + +L<DBD::Gofer> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/pipeone.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/pipeone.pm new file mode 100755 index 00000000000..3df2bf3f103 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/pipeone.pm @@ -0,0 +1,253 @@ +package DBD::Gofer::Transport::pipeone; + +# $Id: pipeone.pm 10087 2007-10-16 12:42:37Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +use Carp; +use Fcntl; +use IO::Select; +use IPC::Open3 qw(open3); +use Symbol qw(gensym); + +use base qw(DBD::Gofer::Transport::Base); + +our $VERSION = sprintf("0.%06d", q$Revision: 10087 $ =~ /(\d+)/o); + +__PACKAGE__->mk_accessors(qw( + connection_info + go_perl +)); + + +sub new { + my ($self, $args) = @_; + $args->{go_perl} ||= do { + ($INC{"blib.pm"}) ? [ $^X, '-Mblib' ] : [ $^X ]; + }; + if (not ref $args->{go_perl}) { + # user can override the perl to be used, either with an array ref + # containing the command name and args to use, or with a string + # (ie via the DSN) in which case, to enable args to be passed, + # we split on two or more consecutive spaces (otherwise the path + # to perl couldn't contain a space itself). + $args->{go_perl} = [ split /\s{2,}/, $args->{go_perl} ]; + } + return $self->SUPER::new($args); +} + + +# nonblock($fh) puts filehandle into nonblocking mode +sub nonblock { + my $fh = shift; + my $flags = fcntl($fh, F_GETFL, 0) + or croak "Can't get flags for filehandle $fh: $!"; + fcntl($fh, F_SETFL, $flags | O_NONBLOCK) + or croak "Can't make filehandle $fh nonblocking: $!"; +} + + +sub start_pipe_command { + my ($self, $cmd) = @_; + $cmd = [ $cmd ] unless ref $cmd eq 'ARRAY'; + + # if it's important that the subprocess uses the same + # (versions of) modules as us then the caller should + # set PERL5LIB itself. + + # limit various forms of insanity, for now + local $ENV{DBI_TRACE}; # use DBI_GOFER_TRACE instead + local $ENV{DBI_AUTOPROXY}; + local $ENV{DBI_PROFILE}; + + my ($wfh, $rfh, $efh) = (gensym, gensym, gensym); + my $pid = open3($wfh, $rfh, $efh, @$cmd) + or die "error starting @$cmd: $!\n"; + if ($self->trace) { + $self->trace_msg(sprintf("Started pid $pid: @$cmd {fd: w%d r%d e%d, ppid=$$}\n", fileno $wfh, fileno $rfh, fileno $efh),0); + } + nonblock($rfh); + nonblock($efh); + my $ios = IO::Select->new($rfh, $efh); + + return { + cmd=>$cmd, + pid=>$pid, + wfh=>$wfh, rfh=>$rfh, efh=>$efh, + ios=>$ios, + }; +} + + +sub cmd_as_string { + my $self = shift; + # XXX meant to return a properly shell-escaped string suitable for system + # but its only for debugging so that can wait + my $connection_info = $self->connection_info; + return join " ", map { (m/^[-:\w]*$/) ? 
$_ : "'$_'" } @{$connection_info->{cmd}}; +} + + +sub transmit_request_by_transport { + my ($self, $request) = @_; + + my $frozen_request = $self->freeze_request($request); + + my $cmd = [ @{$self->go_perl}, qw(-MDBI::Gofer::Transport::pipeone -e run_one_stdio)]; + my $info = $self->start_pipe_command($cmd); + + my $wfh = delete $info->{wfh}; + # send frozen request + local $\; + print $wfh $frozen_request + or warn "error writing to @$cmd: $!\n"; + # indicate that there's no more + close $wfh + or die "error closing pipe to @$cmd: $!\n"; + + $self->connection_info( $info ); + return; +} + + +sub read_response_from_fh { + my ($self, $fh_actions) = @_; + my $trace = $self->trace; + + my $info = $self->connection_info || die; + my ($ios) = @{$info}{qw(ios)}; + my $errors = 0; + my $complete; + + die "No handles to read response from" unless $ios->count; + + while ($ios->count) { + my @readable = $ios->can_read(); + for my $fh (@readable) { + local $_; + my $actions = $fh_actions->{$fh} || die "panic: no action for $fh"; + my $rv = sysread($fh, $_='', 1024*31); # to fit in 32KB slab + unless ($rv) { # error (undef) or end of file (0) + my $action; + unless (defined $rv) { # was an error + $self->trace_msg("error on handle $fh: $!\n") if $trace >= 4; + $action = $actions->{error} || $actions->{eof}; + ++$errors; + # XXX an error may be a permenent condition of the handle + # if so we'll loop here - not good + } + else { + $action = $actions->{eof}; + $self->trace_msg("eof on handle $fh\n") if $trace >= 4; + } + if ($action->($fh)) { + $self->trace_msg("removing $fh from handle set\n") if $trace >= 4; + $ios->remove($fh); + } + next; + } + # action returns true if the response is now complete + # (we finish all handles + $actions->{read}->($fh) && ++$complete; + } + last if $complete; + } + return $errors; +} + + +sub receive_response_by_transport { + my $self = shift; + + my $info = $self->connection_info || die; + my ($pid, $rfh, $efh, $ios, $cmd) = @{$info}{qw(pid rfh efh ios cmd)}; + + my $frozen_response; + my $stderr_msg; + + $self->read_response_from_fh( { + $efh => { + error => sub { warn "error reading response stderr: $!"; 1 }, + eof => sub { warn "eof on stderr" if 0; 1 }, + read => sub { $stderr_msg .= $_; 0 }, + }, + $rfh => { + error => sub { warn "error reading response: $!"; 1 }, + eof => sub { warn "eof on stdout" if 0; 1 }, + read => sub { $frozen_response .= $_; 0 }, + }, + }); + + waitpid $info->{pid}, 0 + or warn "waitpid: $!"; # XXX do something more useful? + + die ref($self)." command (@$cmd) failed: $stderr_msg" + if not $frozen_response; # no output on stdout at all + + # XXX need to be able to detect and deal with corruption + my $response = $self->thaw_response($frozen_response); + + if ($stderr_msg) { + # add stderr messages as warnings (for PrintWarn) + $response->add_err(0, $stderr_msg, undef, $self->trace) + # but ignore warning from old version of blib + unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/; + } + + return $response; +} + + +1; + +__END__ + +=head1 NAME + +DBD::Gofer::Transport::pipeone - DBD::Gofer client transport for testing + +=head1 SYNOPSIS + + $original_dsn = "..."; + DBI->connect("dbi:Gofer:transport=pipeone;dsn=$original_dsn",...) + +or, enable by setting the DBI_AUTOPROXY environment variable: + + export DBI_AUTOPROXY="dbi:Gofer:transport=pipeone" + +=head1 DESCRIPTION + +Connect via DBD::Gofer and execute each request by starting executing a subprocess. + +This is, as you might imagine, spectacularly inefficient! 
+ +It's only intended for testing. Specifically it demonstrates that the server +side is completely stateless. + +It also provides a base class for the much more useful L<DBD::Gofer::Transport::stream> +transport. + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. + +=head1 SEE ALSO + +L<DBD::Gofer::Transport::Base> + +L<DBD::Gofer> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/stream.pm b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/stream.pm new file mode 100755 index 00000000000..ed31637e733 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Gofer/Transport/stream.pm @@ -0,0 +1,291 @@ +package DBD::Gofer::Transport::stream; + +# $Id: stream.pm 10905 2008-03-10 22:01:04Z timbo $ +# +# Copyright (c) 2007, Tim Bunce, Ireland +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +use strict; +use warnings; + +use Carp; + +use base qw(DBD::Gofer::Transport::pipeone); + +our $VERSION = sprintf("0.%06d", q$Revision: 10905 $ =~ /(\d+)/o); + +__PACKAGE__->mk_accessors(qw( + go_persist +)); + +my $persist_all = 5; +my %persist; + + +sub _connection_key { + my ($self) = @_; + return join "~", $self->go_url||"", @{ $self->go_perl || [] }; +} + + +sub _connection_get { + my ($self) = @_; + + my $persist = $self->go_persist; # = 0 can force non-caching + $persist = $persist_all if not defined $persist; + my $key = ($persist) ? $self->_connection_key : ''; + if ($persist{$key} && $self->_connection_check($persist{$key})) { + $self->trace_msg("reusing persistent connection $key\n",0) if $self->trace >= 1; + return $persist{$key}; + } + + my $connection = $self->_make_connection; + + if ($key) { + %persist = () if keys %persist > $persist_all; # XXX quick hack to limit subprocesses + $persist{$key} = $connection; + } + + return $connection; +} + + +sub _connection_check { + my ($self, $connection) = @_; + $connection ||= $self->connection_info; + my $pid = $connection->{pid}; + my $ok = (kill 0, $pid); + $self->trace_msg("_connection_check: $ok (pid $$)\n",0) if $self->trace; + return $ok; +} + + +sub _connection_kill { + my ($self) = @_; + my $connection = $self->connection_info; + my ($pid, $wfh, $rfh, $efh) = @{$connection}{qw(pid wfh rfh efh)}; + $self->trace_msg("_connection_kill: closing write handle\n",0) if $self->trace; + # closing the write file handle should be enough, generally + close $wfh; + # in future we may want to be more aggressive + #close $rfh; close $efh; kill 15, $pid + # but deleting from the persist cache... + delete $persist{ $self->_connection_key }; + # ... 
and removing the connection_info should suffice + $self->connection_info( undef ); + return; +} + + +sub _make_connection { + my ($self) = @_; + + my $go_perl = $self->go_perl; + my $cmd = [ @$go_perl, qw(-MDBI::Gofer::Transport::stream -e run_stdio_hex)]; + + #push @$cmd, "DBI_TRACE=2=/tmp/goferstream.log", "sh", "-c"; + if (my $url = $self->go_url) { + die "Only 'ssh:user\@host' style url supported by this transport" + unless $url =~ s/^ssh://; + my $ssh = $url; + my $setup_env = join "||", map { "source $_ 2>/dev/null" } qw(.bash_profile .bash_login .profile); + my $setup = $setup_env.q{; exec "$@"}; + # don't use $^X on remote system by default as it's possibly wrong + $cmd->[0] = 'perl' if "@$go_perl" eq $^X; + # -x not only 'Disables X11 forwarding' but also makes connections *much* faster + unshift @$cmd, qw(ssh -xq), split(' ', $ssh), qw(bash -c), $setup; + } + + $self->trace_msg("new connection: @$cmd\n",0) if $self->trace; + + # XXX add a handshake - some message from DBI::Gofer::Transport::stream that's + # sent as soon as it starts that we can wait for to report success - and soak up + # and report useful warnings etc from ssh before we get it? Increases latency though. + my $connection = $self->start_pipe_command($cmd); + return $connection; +} + + +sub transmit_request_by_transport { + my ($self, $request) = @_; + my $trace = $self->trace; + + my $connection = $self->connection_info || do { + my $con = $self->_connection_get; + $self->connection_info( $con ); + $con; + }; + + my $encoded_request = unpack("H*", $self->freeze_request($request)); + $encoded_request .= "\015\012"; + + my $wfh = $connection->{wfh}; + $self->trace_msg(sprintf("transmit_request_by_transport: to fh %s fd%d\n", $wfh, fileno($wfh)),0) + if $trace >= 4; + + # send frozen request + local $\; + print $wfh $encoded_request # autoflush enabled + or do { + # XXX should make new connection and retry + $self->_connection_kill; + die "Error sending request: $!"; + }; + $self->trace_msg("Request sent: $encoded_request\n",0) if $trace >= 4; + + return; +} + + +sub receive_response_by_transport { + my $self = shift; + my $trace = $self->trace; + + $self->trace_msg("receive_response_by_transport: awaiting response\n",0) if $trace >= 4; + my $connection = $self->connection_info || die; + my ($pid, $rfh, $efh, $cmd) = @{$connection}{qw(pid rfh efh cmd)}; + + my $errno = 0; + my $encoded_response; + my $stderr_msg; + + $self->read_response_from_fh( { + $efh => { + error => sub { warn "error reading response stderr: $!"; $errno||=$!; 1 }, + eof => sub { warn "eof reading efh" if $trace >= 4; 1 }, + read => sub { $stderr_msg .= $_; 0 }, + }, + $rfh => { + error => sub { warn "error reading response: $!"; $errno||=$!; 1 }, + eof => sub { warn "eof reading rfh" if $trace >= 4; 1 }, + read => sub { $encoded_response .= $_; ($encoded_response=~s/\015\012$//) ? 1 : 0 }, + }, + }); + + # if we got no output on stdout at all then the command has + # probably exited, possibly with an error to stderr. + # Turn this situation into a reasonably useful DBI error. + if (not $encoded_response) { + my @msg; + push @msg, "error while reading response: $errno" if $errno; + if ($stderr_msg) { + chomp $stderr_msg; + push @msg, sprintf "error reported by \"%s\" (pid %d%s): %s", + $self->cmd_as_string, + $pid, ((kill 0, $pid) ? 
"" : ", exited"), + $stderr_msg; + } + die join(", ", "No response received", @msg)."\n"; + } + + $self->trace_msg("Response received: $encoded_response\n",0) + if $trace >= 4; + + $self->trace_msg("Gofer stream stderr message: $stderr_msg\n",0) + if $stderr_msg && $trace; + + my $frozen_response = pack("H*", $encoded_response); + + # XXX need to be able to detect and deal with corruption + my $response = $self->thaw_response($frozen_response); + + if ($stderr_msg) { + # add stderr messages as warnings (for PrintWarn) + $response->add_err(0, $stderr_msg, undef, $trace) + # but ignore warning from old version of blib + unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/; + } + + return $response; +} + +sub transport_timedout { + my $self = shift; + $self->_connection_kill; + return $self->SUPER::transport_timedout(@_); +} + +1; + +__END__ + +=head1 NAME + +DBD::Gofer::Transport::stream - DBD::Gofer transport for stdio streaming + +=head1 SYNOPSIS + + DBI->connect('dbi:Gofer:transport=stream;url=ssh:username@host.example.com;dsn=dbi:...',...) + +or, enable by setting the DBI_AUTOPROXY environment variable: + + export DBI_AUTOPROXY='dbi:Gofer:transport=stream;url=ssh:username@host.example.com' + +=head1 DESCRIPTION + +Without the C<url=> parameter it launches a subprocess as + + perl -MDBI::Gofer::Transport::stream -e run_stdio_hex + +and feeds requests into it and reads responses from it. But that's not very useful. + +With a C<url=ssh:username@host.example.com> parameter it uses ssh to launch the subprocess +on a remote system. That's much more useful! + +It gives you secure remote access to DBI databases on any system you can login to. +Using ssh also gives you optional compression and many other features (see the +ssh manual for how to configure that and many other options via ~/.ssh/config file). + +The actual command invoked is something like: + + ssh -xq ssh:username@host.example.com bash -c $setup $run + +where $run is the command shown above, and $command is + + . .bash_profile 2>/dev/null || . .bash_login 2>/dev/null || . .profile 2>/dev/null; exec "$@" + +which is trying (in a limited and fairly unportable way) to setup the environment +(PATH, PERL5LIB etc) as it would be if you had logged in to that system. + +The "C<perl>" used in the command will default to the value of $^X when not using ssh. +On most systems that's the full path to the perl that's currently executing. + + +=head1 PERSISTENCE + +Currently gofer stream connections persist (remain connected) after all +database handles have been disconnected. This makes later connections in the +same process very fast. + +Currently up to 5 different gofer stream connections (based on url) can +persist. If more than 5 are in the cache when a new connection is made then +the cache is cleared before adding the new connection. Simple but effective. + +=head1 TO DO + +Document go_perl attribute + +Automatically reconnect (within reason) if there's a transport error. + +Decide on default for persistent connection - on or off? limits? ttl? + +=head1 AUTHOR + +Tim Bunce, L<http://www.tim.bunce.name> + +=head1 LICENCE AND COPYRIGHT + +Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved. + +This module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. See L<perlartistic>. 
+ +=head1 SEE ALSO + +L<DBD::Gofer::Transport::Base> + +L<DBD::Gofer> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/NullP.pm b/Master/tlpkg/tlperl/lib/DBD/NullP.pm new file mode 100755 index 00000000000..51cc259770f --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/NullP.pm @@ -0,0 +1,151 @@ +{ + package DBD::NullP; + + require DBI; + require Carp; + + @EXPORT = qw(); # Do NOT @EXPORT anything. + $VERSION = sprintf("12.%06d", q$Revision: 9215 $ =~ /(\d+)/o); + +# $Id: NullP.pm 9215 2007-03-08 17:03:58Z timbo $ +# +# Copyright (c) 1994-2007 Tim Bunce +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + + $drh = undef; # holds driver handle once initialised + + sub driver{ + return $drh if $drh; + my($class, $attr) = @_; + $class .= "::dr"; + ($drh) = DBI::_new_drh($class, { + 'Name' => 'NullP', + 'Version' => $VERSION, + 'Attribution' => 'DBD Example Null Perl stub by Tim Bunce', + }, [ qw'example implementors private data']); + $drh; + } + + sub CLONE { + undef $drh; + } +} + + +{ package DBD::NullP::dr; # ====== DRIVER ====== + $imp_data_size = 0; + use strict; + + sub connect { # normally overridden, but a handy default + my $dbh = shift->SUPER::connect(@_) + or return; + $dbh->STORE(Active => 1); + $dbh; + } + + + sub DESTROY { undef } +} + + +{ package DBD::NullP::db; # ====== DATABASE ====== + $imp_data_size = 0; + use strict; + use Carp qw(croak); + + sub prepare { + my ($dbh, $statement)= @_; + + my ($outer, $sth) = DBI::_new_sth($dbh, { + 'Statement' => $statement, + }); + + return $outer; + } + + sub FETCH { + my ($dbh, $attrib) = @_; + # In reality this would interrogate the database engine to + # either return dynamic values that cannot be precomputed + # or fetch and cache attribute values too expensive to prefetch. + return $dbh->SUPER::FETCH($attrib); + } + + sub STORE { + my ($dbh, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + if ($attrib eq 'AutoCommit') { + Carp::croak("Can't disable AutoCommit") unless $value; + # convert AutoCommit values to magic ones to let DBI + # know that the driver has 'handled' the AutoCommit attribute + $value = ($value) ? 
-901 : -900; + } + return $dbh->SUPER::STORE($attrib, $value); + } + + sub ping { 1 } + + sub disconnect { + shift->STORE(Active => 0); + } + +} + + +{ package DBD::NullP::st; # ====== STATEMENT ====== + $imp_data_size = 0; + use strict; + + sub bind_param { + my ($sth, $param, $value, $attr) = @_; + $sth->{ParamValues}{$param} = $value; + $sth->{ParamAttr}{$param} = $attr + if defined $attr; # attr is sticky if not explicitly set + return 1; + } + + sub execute { + my $sth = shift; + $sth->bind_param($_, $_[$_-1]) for (1..@_); + if ($sth->{Statement} =~ m/^ \s* SELECT \s+/xmsi) { + $sth->STORE(NUM_OF_FIELDS => 1); + $sth->{NAME} = [ "fieldname" ]; + # just for the sake of returning something, we return the params + my $params = $sth->{ParamValues} || {}; + $sth->{dbd_nullp_data} = [ @{$params}{ sort keys %$params } ]; + $sth->STORE(Active => 1); + } + 1; + } + + sub fetchrow_arrayref { + my $sth = shift; + my $data = $sth->{dbd_nullp_data}; + if (!$data || !@$data) { + $sth->finish; # no more data so finish + return undef; + } + return $sth->_set_fbav(shift @$data); + } + *fetch = \&fetchrow_arrayref; # alias + + sub FETCH { + my ($sth, $attrib) = @_; + # would normally validate and only fetch known attributes + # else pass up to DBI to handle + return $sth->SUPER::FETCH($attrib); + } + + sub STORE { + my ($sth, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + return $sth->SUPER::STORE($attrib, $value); + } + +} + +1; diff --git a/Master/tlpkg/tlperl/lib/DBD/ODBC.pm b/Master/tlpkg/tlperl/lib/DBD/ODBC.pm new file mode 100755 index 00000000000..3b77711cd5d --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/ODBC.pm @@ -0,0 +1,1690 @@ +# $Id: ODBC.pm 13205 2009-08-19 08:51:11Z mjevans $ +# +# Copyright (c) 1994,1995,1996,1998 Tim Bunce +# portions Copyright (c) 1997-2004 Jeff Urlwin +# portions Copyright (c) 1997 Thomas K. Wenrich +# portions Copyright (c) 2007-2009 Martin J. Evans +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + +## no critic (ProhibitManyArgs ProhibitMultiplePackages) + +require 5.006; + +$DBD::ODBC::VERSION = '1.23'; + +{ + ## no critic (ProhibitMagicNumbers ProhibitExplicitISA) + ## no critic (ProhibitPackageVars) + package DBD::ODBC; + + use DBI (); + use DynaLoader (); + use Exporter (); + + @ISA = qw(Exporter DynaLoader); + + # my $Revision = substr(q$Id: ODBC.pm 13205 2009-08-19 08:51:11Z mjevans $, 13,2); + + require_version DBI 1.21; + + bootstrap DBD::ODBC $VERSION; + + $err = 0; # holds error code for DBI::err + $errstr = q{}; # holds error string for DBI::errstr + $sqlstate = "00000"; + $drh = undef; # holds driver handle once initialised + + sub parse_trace_flag { + my ($class, $name) = @_; + return 0x02_00_00_00 if $name eq 'odbcunicode'; + return 0x04_00_00_00 if $name eq 'odbcconnection'; + return DBI::parse_trace_flag($class, $name); + } + + sub parse_trace_flags { + my ($class, $flags) = @_; + return DBI::parse_trace_flags($class, $flags); + } + + sub driver{ + return $drh if $drh; + my($class, $attr) = @_; + + $class .= "::dr"; + + # not a 'my' since we use it above to prevent multiple drivers + + $drh = DBI::_new_drh($class, { + 'Name' => 'ODBC', + 'Version' => $VERSION, + 'Err' => \$DBD::ODBC::err, + 'Errstr' => \$DBD::ODBC::errstr, + 'State' => \$DBD::ODBC::sqlstate, + 'Attribution' => 'DBD::ODBC by Jeff Urlwin, Tim Bunce and Martin J. 
Evans', + }); + + return $drh; + } + + sub CLONE { undef $drh } + 1; +} + + +{ package DBD::ODBC::dr; # ====== DRIVER ====== + use strict; + use warnings; + + ## no critic (ProhibitBuiltinHomonyms) + sub connect { + my($drh, $dbname, $user, $auth, $attr)= @_; + #$user = q{} unless defined $user; + #$auth = q{} unless defined $auth; + + # create a 'blank' dbh + my $this = DBI::_new_dbh($drh, { + 'Name' => $dbname, + 'USER' => $user, + 'CURRENT_USER' => $user, + }); + + # Call ODBC _login func in Driver.xst file => dbd_db_login6 + # and populate internal handle data. + # There are 3 versions (currently) if you have a recent DBI: + # dbd_db_login (oldest) + # dbd_db_login6 (with attribs hash & char * args) and + # dbd_db_login6_sv (as dbd_db_login6 with perl scalar args + + DBD::ODBC::db::_login($this, $dbname, $user, $auth, $attr) or return; + + return $this; + } + ## use critic + +} + + +{ package DBD::ODBC::db; # ====== DATABASE ====== + use strict; + use warnings; + + use constant SQL_DRIVER_HSTMT => 5; + use constant SQL_DRIVER_HLIB => 76; + use constant SQL_DRIVER_HDESC => 135; + + + sub parse_trace_flag { + my ($h, $name) = @_; + return DBD::ODBC->parse_trace_flag($name); + } + + sub private_attribute_info { + return { + odbc_ignore_named_placeholders => undef, # sth and dbh + odbc_default_bind_type => undef, # sth and dbh + odbc_force_rebind => undef, # sth & dbh + odbc_async_exec => undef, # sth and dbh + odbc_exec_direct => undef, + odbc_SQL_ROWSET_SIZE => undef, + SQL_DRIVER_ODBC_VER => undef, + odbc_cursortype => undef, + odbc_query_timeout => undef, # sth and dbh + odbc_has_unicode => undef, + odbc_out_connect_string => undef, + odbc_version => undef, + odbc_err_handler => undef, + odbc_putdata_start => undef, # sth and dbh + odbc_column_display_size => undef # sth and dbh + }; + } + + sub prepare { + my($dbh, $statement, @attribs)= @_; + + # create a 'blank' sth + my $sth = DBI::_new_sth($dbh, { + 'Statement' => $statement, + }); + + # Call ODBC func in ODBC.xs file. + # (This will actually also call SQLPrepare for you.) + # and populate internal handle data. 
+ + DBD::ODBC::st::_prepare($sth, $statement, @attribs) + or return; + + return $sth; + } + + sub column_info { + my ($dbh, $catalog, $schema, $table, $column) = @_; + + $catalog = q{} if (!$catalog); + $schema = q{} if (!$schema); + $table = q{} if (!$table); + $column = q{} if (!$column); + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLColumns" }); + + _columns($dbh,$sth, $catalog, $schema, $table, $column) + or return; + + return $sth; + } + + sub columns { + my ($dbh, $catalog, $schema, $table, $column) = @_; + + $catalog = q{} if (!$catalog); + $schema = q{} if (!$schema); + $table = q{} if (!$table); + $column = q{} if (!$column); + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLColumns" }); + + _columns($dbh,$sth, $catalog, $schema, $table, $column) + or return; + + return $sth; + } + + + sub table_info { + my ($dbh, $catalog, $schema, $table, $type) = @_; + + if ($#_ == 1) { + my $attrs = $_[1]; + $catalog = $attrs->{TABLE_CAT}; + $schema = $attrs->{TABLE_SCHEM}; + $table = $attrs->{TABLE_NAME}; + $type = $attrs->{TABLE_TYPE}; + } + + $catalog = q{} if (!$catalog); + $schema = q{} if (!$schema); + $table = q{} if (!$table); + $type = q{} if (!$type); + + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLTables" }); + + DBD::ODBC::st::_tables($dbh,$sth, $catalog, $schema, $table, $type) + or return; + return $sth; + } + + sub primary_key_info { + my ($dbh, $catalog, $schema, $table ) = @_; + + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLPrimaryKeys" }); + + $catalog = q{} if (!$catalog); + $schema = q{} if (!$schema); + $table = q{} if (!$table); + DBD::ODBC::st::_primary_keys($dbh,$sth, $catalog, $schema, $table ) + or return; + return $sth; + } + sub statistics_info { + my ($dbh, $catalog, $schema, $table, $unique, $quick ) = @_; + + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLStatistics" }); + + $catalog = q{} if (!$catalog); + $schema = q{} if (!$schema); + $table = q{} if (!$table); + $unique = 1 if (!$unique); + $quick = 1 if (!$quick); + + DBD::ODBC::st::_statistics($dbh, $sth, $catalog, $schema, $table, + $unique, $quick) + or return; + return $sth; + } + + sub foreign_key_info { + my ($dbh, $pkcatalog, $pkschema, $pktable, $fkcatalog, $fkschema, $fktable ) = @_; + + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLForeignKeys" }); + + $pkcatalog = q{} if (!$pkcatalog); + $pkschema = q{} if (!$pkschema); + $pktable = q{} if (!$pktable); + $fkcatalog = q{} if (!$fkcatalog); + $fkschema = q{} if (!$fkschema); + $fktable = q{} if (!$fktable); + _GetForeignKeys($dbh, $sth, $pkcatalog, $pkschema, $pktable, $fkcatalog, $fkschema, $fktable) or return; + return $sth; + } + + sub ping { + my $dbh = shift; + + # DBD::Gofer does the following (with a 0 instead of "0") but it I + # cannot make it set a warning. 
+ #return $dbh->SUPER::set_err("0", "can't ping while not connected") # warning + # unless $dbh->SUPER::FETCH('Active'); + + #my $pe = $dbh->FETCH('PrintError'); + #$dbh->STORE('PrintError', 0); + my $evalret = eval { + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLTables_PING" }) + or return 1; + + my ($catalog, $schema, $table, $type); + + $catalog = q{}; + $schema = q{}; + $table = 'NOXXTABLE'; + $type = q{}; + + DBD::ODBC::st::_tables($dbh,$sth, $catalog, $schema, $table, $type) + or return 1; + $sth->finish; + return 0; + }; + #$dbh->STORE('PrintError', $pe); + $dbh->set_err(undef,'',''); # clear any stored error from eval above + if ($evalret == 0) { + return 1; + } else { + return 0; + } + } + +##### # saved, just for posterity. +##### sub oldping { +##### my $dbh = shift; +##### my $state = undef; +##### +##### # should never 'work' but if it does, that's okay! +##### # JLU incorporated patches from Jon Smirl 5/4/99 +##### { +##### local $dbh->{RaiseError} = 0 if $dbh->{RaiseError}; +##### # JLU added local PrintError handling for completeness. +##### # it shouldn't print, I think. +##### local $dbh->{PrintError} = 0 if $dbh->{PrintError}; +##### my $sql = "select sysdate from dual1__NOT_FOUND__CANNOT"; +##### my $sth = $dbh->prepare($sql); +##### # fixed "my" $state = below. Was causing problem with +##### # ping! Also, fetching fields as some drivers (Oracle 8) +##### # may not actually check the database for activity until +##### # the query is "described". +##### # Right now, Oracle8 is the only known version which +##### # does not actually check the server during prepare. +##### my $ok = $sth && $sth->execute(); +##### +##### $state = $dbh->state; +##### $DBD::ODBC::err = 0; +##### $DBD::ODBC::errstr = ""; +##### $DBD::ODBC::sqlstate = "00000"; +##### return 1 if $ok; +##### } +##### return 1 if $state eq 'S0002'; # Base table not found +##### return 1 if $state eq '42S02'; # Base table not found.Solid EE v3.51 +##### return 1 if $state eq 'S0022'; # Column not found +##### return 1 if $state eq '37000'; # statement could not be prepared (19991011, JLU) +##### # return 1 if $state eq 'S1000'; # General Error? ? 5/30/02, JLU. This is what Openlink is returning +##### # We assume that any other error means the database +##### # is no longer connected. +##### # Some special cases may need to be added to the code above. +##### return 0; +##### } + + # New support for DBI which has the get_info command. + # leaving support for ->func(xxx, GetInfo) (below) for a period of time + # to support older applications which used this. + sub get_info { + my ($dbh, $item) = @_; + # Ignore some we cannot do + if ($item == SQL_DRIVER_HSTMT || + $item == SQL_DRIVER_HLIB || + $item == SQL_DRIVER_HDESC) { + return; + } + return _GetInfo($dbh, $item); + } + + # new override of do method provided by Merijn Broeren + # this optimizes "do" to use SQLExecDirect for simple + # do statements without parameters. 
+ ## no critic (ProhibitBuiltinHomonyms) + sub do { + my($dbh, $statement, $attr, @params) = @_; + my $rows = 0; + ## no critic (ProhibitMagicNumbers) + if( -1 == $#params ) + { + # No parameters, use execute immediate + $rows = ExecDirect( $dbh, $statement ); + if( 0 == $rows ) + { + $rows = "0E0"; # 0 but true + } + elsif( $rows < -1 ) + { + undef $rows; + } + } + else + { + $rows = $dbh->SUPER::do( $statement, $attr, @params ); + } + return $rows + } + ## use critic + # + # can also be called as $dbh->func($sql, ExecDirect); + # if, for some reason, there are compatibility issues + # later with DBI's do. + # + sub ExecDirect { + my ($dbh, $sql) = @_; + return _ExecDirect($dbh, $sql); + } + + # Call the ODBC function SQLGetInfo + # Args are: + # $dbh - the database handle + # $item: the requested item. For example, pass 6 for SQL_DRIVER_NAME + # See the ODBC documentation for more information about this call. + # + sub GetInfo { + my ($dbh, $item) = @_; + return get_info($dbh, $item); + } + + # Call the ODBC function SQLStatistics + # Args are: + # See the ODBC documentation for more information about this call. + # + sub GetStatistics { + my ($dbh, $catalog, $schema, $table, $unique) = @_; + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLStatistics" }); + _GetStatistics($dbh, $sth, $catalog, $schema, + $table, $unique) or return; + return $sth; + } + + # Call the ODBC function SQLForeignKeys + # Args are: + # See the ODBC documentation for more information about this call. + # + sub GetForeignKeys { + my ($dbh, $pk_catalog, $pk_schema, $pk_table, + $fk_catalog, $fk_schema, $fk_table) = @_; + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLForeignKeys" }); + _GetForeignKeys($dbh, $sth, $pk_catalog, $pk_schema, $pk_table, + $fk_catalog, $fk_schema, $fk_table) or return; + return $sth; + } + + # Call the ODBC function SQLPrimaryKeys + # Args are: + # See the ODBC documentation for more information about this call. + # + sub GetPrimaryKeys { + my ($dbh, $catalog, $schema, $table) = @_; + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLPrimaryKeys" }); + _GetPrimaryKeys($dbh, $sth, $catalog, $schema, $table) or return; + return $sth; + } + + # Call the ODBC function SQLSpecialColumns + # Args are: + # See the ODBC documentation for more information about this call. 
+ # + sub GetSpecialColumns { + my ($dbh, $identifier, $catalog, $schema, $table, $scope, $nullable) = @_; + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLSpecialColumns" }); + _GetSpecialColumns($dbh, $sth, $identifier, $catalog, $schema, + $table, $scope, $nullable) or return; + return $sth; + } + + sub GetTypeInfo { + my ($dbh, $sqltype) = @_; + # create a "blank" statement handle + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLGetTypeInfo" }); + # print "SQL Type is $sqltype\n"; + _GetTypeInfo($dbh, $sth, $sqltype) or return; + return $sth; + } + + sub type_info_all { + my ($dbh, $sqltype) = @_; + $sqltype = DBI::SQL_ALL_TYPES unless defined $sqltype; + my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLGetTypeInfo" }); + _GetTypeInfo($dbh, $sth, $sqltype) or return; + my $info = $sth->fetchall_arrayref; + unshift @{$info}, { + map { ($sth->{NAME}->[$_] => $_) } 0..$sth->{NUM_OF_FIELDS}-1 + }; + return $info; + } +} + + +{ package DBD::ODBC::st; # ====== STATEMENT ====== + use strict; + use warnings; + + *parse_trace_flag = \&DBD::ODBC::db::parse_trace_flag; + + sub private_attribute_info { + return { + odbc_ignore_named_placeholders => undef, # sth and dbh + odbc_default_bind_type => undef, # sth and dbh + odbc_force_rebind => undef, # sth & dbh + odbc_async_exec => undef, # sth and dbh + odbc_query_timeout => undef, # sth and dbh + odbc_putdata_start => undef, # sth and dbh + odbc_column_display_size => undef # sth and dbh + }; + } + + sub ColAttributes { # maps to SQLColAttributes + my ($sth, $colno, $desctype) = @_; + my $tmp = _ColAttributes($sth, $colno, $desctype); + return $tmp; + } + + sub cancel { + my $sth = shift; + my $tmp = _Cancel($sth); + return $tmp; + } +} + +1; +__END__ + +=head1 NAME + +DBD::ODBC - ODBC Driver for DBI + +=head1 VERSION + +This documentation refers to DBD::ODBC version 1.22_2. + +=head1 SYNOPSIS + + use DBI; + + $dbh = DBI->connect('dbi:ODBC:DSN', 'user', 'password'); + +See L<DBI> for more information. + +=head1 DESCRIPTION + +=head2 Change log and FAQs + +Please note that the change log has been moved to +DBD::ODBC::Changes. To access this documentation, use +C<perldoc DBD::ODBC::Changes>. + +The FAQs have also moved to DBD::ODBC::FAQ.pm. To access the FAQs use +C<perldoc DBD::ODBC::FAQ>. + +=head2 Important note about the tests + +Please note that some tests may fail or report they are unsupported on +this platform. Notably Oracle's ODBC driver will fail the "advanced" +binding tests in t/08bind2.t. These tests run perfectly under SQL +Server 2000. This is normal and expected. Until Oracle fixes their +drivers to do the right thing from an ODBC perspective, it's going to +be tough to fix the issue. The workaround for Oracle is to bind date +types with SQL_TIMESTAMP. Also note that some tests may be skipped, +such as t/09multi.t, if your driver doesn't seem to support returning +multiple result sets. This is normal. + +=head2 Version Control + +DBD::ODBC source code is under version control at svn.perl.org. If +you would like to use the "bleeding" edge version, you can get the +latest from svn.perl.org via Subversion version control. 
Note there is no guarantee that this version is any different from what
you get in the tarball from CPAN, but it might be :)

You may read about Subversion at L<http://subversion.tigris.org>

You can get a subversion client from there and check dbd-odbc out via:

  svn checkout http://svn.perl.org/modules/dbd-odbc/trunk <your directory name here>

This will pull all the files from the subversion trunk into your
specified directory. If you want to see what has changed since the
last release of DBD::ODBC, read the Changes file or use "svn log" to
get a list of checked-in changes.

=head2 Contributing

There are six main ways you may help with the development and
maintenance of this module:

=over

=item Submitting patches

Please use Subversion (see above) to get the latest version of
DBD::ODBC from the trunk and submit any patches against that.

Please, before submitting a patch:

   svn update
   <try to include a test which demonstrates the fix/change working>
   <test your patch>
   svn diff > describe_my_diffs.patch

and send the resulting file to me, cc'ing the dbi-users@perl.org
mailing list (if you are not a member - why not!).

=item Reporting installs

Install CPAN::Reporter and report your installations. This is easy to
do - see L</CPAN Testers Reporting>.

=item Report bugs

If you find what you believe is a bug then enter it into the
L<http://rt.cpan.org/Dist/Display.html?Name=DBD-ODBC> system. Where
possible include code which reproduces the problem, including any
schema required and the versions of software you are using.

If you are unsure whether you have found a bug, report it anyway or
post it to the dbi-users mailing list.

=item pod comments and corrections

If you find inaccuracies in the DBD::ODBC pod or have a comment which
you think should be added then go to L<http://annocpan.org> and submit
them there. I get an email for every comment added and will review
each one and apply any changes to the documentation.

=item Review DBD::ODBC

Add your review of DBD::ODBC on L<http://cpanratings.perl.org>.

If you are a member on ohloh then add your review or register your
use of DBD::ODBC at L<http://www.ohloh.net/projects/perl_dbd_odbc>.

=item submit test cases

Most DBDs are built against a single client library for the database.

Unlike other DBDs, DBD::ODBC works with many different ODBC drivers.
Although they should all be written with regard to the ODBC
specification, drivers have bugs, and in some places the specification
is open to interpretation. As a result, when changes are applied to
DBD::ODBC it is very easy to break something in one ODBC driver.

What helps enormously to identify problems in the many combinations
of DBD::ODBC and ODBC drivers is a large test suite. I would greatly
appreciate any test cases and in particular any new test cases for
databases other than MS SQL Server.

=back

=head2 DBI attribute handling

If a DBI defined attribute is not mentioned here it behaves as per the
DBI specification.

=head3 ReadOnly (boolean)

DBI documents the C<ReadOnly> attribute as being settable and
retrievable on connection and statement handles. In ODBC, setting
ReadOnly to true causes the connection attribute C<SQL_ATTR_ACCESS_MODE>
to be set to C<SQL_MODE_READ_ONLY> and setting it to false will set the
access mode to C<SQL_MODE_READ_WRITE> (which is the default in ODBC).

B<Note:> There is no equivalent of setting ReadOnly on a statement
handle in ODBC.
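
As a brief illustration (a minimal sketch only - the DSN and table
name here are invented), you would normally request a read-only
session at connect time:

  use DBI;

  # asks the ODBC driver for SQL_MODE_READ_ONLY access
  # (needs DBI >= 1.55, see below)
  my $dbh = DBI->connect('dbi:ODBC:mydsn', $user, $pass,
                         { RaiseError => 1, ReadOnly => 1 });

  # reads are fine; whether writes are actually rejected is
  # driver/database dependent - see the note which follows
  my $rows = $dbh->selectall_arrayref(q/select * from mytable/);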

B<Note:> See ODBC documentation on C<SQL_ATTR_ACCESS_MODE> as setting it
to C<SQL_MODE_READ_ONLY> does B<not> prevent your script from running
updates or deletes; it is simply a hint to the driver/database that
you won't be doing updates.

This attribute requires DBI version 1.55 or better.

=head2 Private attributes common to connection and statement handles

=head3 odbc_ignore_named_placeholders

Use this if you have special needs (such as Oracle triggers, etc.)
where :new or :name mean something special and are not just
placeholder names. You I<must> then use ? for binding parameters.
Example:

 $dbh->{odbc_ignore_named_placeholders} = 1;
 $dbh->do("create trigger foo as if :new.x <> :old.x then ... etc");

Without this, DBD::ODBC will think :new and :old are placeholders for
binding and get confused.

=head3 odbc_default_bind_type

This value defaults to 0.

Older versions of DBD::ODBC assumed that the parameter binding type
was 12 (C<SQL_VARCHAR>). Newer versions always attempt to call
C<SQLDescribeParam> to find the parameter types but if
C<SQLDescribeParam> is unavailable DBD::ODBC falls back to a default
bind type. The internal default bind type is C<SQL_VARCHAR> (for a
non-unicode build) and C<SQL_WVARCHAR> (for a unicode build). If you
set C<odbc_default_bind_type> to a value other than 0 you override the
internal default.

B<N.B.> If you call the C<bind_param> method with a SQL type this
overrides everything else above.

=head3 odbc_force_rebind

This is to handle special cases, especially when using multiple result sets.
Set this before execute to "force" DBD::ODBC to re-obtain the result set's
number of columns and column types for each execute. Especially useful for
calling stored procedures which may return different result sets each
execute. The only performance penalty is during execute(), but I didn't
want to incur that penalty for all circumstances. It is probably fairly
rare that this occurs. This attribute will be automatically set when
multiple result sets are triggered. Most people shouldn't have to worry
about this.

=head3 odbc_async_exec

Allow asynchronous execution of queries. This causes a spin-loop
(with a small "sleep") until the ODBC API being called is complete
(i.e., while the ODBC API returns C<SQL_STILL_EXECUTING>). This is
useful, however, if you want the error handling and asynchronous
messages (see L</odbc_err_handler>, and F<t/20SQLServer.t> for an
example of this).

=head3 odbc_query_timeout

This allows you to change the ODBC query timeout (the ODBC statement
attribute C<SQL_ATTR_QUERY_TIMEOUT>). ODBC defines the query timeout as
the number of seconds to wait for a SQL statement to execute before
returning to the application. A value of 0 (the default) means there
is no time out. Do not confuse this with the ODBC attributes
C<SQL_ATTR_LOGIN_TIMEOUT> and C<SQL_ATTR_CONNECTION_TIMEOUT>. Add

  { odbc_query_timeout => 30 }

to your connect, set it on the C<dbh> before creating a statement or
explicitly set it on your statement handle. The odbc_query_timeout on
a statement is inherited from the parent connection.

Note that internally DBD::ODBC only sets the query timeout if you set it
explicitly; the default of 0 (no time out) is implemented by the
ODBC driver and not DBD::ODBC.

Note that some ODBC drivers implement a maximum query timeout value
and will limit timeouts set above their maximum.
You may see a warning if your time out is capped by the driver but there
is currently no way to retrieve the capped value back from the driver.

Note that some drivers may not support this attribute.

See F<t/20SqlServer.t> for an example.

=head3 odbc_putdata_start

C<odbc_putdata_start> defines the size at which DBD::ODBC uses
C<SQLPutData> and C<SQLParamData> to send larger objects to the
database instead of simply binding them as normal with
C<SQLBindParameter>. It is mostly a placeholder for future changes
allowing chunks of data to be sent to the database and there is little
reason for anyone to change it currently.

The default for odbc_putdata_start is 32768 because this value was
hard-coded in DBD::ODBC until 1.16_1.

=head3 odbc_column_display_size

If your ODBC driver does not support the SQL_COLUMN_DISPLAY_SIZE and
SQL_COLUMN_LENGTH attributes to SQLColAttributes then DBD::ODBC does
not know how big the column might be. odbc_column_display_size sets
the default value for the column size when retrieving column data
where the size cannot be determined.

The default for odbc_column_display_size is 2001 because this value was
hard-coded in DBD::ODBC until 1.17_3.

=head2 Private connection attributes

=head3 odbc_err_handler

B<NOTE:> There should be no reason to use this now as there is a DBI
attribute of a similar name. In future versions this attribute will
be deleted.

Allow errors to be handled by the application. A call-back function
supplied by the application to handle or ignore messages.

The callback function receives three parameters: state (string),
error (string) and the native error code (number).

If the error handler returns 0, the error is ignored, otherwise the
error is passed through the normal DBI error handling.

This can also be used for procedures under MS SQL Server (Sybase too,
probably) to obtain messages from system procedures such as DBCC.
Check F<t/20SQLServer.t> and F<t/10handler.t>.

  $dbh->{RaiseError} = 1;
  sub err_handler {
      my ($state, $msg, $native) = @_;
      if ($state eq '12345') {
          return 0; # ignore this error
      }
      else {
          return 1; # propagate error
      }
  }
  $dbh->{odbc_err_handler} = \&err_handler;
  # do something to cause an error
  $dbh->{odbc_err_handler} = undef; # cancel the handler

=head3 odbc_SQL_ROWSET_SIZE

Here is the information from the original patch; note, however, that
this has been known to cause SQL Server to "lock up". Please use at
your own risk!

C<SQL_ROWSET_SIZE> attribute patch from Andrew Brown

  > There are only 2 additional lines allowing for the setting of
  > SQL_ROWSET_SIZE as db handle option.
  >
  > The purpose to my madness is simple. SqlServer (7 anyway) by default
  > supports only one select statement at once (using standard ODBC cursors).
  > According to the SqlServer documentation you can alter the default
  > setting of three values to force the use of server cursors - in
  > which case multiple selects are possible.
  >
  > The code change allows for:
  > $dbh->{SQL_ROWSET_SIZE} = 2;    # Any value > 1
  >
  > For this very purpose.
  >
  > The setting of SQL_ROWSET_SIZE only affects the extended fetch
  > command as far as I can work out and thus setting this option
  > shouldn't affect DBD::ODBC operations directly in any way.
  >
  > Andrew
  >

In versions of SQL Server 2008 and later see "Multiple Active
Statements (MAS)" in the DBD::ODBC::FAQ instead of using this
attribute.
+ +=head3 odbc_exec_direct + +Force DBD::ODBC to use C<SQLExecDirect> instead of +C<SQLPrepare>/C<SQLExecute>. + +There are drivers that only support C<SQLExecDirect> and the DBD::ODBC +do() override does not allow returning result sets. Therefore, the +way to do this now is to set the attribute odbc_exec_direct. + +NOTE: You may also want to use this option if you are creating +temporary objects (e.g., tables) in MS SQL Server and for some +reason cannot use the C<do> method. see +L<http://technet.microsoft.com/en-US/library/ms131667.aspx> which says +I<Prepared statements cannot be used to create temporary objects on +SQL Server 2000 or later...>. Without odbc_exec_direct, the temporary +object will disappear before you can use it. + +There are currently two ways to get this: + + $dbh->prepare($sql, { odbc_exec_direct => 1}); + +and + + $dbh->{odbc_exec_direct} = 1; + +B<NOTE:> Even if you build DBD::ODBC with unicode support you can +still not pass unicode strings to the prepare method if you also set +odbc_exec_direct. This is a restriction in this attribute which is +unavoidable. + +=head3 SQL_DRIVER_ODBC_VER + +This, while available via get_info() is captured here. I may get rid +of this as I only used it for debugging purposes. + +=head3 odbc_cursortype + +This allows multiple concurrent statements on SQL*Server. In your +connect, add + + { odbc_cursortype => 2 }. + +If you are using DBI > 1.41, you should also be able to use + + { odbc_cursortype => DBI::SQL_CURSOR_DYNAMIC } + +instead. For example: + + my $dbh = DBI->connect("dbi:ODBC:$DSN", $user, $pass, + { RaiseError => 1, odbc_cursortype => 2}); + my $sth = $dbh->prepare("one statement"); + my $sth2 = $dbh->prepare("two statement"); + $sth->execute; + my @row; + while (@row = $sth->fetchrow_array) { + $sth2->execute($row[0]); + } + +See F<t/20SqlServer.t> for an example. + +=head3 odbc_has_unicode + +A read-only attribute signifying whether DBD::ODBC was built with the +C macro WITH_UNICODE or not. A value of 1 indicates DBD::ODBC was built +with WITH_UNICODE else the value returned is 0. + +Building WITH_UNICODE affects columns and parameters which are +SQL_C_WCHAR, SQL_WCHAR, SQL_WVARCHAR, and SQL_WLONGVARCHAR, SQL, +the connect method and a lot more. See L</Unicode>. + +When odbc_has_unicode is 1, DBD::ODBC will: + +=over + +=item bind columns the database declares as wide characters as SQL_Wxxx + +This means that UNICODE data stored in these columns will be returned +to Perl in UTF-8 and with the UTF8 flag set. + +=item bind parameters the database declares as wide characters as SQL_Wxxx + +Parameters bound where the database declares the parameter as being +a wide character (or where the parameter type is explicitly set to a +wide type - SQL_Wxxx) can be UTF8 in Perl and will be mapped to UTF16 +before passing to the driver. + +=item SQL + +SQL passed to the C<prepare> or C<do> methods which has the UTF8 flag set +will be converted to UTF16 before being passed to the ODBC APIs C<SQLPrepare> +or C<SQLExecDirect>. + +=item connection strings + +Connection strings passed to the C<connect> method will be converted +to UTF16 before being passed to the ODBC API C<SQLDriverConnectW>. This happens +irrespective of whether the UTF8 flag is set on the perl connect strings +because unixODBC requires an application to call SQLDriverConnectW to indicate +it will be calling the wide ODBC APIs. + +=back + +NOTE: You will need at least Perl 5.8.1 to use UNICODE with DBD::ODBC. 
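
Since odbc_has_unicode is read-only, a quick way to find out which
kind of DBD::ODBC build you are talking to (a minimal sketch; the DSN
here is invented) is:

  my $dbh = DBI->connect('dbi:ODBC:mydsn', $user, $pass);
  # 1 if DBD::ODBC was built with WITH_UNICODE, 0 otherwise
  print "unicode build\n" if $dbh->{odbc_has_unicode};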

NOTE: Binding of unicode output parameters is coded but untested.

NOTE: When building DBD::ODBC on Windows ($^O eq 'MSWin32') the
WITH_UNICODE macro is automatically added. To disable it specify -nou as
an argument to Makefile.PL (e.g. C<perl Makefile.PL -nou>). On non-Windows
platforms the WITH_UNICODE macro is B<not> enabled by default and to enable
it you need to specify the -u argument to Makefile.PL. Please bear in mind
that some ODBC drivers do not support SQL_Wxxx columns or parameters.

NOTE: Unicode support on Windows 64 bit platforms is currently
untested. Let me know how you get on with it.

UNICODE support in ODBC Drivers differs considerably. Please read the
README.unicode file for further details.

=head3 odbc_out_connect_string

After calling the connect method this will be the ODBC driver's
out connection string - see documentation on SQLDriverConnect.

=head3 odbc_version

This was added prior to the move to ODBC 3.x to allow the caller to
"force" ODBC 3.0 compatibility. It's probably not as useful now, but
it allowed get_info and get_type_info to return correct/updated
information that ODBC 2.x didn't permit/provide. Since DBD::ODBC is
now 3.x, this can be used to force 2.x behavior via something like:

  my $dbh = DBI->connect("dbi:ODBC:$DSN", $user, $pass,
                         { odbc_version => 2});

=head2 Private statement attributes

=head3 odbc_more_results

Use this attribute to determine if there are more result sets
available. SQL Server supports this feature. Use this as follows:

  do {
     my @row;
     while (@row = $sth->fetchrow_array()) {
        # do stuff here
     }
  } while ($sth->{odbc_more_results});

Note that with multiple result sets and output parameters (i.e., using
bind_param_inout), don't expect output parameters to be bound until ALL
result sets have been retrieved.

=head2 Private DBD::ODBC Functions

You use DBD::ODBC private functions like this:

  $dbh->func(arg, private_function_name, @args);

=head3 GetInfo

B<This private function is now superseded by DBI's get_info method.>

This function maps to the ODBC SQLGetInfo call and the argument
should be a valid ODBC information type (see ODBC specification).
e.g.

  $value = $dbh->func(6, 'GetInfo');

which returns the C<SQL_DRIVER_NAME>.

This function returns a scalar value, which can be a numeric or string
value depending on the information value requested.

=head3 SQLGetTypeInfo

B<This private function is now superseded by DBI's type_info and
type_info_all methods.>

This function maps to the ODBC SQLGetTypeInfo API and the argument
should be a SQL type number (e.g. SQL_VARCHAR) or
SQL_ALL_TYPES. SQLGetTypeInfo returns information about a data type
supported by the data source.

e.g.

  use DBI qw(:sql_types);

  $sth = $dbh->func(SQL_ALL_TYPES, GetTypeInfo);
  DBI::dump_results($sth);

This function returns a DBI statement handle for the SQLGetTypeInfo
result-set containing many columns of type attributes (see ODBC
specification).

NOTE: It is VERY important that the C<use DBI> includes the
C<qw(:sql_types)> so that values like SQL_VARCHAR are correctly
interpreted. This "imports" the sql type names into the program's
name space. A very common mistake is to forget the C<qw(:sql_types)>
and obtain strange results.

=head3 GetFunctions

This function maps to the ODBC SQLGetFunctions API which returns
information on whether a function is supported by the ODBC driver.

The argument should be C<SQL_API_ALL_FUNCTIONS> (0) for all functions
or a valid ODBC function number (e.g. C<SQL_API_SQLDESCRIBEPARAM>
which is 58). See the ODBC specification or examine your sqlext.h and
sql.h header files for all the SQL_API_XXX macros.

If called with C<SQL_API_ALL_FUNCTIONS> (0), then a 100 element array is
returned where each element will contain a '1' if the ODBC function with
that SQL_API_XXX index is supported or '' if it is not.

If called with a specific SQL_API_XXX value for a single function it will
return true if the ODBC driver supports that function, otherwise false.

e.g.

    my @x = $dbh->func(0,"GetFunctions");
    print "SQLDescribeParam is supported\n" if ($x[58]);

or

    print "SQLDescribeParam is supported\n"
        if $dbh->func(58, "GetFunctions");

=head3 GetStatistics

B<This private function is now superseded by DBI's statistics_info
method.>

See the ODBC specification for the SQLStatistics API.
You call SQLStatistics like this:

  $dbh->func($catalog, $schema, $table, $unique, 'GetStatistics');

Prior to DBD::ODBC 1.16 $unique was not defined as being true/false or
SQL_INDEX_UNIQUE/SQL_INDEX_ALL. In fact, whatever value you provided
for $unique was passed through to the ODBC API SQLStatistics call
unchanged. This changed in 1.16, where $unique became a true/false
value which is interpreted into SQL_INDEX_UNIQUE for true and
SQL_INDEX_ALL for false.

=head3 GetForeignKeys

B<This private function is now superseded by DBI's foreign_key_info
method.>

See the ODBC specification for the SQLForeignKeys API.
You call SQLForeignKeys like this:

  $dbh->func($pcatalog, $pschema, $ptable,
             $fcatalog, $fschema, $ftable,
             "GetForeignKeys");

=head3 GetPrimaryKeys

B<This private function is now superseded by DBI's primary_key_info
method.>

See the ODBC specification for the SQLPrimaryKeys API.
You call SQLPrimaryKeys like this:

  $dbh->func($catalog, $schema, $table, "GetPrimaryKeys");

=head3 data_sources

B<This private function is now superseded by DBI's data_sources
method.>

You call data_sources like this:

  @dsns = $dbh->func("data_sources");

Handled since 0.21.

=head3 GetSpecialColumns

See the ODBC specification for the SQLSpecialColumns API.
You call SQLSpecialColumns like this:

  $dbh->func($identifier, $catalog, $schema, $table, $scope,
             $nullable, 'GetSpecialColumns');

Handled as of version 0.28.

=head3 ColAttributes

B<This private function is now superseded by DBI's statement attributes
(NAME, TYPE, PRECISION, SCALE, NULLABLE, etc.).>

See the ODBC specification for the SQLColAttributes API.
You call SQLColAttributes like this:

  $dbh->func($column, $ftype, "ColAttributes");

  SQL_COLUMN_COUNT = 0
  SQL_COLUMN_NAME = 1
  SQL_COLUMN_TYPE = 2
  SQL_COLUMN_LENGTH = 3
  SQL_COLUMN_PRECISION = 4
  SQL_COLUMN_SCALE = 5
  SQL_COLUMN_DISPLAY_SIZE = 6
  SQL_COLUMN_NULLABLE = 7
  SQL_COLUMN_UNSIGNED = 8
  SQL_COLUMN_MONEY = 9
  SQL_COLUMN_UPDATABLE = 10
  SQL_COLUMN_AUTO_INCREMENT = 11
  SQL_COLUMN_CASE_SENSITIVE = 12
  SQL_COLUMN_SEARCHABLE = 13
  SQL_COLUMN_TYPE_NAME = 14
  SQL_COLUMN_TABLE_NAME = 15
  SQL_COLUMN_OWNER_NAME = 16
  SQL_COLUMN_QUALIFIER_NAME = 17
  SQL_COLUMN_LABEL = 18

B<Note:> Oracle's ODBC driver for Linux in instant client 11r1 often
returns strange values for column name e.g., '20291'. It is wiser to
use DBI's NAME and NAME_xx attributes for portability.
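
For example, the portable route via DBI statement attributes (a
minimal sketch; the table name is invented) looks like:

  my $sth = $dbh->prepare(q/select * from mytable/);
  $sth->execute;
  # column names and DBI SQL type numbers via standard DBI
  # attributes - no driver-private calls needed
  my @names = @{$sth->{NAME}};
  my @types = @{$sth->{TYPE}};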

=head3 DescribeCol

B<This private function is now superseded by DBI's statement attributes
(NAME, TYPE, PRECISION, SCALE, NULLABLE, etc.).>

See the ODBC specification for the SQLDescribeCol API.
You call SQLDescribeCol like this:

  @info = $dbh->func($column, "DescribeCol");

The returned array contains the column attributes in the order described
in the ODBC specification for SQLDescribeCol.

=head2 Tracing

DBD::ODBC now supports the parse_trace_flag and parse_trace_flags
methods introduced in DBI 1.42 (see DBI for a full description). As
of DBI 1.604, the only trace flag defined which is relevant to
DBD::ODBC is 'SQL' which DBD::ODBC supports by outputting the SQL
strings (after modification) passed to the prepare and do methods.

Currently DBD::ODBC supports two private trace flags. The
'odbcunicode' flag traces some unicode operations and the
'odbcconnection' flag traces the connect process.

To enable tracing of particular flags you use:

  $h->trace($h->parse_trace_flags('SQL|odbcconnection'));
  $h->trace($h->parse_trace_flags('1|odbcunicode'));

In the first case 'SQL' and 'odbcconnection' tracing is enabled on
$h. In the second case trace level 1 is set and 'odbcunicode' tracing
is enabled.

If you want to enable a DBD::ODBC private trace flag before connecting
you need to do something like:

  use DBD::ODBC;
  DBI->trace(DBD::ODBC->parse_trace_flag('odbcconnection'));

or

  use DBD::ODBC;
  DBI->trace(DBD::ODBC->parse_trace_flags('odbcconnection|odbcunicode'));

DBD::ODBC outputs tracing at levels 3 and above (as levels 1 and 2 are
reserved for DBI).

For comprehensive tracing of DBI method calls without all the DBI
internals see L<DBIx::Log4perl>.

=head2 Deviations from the DBI specification

=head3 Mixed placeholder types

There are 3 conventions for placeholders in DBI. These are '?', ':N'
and ':name' (where 'N' is a number and 'name' is an alphanumeric
string not beginning with a number). DBD::ODBC supports all these methods
for naming placeholders but you must only use one method throughout
a particular SQL string. If you mix placeholder methods you will get
an error like:

  Can't mix placeholder styles (1/2)

=head3 Using the same placeholder more than once

DBD::ODBC does not support (currently) the use of one named placeholder
more than once in a single SQL string. i.e.,

  insert into foo values (:bar, :p1, :p2, :bar);

is not supported because 'bar' is used more than once but:

  insert into foo values(:bar, :p1, :p2)

is ok. If you do the former you will get an error like:

  DBD::ODBC does not yet support binding a named parameter more than once

=head3 Binding named placeholders

Although the DBI documentation (as of 1.604) does not say how named
parameters are bound, Tim Bunce has said that in Oracle they are bound
with the leading ':' as part of the name and that has always been the
case. i.e.,

  prepare("insert into mytable values (:fred)");
  bind_param(":fred", 1);

DBD::ODBC does not support binding named parameters with the ':' introducer.
In the above example you must use:

  bind_param("fred", 1);

In discussion on the dbi-dev list it was suggested that the ':' could
be made optional and there were no basic objections but it has not
made its way into the pod yet.
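
Putting the above together, a complete named-placeholder round trip
with DBD::ODBC (a minimal sketch; the table name is invented) looks
like:

  my $sth = $dbh->prepare(q/insert into mytable values (:fred)/);
  $sth->bind_param("fred", 1);   # note: no leading ':' with DBD::ODBC
  $sth->execute;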

=head3 Sticky Parameter Types

The DBI specification post 1.608 says in bind_param:

  The data type is 'sticky' in that bind values passed to execute()
  are bound with the data type specified by earlier bind_param()
  calls, if any. Portable applications should not rely on being able
  to change the data type after the first C<bind_param> call.

DBD::ODBC does allow a parameter to be rebound with another data type as
ODBC inherently allows this. Therefore you can do:

  # parameter 1 set as a SQL_LONGVARCHAR
  $sth->bind_param(1, $data, DBI::SQL_LONGVARCHAR);
  # without the bind above the $data parameter would be either a DBD::ODBC
  # internal default or whatever the ODBC driver said it was but because
  # parameter types are sticky, the type is still SQL_LONGVARCHAR.
  $sth->execute($data);
  # change the bound type to SQL_VARCHAR
  # some DBDs will ignore the type in the following, DBD::ODBC does not
  $sth->bind_param(1, $data, DBI::SQL_VARCHAR);

=head2 Unicode

The ODBC specification supports wide character versions (a postfix of
'W') of some of the normal ODBC APIs e.g., SQLDriverConnectW is a wide
character version of SQLDriverConnect.

In ODBC on Windows the wide characters are defined as SQLWCHARs (2
bytes) and are UCS-2. On non-Windows, the main driver managers I know
of have implemented the wide character APIs differently:

=over

=item unixODBC

unixODBC mimics the Windows ODBC API precisely, meaning the wide
character versions expect and return 2-byte characters in
UCS-2.

unixODBC will happily recognise ODBC drivers which only have the ANSI
versions of the ODBC API and those that have the wide versions
too.

unixODBC will allow an ANSI application to work with a unicode
ODBC driver and vice versa (although in the latter case you obviously
cannot actually use unicode).

unixODBC does not prevent you sending UTF-8 in the ANSI versions of
the ODBC APIs but whether that is understood by your ODBC driver is
another matter.

unixODBC differs in only one way from the Microsoft ODBC driver in
terms of unicode support in that it avoids unnecessary translations
between single byte and double byte characters when an ANSI
application is using a unicode-aware ODBC driver by requiring unicode
applications to signal their intent by calling SQLDriverConnectW
first. On Windows, the ODBC driver manager always uses the wide
versions of the ODBC API in ODBC drivers which provide the wide
versions regardless of what the application really needs and this
results in a lot of unnecessary character translations when you have
an ANSI application and a unicode ODBC driver.

=item iODBC

The wide character versions expect and return wchar_t types.

=back

DBD::ODBC has gone with unixODBC so you cannot use iODBC with a
unicode build of DBD::ODBC. However, some ODBC drivers support UTF-8
(although how they do this with SQLGetData reliably I don't know)
and so you should be able to use those with DBD::ODBC not built for
unicode.

=head3 Enabling and Disabling Unicode support

On Windows Unicode support is enabled by default and to disable it
you will need to specify C<-nou> to F<Makefile.PL> to get back to the
original behavior of DBD::ODBC before any Unicode support was added.

e.g.,

  perl Makefile.PL -nou

On non-Windows platforms Unicode support is disabled by default. To
enable it specify C<-u> to F<Makefile.PL> when you configure DBD::ODBC.

e.g.,

  perl Makefile.PL -u

=head3 Unicode - What is supported?

As of version 1.17 DBD::ODBC has the following unicode support:

=over

=item SQL (introduced in 1.16_2)

Unicode strings in calls to the C<prepare> and C<do> methods are
supported so long as the C<odbc_execdirect> attribute is not used.

=item unicode connection strings (introduced in 1.16_2)

Unicode connection strings are supported but you will need a DBI
post 1.607 for that.

=item column names

Unicode column names are returned.

=item bound columns (introduced in 1.15)

If the DBMS reports the column as being a wide character (SQL_Wxxx) it
will be bound as a wide character and any returned data will be
converted from UTF16 to UTF8 and the UTF8 flag will then be set on the
data.

=item bound parameters

If the perl scalars you bind to parameters are marked UTF8 and the
DBMS reports the type as being a wide type, or you bind the parameter
as a wide type, they will be converted to wide characters and bound as
such.

=back

Since version 1.16_4, the default parameter bind type is SQL_WVARCHAR
for unicode builds of DBD::ODBC. This only affects ODBC drivers which
do not support SQLDescribeParam and only then if you do not
specifically set a sql type on the bind_param method call.

The above Unicode support has been tested with the SQL Server, Oracle
9.2+ and Postgres drivers on Windows and various Easysoft ODBC drivers
on UNIX.

=head3 Unicode - What is not supported?

You cannot use unicode parameter names e.g.,

  select * from table where column = :unicode_param_name

You cannot use unicode strings in calls to prepare if you set the
odbc_execdirect attribute.

You cannot use the iODBC driver manager with DBD::ODBC built for
unicode.

=head3 Unicode - Caveats

For Unicode support on any platform in Perl you will need at least
Perl 5.8.1 - sorry, but this is the way it is with Perl.

The Unicode support in DBD::ODBC expects a WCHAR to be 2 bytes (as it
is on Windows and as the ODBC specification suggests it is). Until
ODBC specifies any other Unicode support it is not envisioned this
will change. On UNIX there are a few different ODBC driver
managers. I have only tested the unixODBC driver manager
(http://www.unixodbc.org) with Unicode support and it was built with
defaults which set WCHAR as 2 bytes.

I believe that the iODBC driver manager expects wide characters to be
wchar_t types (which are usually 4 bytes) and hence DBD::ODBC will not
work with iODBC when built for unicode.

The ODBC Driver must expect Unicode data specified in SQLBindParameter
and SQLBindCol to be UTF16 in local endianness. Similarly, in calls to
SQLPrepareW, SQLDescribeColW and SQLDriverConnectW.

You should be aware that once Unicode support is enabled it affects a
number of DBI methods (some of which you might not expect). For
instance, when listing tables, columns etc. some drivers
(e.g. Microsoft SQL Server) will report the column types as wide types
even if the strings actually fit in 7-bit ASCII. As a result, there is
an overhead for retrieving this column data as 2 bytes per character
will be transmitted (compared with 1 when Unicode support is not
enabled) and these strings will be converted into UTF8 but will end up
fitting (in most cases) into 7-bit ASCII so a lot of conversion work
has been performed for nothing. If you don't have Unicode table and
column names or Unicode column data in your tables you are best
disabling Unicode support.
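
One way to see this in action (a minimal sketch; the table and column
names are invented) is to test the UTF8 flag on fetched data with the
core utf8::is_utf8 function:

  my ($name) = $dbh->selectrow_array(q/select name from mytable/);
  # true if the driver described the column as a wide (SQL_Wxxx) type
  # and the data was therefore converted from UTF-16 to UTF-8
  print "UTF8-flagged\n" if utf8::is_utf8($name);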

I am at present unsure if ChopBlanks processing on Unicode strings is
working correctly on UNIX. If nothing else the construct L' ' in
dbdimp.c might not work with all UNIX compilers. Reports of issues and
patches welcome.

=head3 Unicode implementation in DBD::ODBC

DBD::ODBC uses the wide character versions of the ODBC API and the
SQL_WCHAR ODBC type to support unicode in Perl.

Wide characters returned from the ODBC driver will be converted to
UTF-8 and the perl scalars will have the utf8 flag set (by using
sv_utf8_decode).

perl scalars which are UTF-8 and are sent through the ODBC API will be
converted to UTF-16 and passed to the ODBC wide APIs or signalled as
SQL_WCHARs (e.g., in the case of bound columns).

When built for unicode, DBD::ODBC will always call SQLDriverConnectW
(and not SQLDriverConnect) even if a) your connection string is not
unicode or b) you have not got a DBI later than 1.607, because unixODBC
requires SQLDriverConnectW to be called if you want to call other
unicode ODBC APIs later. As a result, if you build for unicode and
pass ASCII strings to the connect method they will be converted to
UTF-16 and passed to SQLDriverConnectW. This should make no real
difference to perl scripts not using unicode connection strings.

You will need a DBI later than 1.607 to support unicode connection
strings because until post 1.607 there was no way for DBI to pass
unicode strings to the DBD.

=head3 Unicode and Oracle

You have to set the environment variables C<NLS_NCHAR=AL32UTF8> and
C<NLS_LANG=AMERICAN_AMERICA.AL32UTF8> (or any other language setting
ending with C<.AL32UTF8>) before loading DBD::ODBC to make Oracle
return Unicode data. (See also "Oracle and Unicode" in the POD of
DBD::Oracle.)

On Windows, using the Oracle ODBC Driver you have to enable the B<Force
SQL_WCHAR support> Workaround in the data source configuration to make
Oracle return Unicode to a non-Unicode application. Alternatively, you
can include C<FWC=T> in your connect string.

Unless you need to use ODBC, if you want Unicode support with Oracle
you are better off using L<DBD::Oracle>.

=head3 Unicode and PostgreSQL

Some tests from the original DBD::ODBC 1.13 fail with PostgreSQL
8.0.3, so you may not want to use DBD::ODBC to connect to PostgreSQL
8.0.3.

Unicode tests fail because PostgreSQL seems not to give any hints
about Unicode, so all data is treated as non-Unicode.

Unless you need to use ODBC, if you want Unicode support with Postgres
you are better off with L<DBD::Pg> as it has a specific attribute named
C<pg_enable_utf8> to enable Unicode support.

=head3 Unicode and Easysoft ODBC Drivers

We have tested the Easysoft SQL Server, Oracle and ODBC Bridge drivers
with DBD::ODBC built for Unicode. All work as described without
modification except that for the Oracle driver you will need to set
your NLS_LANG as mentioned above.

=head3 Unicode and other ODBC drivers

If you have a unicode-enabled ODBC driver and it works with DBD::ODBC
let me know and I will include it here.

=head2 ODBC Support in ODBC Drivers

=head3 Drivers without SQLDescribeParam

Some drivers do not support the C<SQLDescribeParam> ODBC API (e.g.,
Microsoft Access).

DBD::ODBC uses the C<SQLDescribeParam> API when parameters are bound
to your SQL to find the types of the parameters.
If the ODBC driver +does not support C<SQLDescribeParam>, DBD::ODBC assumes the parameters +are C<SQL_VARCHAR> or C<SQL_WVARCHAR> types (depending on whether +DBD::ODBC is built for unicode or not). In any case, if you bind a +parameter and specify a SQL type this overrides any type DBD::ODBC +would choose. + +For ODBC drivers which do not support C<SQLDescribeParam> the default +behavior in DBD::ODBC may not be what you want. To change the default +parameter bind type set L</odbc_default_bind_type>. If, after that you +have some SQL where you need to vary the parameter types used add the +SQL type to the end of the C<bind_param> method. + + use DBI qw(:sql_types); + $h = DBI->connect; + # set the default bound parameter type + $h->{odbc_default_bind_type} = SQL_VARCHAR; + # bind a parameter with a specific type + $s = $h->prepare(q/insert into mytable values(?)/); + $s->bind_param(1, "\x{263a}", SQL_WVARCHAR); + +=head2 CPAN Testers Reporting + +Please, please, please (is that enough), consider installing +CPAN::Reporter so that when you install perl modules a report of the +installation success or failure can be sent to cpan testers. In this +way module authors 1) get feedback on the fact that a module is being +installed 2) get to know if there are any installation problems. Also +other people like you may look at the test reports to see how +successful they are before choosing the version of a module to +install. + +CPAN::Reporter is easy to install and configure like this: + + perl -MCPAN -e shell + cpan> install CPAN::Reporter + cpan> reload cpan + cpan> o conf init test_report + +Simply answer the questions to configure CPAN::Reporter. + +You can find the CPAN testers wiki at L<http://wiki.cpantesters.org/> +and the installation guide for CPAN::Reporter at +L<http://wiki.cpantesters.org/wiki/CPANInstall>. + +=head2 Others/todo? + +Level 2 + + SQLColumnPrivileges + SQLProcedureColumns + SQLProcedures + SQLTablePrivileges + SQLDrivers + SQLNativeSql + +=head2 Random Links + +These are in need of sorting and annotating. Some are relevant only +to ODBC developers. + +You can find DBD::ODBC on ohloh now at: + +L<http://www.ohloh.net/projects/perl_dbd_odbc> + +If you use ohloh and DBD::ODBC please say you use it and rate it. 
+ +There is a good search engine for the various Perl DBI lists at the +following URLS: + +L<http://perl.markmail.org/search/list:org.perl.dbi-users> + +L<http://perl.markmail.org/search/list:org.perl.dbi-dev> + +L<http://perl.markmail.org/search/list:org.perl.dbi-announce> + +L<http://www.syware.com> + +L<http://www.microsoft.com/odbc> + +For Linux/Unix folks, compatible ODBC driver managers can be found at: + +L<http://www.unixodbc.org> (unixODBC source and rpms) + +L<http://www.iodbc.org> (iODBC driver manager source) + +For Linux/Unix folks, you can checkout the following for ODBC Drivers and +Bridges: + +L<http://www.easysoft.com> + +L<http://www.openlinksw.com> + +L<http://www.datadirect.com> + +L<http://www.atinet.com> + +Some useful tutorials: + +Debugging Perl DBI: + +L<http://www.easysoft.com/developer/languages/perl/dbi-debugging.html> + +Enabling ODBC support in Perl with Perl DBI and DBD::ODBC: + + +L<http://www.easysoft.com/developer/languages/perl/dbi_dbd_odbc.html> + +Perl DBI/DBD::ODBC Tutorial Part 1 - Drivers, Data Sources and Connection: + +L<http://www.easysoft.com/developer/languages/perl/dbd_odbc_tutorial_part_1.html> + +Perl DBI/DBD::ODBC Tutorial Part 2 - Introduction to retrieving data from your database: + +L<http://www.easysoft.com/developer/languages/perl/dbd_odbc_tutorial_part_2.html> + +Perl DBI/DBD::ODBC Tutorial Part 3 - Connecting Perl on UNIX or Linux to Microsoft SQL Server: + +L<http://www.easysoft.com/developer/languages/perl/sql_server_unix_tutorial.html> + +Perl DBI - Put Your Data On The Web: + +L<http://www.easysoft.com/developer/languages/perl/tutorial_data_web.html> + +=head2 Frequently Asked Questions + +Frequently asked questions are now in L<DBD::ODBC::FAQ>. Run +C<perldoc DBD::ODBC::FAQ> to view them. + +=head1 CONFIGURATION AND ENVIRONMENT + +You should consult the documentation for the ODBC Driver Manager +you are using. + +=head1 DEPENDENCIES + +L<DBI> + +L<Test::Simple> + +=head1 INCOMPATIBILITIES + +=head1 BUGS AND LIMITATIONS + +None known other than the deviations from the DBI specification mentioned +above in L</Deviations from the DBI specification>. + +Please report any to me via the CPAN RT system. See +L<http://rt.cpan.org/> for more details. + +=head1 AUTHOR + +Tim Bunce + +Jeff Urlwin + +Thomas K. Wenrich + +Martin J. Evans + +=head1 LICENSE AND COPYRIGHT + +This program is free software; you can redistribute it and/or modify +it under the same terms as Perl itself. See L<perlartistic>. This +program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. + +Portions of this software are Copyright Tim Bunce, Thomas K. Wenrich, +Jeff Urlwin and Martin J. Evans - see the source. 

=head1 SEE ALSO

L<DBI>

=cut
diff --git a/Master/tlpkg/tlperl/lib/DBD/ODBC/Changes.pm b/Master/tlpkg/tlperl/lib/DBD/ODBC/Changes.pm
new file mode 100755
index 00000000000..1388aab9b31
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/ODBC/Changes.pm
@@ -0,0 +1,1682 @@
=head1 NAME

DBD::ODBC::Changes - Log of significant changes to the DBD::ODBC

(As of $LastChangedDate: 2009-08-19 09:51:11 +0100 (Wed, 19 Aug 2009) $ $Revision: 10667 $)

=cut

=head1 Todo

  Add array parameter binding (per new DBI Spec)
  Add row caching/multiple row fetches to speed selects
  Better/more tests on multiple statement handles which ensure the
    correct number of rows
  Better/more tests on all queries which ensure the correct number of
    rows and data
  Better tests on SQLExecDirect/do
  Keep checking Oracle's ODBC drivers for Windows to fix the Date binding problem
  Change SQLSetConnectOption to SQLSetConnectAttr as we are ODBC 3 now
  Change SQLColAttributes calls (now deprecated) to SQLColAttribute
  Change SQLTransact calls to SQLEndTran calls.
  Add support for $sth->more_results based on DBD::ODBC-specific attribute
  Use odbcinst (if it exists) to point people at the ini files they need to use
  Use odbc_config (if it exists) to find out about unixODBC.
  There is a Columns private ODBC method which is not documented.
  Add support for sending lobs in chunks instead of all in one go. Although
    DBD::ODBC uses SQLParamData and SQLPutData internally they are not exposed
    so anyone binding a lob has to have all of it available before it can
    be bound.
  Try to produce a Module::Install build.
  There is a blob_read method in DBI (undocumented) that DBD::DB2 & DBD::Pg
    use and might be useful in DBD::ODBC.
  The SQL_DRIVER_ODBC_VER attribute should be odbc_SQL_DRIVER_ODBC_VER.
  Why does level 15 tracing of any DBD::ODBC script show a lot of these:
    !!DBD::ODBC unsupported attribute passed (PrintError)
    !!DBD::ODBC unsupported attribute passed (Username)
    !!DBD::ODBC unsupported attribute passed (dbi_connect_closure)
    !!DBD::ODBC unsupported attribute passed (LongReadLen)
  strawberry perl build issue:
    http://groups.google.com/group/perl.dbd.pg.changes/browse_thread/thread/8205bf83b5a48f63/cf43d87fe644798f?hl=en&q=easysoft&pli=1
    http://www.nntp.perl.org/group/perl.dbi.users/2008/10/msg33322.html
  Add a perlcritic test - see DBD::Pg
  Why doesn't http://www.presicient.com/dbidocs/ display DBD::ODBC docs
    properly?
  ParamTypes attribute not visible until after execute

=head1 CHANGES

=head2 Changes in DBD::ODBC 1.23 September 11, 2009

Only a readme change and version bumped to 1.23. This is a full
release of all the 1.22_x development releases.

=head2 Changes in DBD::ODBC 1.22_3 August 19, 2009

Fix skip count in rt_38977.t and typo in ok call.

Workaround a bug in unixODBC 2.2.11 which can write off the end of the
string buffer passed to SQLColAttributes.

Fix skip count in rt_null_nvarchar.t test for non-SQL Server drivers.

Fix test in 02simple.t which reported a fail if you have no ODBC
datasources.

In 99_yaml.t pick up the yaml spec version from the meta file instead
of specifying it.

Change calls to SQLPrepare which passed in the string length of the SQL
to use SQL_NTS because a) they are null terminated and more
importantly b) unixODBC contains a bug in versions up to 2.2.16 which
can overwrite the stack by 1 byte if the string length is specified
and it is not built with iconv support and is converting the SQL from
ASCII to Unicode.
+ +Fixed bug in ping method reported by Lee Anne Lester where it dies if +used after the connection is closed. + +A great deal of changes to Makefile.PL to improve the automatic +detection and configuration for ODBC driver managers - especially on +64bit platforms. See rt47650 from Marten Lehmann which started it all +off. + +Add changes from Chris Clark for detecting IngresCLI. + +Fix for rt 48304. If you are using a Microsoft SQL Server database and +nvarchar(max) you could not insert values between 4001 and 8000 +(inclusive) in size. A test was added to the existing rt_38977.t test. +Thanks to Michael Thomas for spotting this. + +Added FAQ on UTF-8 encoding and IBM iSeries ODBC driver. + +Add support for not passing usernames and passwords in call to +connect. Previously DBD::ODBC would set an unspecified +username/password to '' in ODBC.pm before calling one of the login_xxx +functions. This allows the driver to pull the username/password from +elsewhere e.g., like the odbc.ini file. + +=head2 Changes in DBD::ODBC 1.22_1 June 16, 2009 + +Applied a slightly modified version of patch from Jens Rehsack to +improve support for finding the iODBC driver manager. + +A UNICODE enabled DBD::ODBC (the default on Windows) did not handle +UNICODE usernames and passwords in the connect call properly. + +Updated "Attribution" in ODBC.pm. + +Unicode support is no longer experimental hence warning and prompt +removed from the Makefile.PL. + +old_ping method removed. + +Fixed bug in 02simple.t test which is supposed to check you have at +least one data source defined. Unfortunately, it was checking you had +more than 1 data source defined. + +rt_null_varchar had wrong skip count meaning non-sql-server drivers or +sql server drivers too old skipped 2 tests more than were planned. + +=head2 Changes in DBD::ODBC 1.22 June 10, 2009 + +Fixed bug which led to "Use of uninitialized value in subroutine +entry" warnings when writing a NULL into a NVARCHAR with a +unicode-enabled DBD::ODBC. Thanks to Jirka Novak and Pavel Richter who +found, reported and patched a fix. + +Fixed serious bug in unicode_helper.c for utf16_len which I'm ashamed to say +was using an unsigned short to return the length. This meant you could +never have UTF16 strings of more than ~64K without risking serious +problems. The DBD::ODBC test code actually got a + +*** glibc detected *** /usr/bin/perl: double free or corruption +(out): 0x406dd008 *** + +If you use a UNICODE enabled DBD::ODBC (the default on Windows) and +unicode strings larger than 64K you should definitely upgrade now. + +=head2 Changes in DBD::ODBC 1.21_1 June 2, 2009 + +Fixed bug referred to in rt 46597 reported by taioba and identified by +Tim Bunce. In Calls to bind_param for a given statement handle if you +specify a SQL type to bind as, this should be "sticky" for that +parameter. That means if you do: + +$sth->bind_param(1, $param, DBI::SQL_LONGVARCHAR) + +and follow it up with execute calls that also specify the parameter: + +$sth->execute("a param"); + +then the parameter should stick with the SQL_LONGVARCHAR type and not +revert to the default parameter type. The DBI docs (from 1.609) +make it clear the parameter type is sticky for the duration of the +statement but some DBDs allow the parameter to be rebound with a +different type - DBD::ODBC is one of those drivers. + +=head2 Changes in DBD::ODBC 1.21 April 27, 2009 + +Change 02simple test to output Perl, DBI and DBD::ODBC versions. 
+ +Fixed bug where if ODBC driver supports SQLDescribeParam and it +succeeds for a parameterised query but you override the parameter +type, DBD::ODBC was still using the size returned by +SQLDescribeParam. Thanks to Brian Becker for finding, diagnosing and +fixing this issue. + +Added FAQ entry about SQL Server and calling procedures with named +parameters out of order. + +Added test_results.txt containing some supplied make test results. + +=head2 Changes in DBD::ODBC 1.20 April 20, 2009 + +Fix bug in handling of SQL_WLONGVARCHAR when not built with unicode +support. The column was not identified as a long column and hence the +size of the column was not restricted to LongReadLen. Can cause +DBD::ODBC to attempt to allocate a huge amount of memory. + +Minor changes to Makefile.PL to help diagnose how it decided which +driver manager to use and where it was found. + +Offer suggestion to debian-based systems when some of unixODBC is +found (the bin part) but the development part is missing. + +In 20SqlServer.t attempt to drop any procedures we created if they +still exist at the end of the test. Reported by Michael Higgins. + +In 12blob.t separate code to delete test table into sub and call at +being and end, handle failures from prepare there were two ENDs. + +In ODBCTEST.pm when no acceptable test column type is found output all +the found types and BAIL_OUT the entire test. + +Skip rt_39841.t unless actually using the SQL Server ODBC driver or +native client. + +Handle drivers which return 0 for SQL_MAX_COLUMN_NAME_LEN. + +Double the buffer size used for column names if built with unicode. + +=head2 Changes in DBD::ODBC 1.19 April 2, 2009 + +Some minor diagnostic output during tests when running against freeTDS +to show we know of issues in freeTDS. + +Fixed issue in 20SqlServer.t where the connection string got set with +two consecutive semi-colons. Most drivers don't mind this but freeTDS +ignores everything after that point in the connection string. + +Quieten some delete table output during tests. + +Handle connect failures in 20SqlServer.t in the multiple active +statement tests. + +In 02simple.t cope with ODBC drivers or databases that do not need a +username or password (MS Access). + +In 20SqlServer.t fix skip count and an erroneous assignment for +driver_name. + +Change some if tests to Test::More->is tests in 02simple.t. + +Fix "invalid precision" error during tests with the new ACEODBC.DLL MS +Access driver. Same workaround applied for the old MS Access driver +(ODBCJT32.DLL) some time ago. + +Fix out of memory error during tests against the new MS Access driver +(ACEODBC.DLL). The problem appears to be that the new Access driver +reports ridiculously large parameter sizes for "select ?" queries and +there are some of these in the unicode round trip test. + +Fixed minor typo in Makefile.PL - diagnostic message mentioned "ODBC +HOME" instead of ODBCHOME. + +12blob.t test somehow got lost from MANIFEST - replaced. Also changed +algorithm to get a long char type column as some MS Access drivers +only show SQL_WLONGVARCHAR type in unicode. + +Added diagnostic output to 02simple.t to show the state of +odbc_has_unicode. + +=head2 Changes in DBD::ODBC 1.18_4 March 13, 2009 + +A mistake in the MANIFEST lead to the rt_43384.t test being omitted. + +Brian Becker reported the tables PERL_DBD_39897 and PERL_DBD_TEST are +left behind after testing. I've fixed the former but not the latter +yet. + +Yet another variation on the changes for rt 43384. 
+If the parameter is bound specifically as SQL_VARCHAR, you got an
+invalid precision error. Thanks to Øystein Torget for finding this and
+helping me verify the fix.
+
+If you attempt to insert large amounts of data into MS Access (which
+does not have SQLDescribeParam) you can get an invalid precision error
+which can be worked around by setting the bind type to
+SQL_LONGVARCHAR. This version does that for you.
+
+08bind2.t had a wrong skip count.
+
+12blob.t had strict commented out and GetTypeInfo was not quoted. Also
+introduced a skip if the execute fails as it just leads to more
+obvious failures.
+
+In dbdimp.c/rebind_ph there was a specific workaround for SQL Server
+which was not done after testing if we are using SQL Server - this
+was breaking tests against MS Access.
+
+=head2 Changes in DBD::ODBC 1.18_2 March 9, 2009
+
+Added yet another workaround for the SQL Server Native Client driver
+versions 2007.100.1600.22 and 2005.90.1399.00 (driver version
+09.00.1399) which led to HY104, "Invalid precision value" in the
+rt_39841.t test.
+
+=head2 Changes in DBD::ODBC 1.18_1 March 6, 2009
+
+Fixed bug reported by Toni Salomäki leading to a describe failed error
+when calling procedures with no results. Test cases added to
+20SqlServer.t.
+
+Fixed bug rt 43384 reported by Øystein Torget where you cannot insert
+more than 127 characters into a Microsoft Access text(255) column when
+DBD::ODBC is built in unicode mode.
+
+=head2 Changes in DBD::ODBC 1.18 January 16, 2009
+
+Major release of all the 1.17 development releases below.
+
+=head2 Changes in DBD::ODBC 1.17_3 December 19, 2008
+
+Reinstated the answer in the FAQ for "Why do I get invalid value for
+cast specification" which had got lost - thanks to EvanCarroll in
+rt41663.
+
+rt 41713. Applied patch from JHF Remmelzwaal to handle ODBC drivers
+which do not support the SQL_COLUMN_DISPLAY_SIZE and SQL_COLUMN_LENGTH
+attributes in the SQLColAttributes calls after SQLTables and
+SQLColumns. Specifically, the driver he was using was the "Infor
+Integration ODBC driver".
+
+Added notes from JHF Remmelzwaal on resolving some problems he came
+across building DBD::ODBC on Windows with Visual Studio 6.0 and SDK
+Feb 2003.
+
+New odbc_column_display_size attribute for when a driver does not
+return a display size.
+
+Loads of tracing changes to make it easier for me to debug problems.
+
+Fixed bug in tracing of dbd_execute when a parameter is char but undef
+which was leading to an access violation on Windows when tracing was
+enabled.
+
+Minor changes to diagnostic output in some rt tests.
+
+One of the rt tests was not skipping the correct number of tests if
+the driver was not SQL Server.
+
+=head2 Changes in DBD::ODBC 1.17_2 November 17, 2008
+
+Changed the ParamTypes attribute to match the specification, i.e.,
+
+  {
+    parameter_1 => {TYPE => sql_type},
+    parameter_2 => {TYPE => sql_type},
+    ...
+  }
+
+and changed the tests in 07bind.t to reflect this.
+
+A few minor perlcritic changes to ODBC.pm.
+
+Added 99_yaml.t test to check META.yml.
+
+Added patch from Spicy Jack to work around problems with Strawberry
+Perl setting INC on the command line when running Makefile.PL.
+
+=head2 Changes in DBD::ODBC 1.17_1 October 10, 2008
+
+A missing newline from the end of META.yml upsets cpan.
+
+Add code to Makefile.PL to spot a command line containing INC, outline
+the problem and resolution, and not generate the Makefile, to avoid
+cpan-testers failures.
+
+Loads of pod formatting changes including a section in the wrong
+place.
+
+New kwalitee test.
+
+Fix rt 39841.
+A bug in the SQL Server ODBC driver which describes parameters
+by rearranging your SQL to do a select on the columns matching the
+parameters. Sometimes it gets this wrong and ends up describing the
+wrong column. This can lead to a varchar(10) being described with a
+column-size less than 10 and hence you get data truncation on execute.
+
+Added a test case for rt 39841.
+
+Fix rt 39897. 1.17 added support for varchar(max) in SQL Server
+but it broke support for SQL_VARCHAR columns in that they had
+LongReadLen and LongTruncOk applied to them. This means that in 1.16
+you could retrieve a SQL_VARCHAR column without worrying about how
+long it was, but in 1.17, if the same column was greater than 80
+characters then you would get a truncation error. The only way around
+this was to set LongTruncOk or LongReadLen.
+
+Added a test case for rt 39897.
+
+=head2 Changes in DBD::ODBC 1.17 September 22, 2008
+
+In the absence of any bug reports since 1.16_4 this is the official
+1.17 release. See below for changes since 1.16.
+
+Minor pod changes.
+
+Added support for ParamTypes (see DBI spec) and notes in the DBD::ODBC
+pod.
+
+=head2 Changes in DBD::ODBC 1.16_4 September 12, 2008
+
+Small change to Makefile.PL to work around a problem in darwin 8 with
+iODBC which leads to "Symbol not found: _SQLGetPrivateProfileString"
+errors.
+
+Added new [n]varXXX(max) column type tests to 20SqlServer.t.
+
+Fixed SQL_WCHAR and SQL_WVARCHAR support in the non-unicode
+build. These types had ended up only being included for unicode
+builds.
+
+More changes to the ODBC pod to 1) encourage people to use
+CPAN::Reporter, 2) rework the contributing section, 3) mention
+DBIx::Log4perl, 4) add a BUGS section, 5) add an "ODBC Support in ODBC
+Drivers" section etc.
+
+Changed the default fallback parameter bind type to SQL_WVARCHAR for
+unicode builds. This affects ODBC drivers which don't have
+SQLDescribeParam. Problem reported by Vasili Galka with MS Access when
+reading unicode data from one table and inserting it into another
+table. The read data was unicode but it was inserted as SQL_CHARs
+because SQLDescribeParam does not exist in MS Access, so we fall back
+to either a default bind type (which was SQL_VARCHAR) or whatever was
+specified in the bind_param call.
+
+Fixed bug in 20SqlServer.t when DBI_DSN is defined including "DSN=".
+
+=head2 Changes in DBD::ODBC 1.16_3 September 3, 2008
+
+Changed Makefile.PL to add "-framework CoreFoundation" to the linker
+line on OSX/darwin.
+
+Disallow building with iODBC if it is a unicode build.
+
+More tracing for the odbcconnect flag.
+
+Fix bug in out connection string handling that attempted to use an out
+connection string when SQLDriverConnect[W] fails.
+
+Fixed yet more test count problems due to Test::NoWarnings not being
+installed.
+
+Skip private_attribute_info tests if DBI < 1.54.
+
+About a 30% rewrite of the bound parameter code which started with an
+attempt to support the new VARBINARY(MAX) and VARCHAR(MAX) columns in
+SQL Server when the parameter length is > 400K in size (see elsewhere
+in this Changelog). This is a seriously big change to DBD::ODBC to
+attempt to be a bit more clever in its handling of drivers which
+either do not support SQLDescribeParam or do support SQLDescribeParam
+but cannot describe all parameters, e.g., the MS SQL Server ODBC
+driver cannot describe "select ?, LEN(?)".
+If you specify the bound parameter type in your calls to bind_param
+and run them against an ODBC driver which supports SQLDescribeParam
+you may want to check carefully and probably remove the parameter type
+from the bind_param method call.
+
+Added rt_38977.t test to the test suite to test varchar(max) and
+varbinary(max) columns in SQL Server.
+
+Moved most of README.unicode to the ODBC.pm pod.
+
+Added workaround for a problem with the Microsoft SQL Server driver
+when attempting to insert more than 400K into a varbinary(max) or
+varchar(max) column. Thanks to Julian Lishev for finding the problem
+and identifying 2 possible solutions.
+
+=head2 Changes in DBD::ODBC 1.16_2 September 2, 2008
+
+Removed the szDummyBuffer field from imp_fbh_st and the code in
+dbd_describe which clears it. It was never used so this was a waste of
+time.
+
+Changed the remaining calls to SQLAllocEnv, SQLAllocConnect and
+SQLAllocStmt and their respective free calls to the ODBC 3.0
+SQLAllocHandle and SQLFreeHandle equivalents.
+
+Rewrote the ColAttributes code to understand string and numeric
+attributes rather than trying to guess by what the driver returns. If
+you see any change in behaviour in ColAttributes calls you'll have to
+let me know as there were a number of undocumented workarounds for
+drivers.
+
+The unicode build of DBD::ODBC now supports:
+
+=over
+
+=item column names
+
+The retrieval of unicode column names.
+
+=item SQL strings
+
+Unicode in prepare strings (but not unicode parameter names) e.g.,
+
+  select unicode_column from unicode_table
+
+is fine but
+
+  select * from table where column = :unicode_param_name
+
+is not, so stick to ASCII parameter names if you use named parameters.
+
+Unicode SQL strings passed to the do method are supported.
+
+SQL strings passed to DBD::ODBC when the odbc_exec_direct attribute
+is set will B<not> be passed as unicode strings - this is a limitation
+of the odbc_exec_direct attribute.
+
+=item connection strings
+
+True unicode connection string support will require a new version
+of DBI (post 1.607).
+
+B<Note> that even though unicode connection strings are
+not supported currently, DBD::ODBC has had to be changed to call
+SQLDriverConnectW/SQLConnectW to indicate to the driver manager its
+intention to use some of the ODBC wide APIs. This only affects
+DBD::ODBC when built for unicode.
+
+=item odbcunicode trace flag
+
+There is a new odbcunicode trace flag to enable unicode-specific
+tracing.
+
+=back
+
+Skipped the 40Unicode.t test if the ODBC driver is Oracle's ODBC as I
+cannot make it work.
+
+Changes internally to use sv_utf8_decode (where defined) instead of
+setting the utf8 flag.
+
+Fix problems in the tests when Test::NoWarnings is not installed.
+
+Removed some unused variables that were leading to compiler warnings.
+
+Changed a lot of tracing to use the new odbcconnection flag.
+
+Changed to use dbd_db_login6_sv if DBI supports it.
+
+Commented out a diag in 20SqlServer.t that was leading to confusion.
+
+Added diag to 20SqlServer.t in the mars test to explain why it may
+fail.
+
+Various pod changes for clarification and to note odbc_err_handler is
+deprecated.
+
+Removed the odbcdev trace flag - it was not really used.
+
+New odbc_out_connect_string attribute for connections which returns
+the ODBC driver out connection string.
+
+=head2 Changes in DBD::ODBC 1.16_1 August 28, 2008
+
+Fixed bug in odbc_cancel which was checking the ACTIVE flag was on
+before calling SQLCancel. Non-select statements can also be cancelled,
+so this was wrong. Thanks to Dean Arnold for spotting.
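+
+(A minimal sketch of cancelling a running statement; the table name is
+made up, and cancel is typically only useful from a signal handler or
+another thread with a driver that supports SQLCancel.)
+
+  my $sth = $dbh->prepare('delete from big_audit_table');
+  # allow Ctrl-C to abort the long-running statement; $sth->cancel
+  # maps to SQLCancel and, since this fix, works for non-select
+  # statements too
+  local $SIG{INT} = sub { $sth->cancel };
+  $sth->execute;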
+
+Minor changes to test 01base to attempt to catch install_driver
+failing, report it as a fail and skip the other tests.
+
+Fixed bug reported by James K. Lowden with asynchronous mode and
+SQLParamData where the code was not expecting SQL_STILL_EXECUTING and
+did not handle it.
+
+Added odbc_putdata_start attribute to determine when to start using
+SQLPutData on lobs.
+
+Fixed bug in lob inserts where decimal_digits was being set to the
+size of the bound lob unnecessarily.
+
+Minor change to connect/login code to delete driver-specific
+attributes passed to connect so they do not have to be processed again
+when DBI calls STORE with them.
+
+New 12blob.t test.
+
+A lot of code tidy up but not expecting any real benefit or detriment
+when using DBD::ODBC.
+
+Fixed retrieving [n]varchar(max) columns which were only returning 1
+byte - thanks to Fumiaki Yoshimatsu and perl monks for finding it.
+See http://markmail.org/message/fiym5r7q22oqlzsf#query:Fumiaki Yoshimatsu odbc+page:1+mid:fiym5r7q22oqlzsf+state:results
+
+Various minor changes to get the CPANTS kwalitee score up.
+  fixed pod issues in FAQ.pm
+  moved mytest dir to examples
+  added generated_by and requires perl version to META.yml
+  added pod and pod-coverage tests
+  removed executable flag from Makefile.PL
+  added use warnings to some modules and tests
+  fixed pod errors in ODBC.pm
+  added AUTHOR and LICENSE section to ODBC.pm
+  added Test::NoWarnings to tests
+
+Added support for setting the new(ish) DBI ReadOnly attribute on a
+connection. See notes in the pod.
+
+Changes to the test suite to work around problems in Oracle's instant
+client 11r1 ODBC driver for Linux (SQLColAttributes problems - see
+02simple.t).
+
+New tests in 30Oracle.t for Oracle procedures.
+
+=head2 Changes in DBD::ODBC 1.16 May 13, 2008
+
+=head3 Test Changes
+
+Small change to the last test in 10handler.t to cope with the prepare
+failing instead of the execute failing - spotted by Andrei Kovalevski
+with the ODBCng Postgres driver.
+
+Changed the 20SqlServer.t test to specifically disable MARS for the
+test to check multiple active statements and added a new test to check
+that when MARS_Connection is enabled multiple active statements are
+allowed.
+
+Changed the 09multi.t test to use ; as a SQL statement separator
+instead of a newline.
+
+A few minor "use of uninitialised" fixes in tests when a test fails.
+
+In 02simple.t output DBMS_NAME/VER and DRIVER_NAME/VER as a useful
+debugging aid when cpan testers report a fail.
+
+2 new tests for odbc_query_timeout added to 03dbatt.t.
+
+Changed 02simple.t test which did not work for Oracle due to a "select
+1" in the test. Test changed to do "select 1 from dual" for Oracle.
+
+New tests for numbered and named placeholders.
+
+=head3 Documentation Changes
+
+Added references to the DBD::ODBC ohloh listing and markmail archives.
+
+Added Tracing sections.
+
+Added "Deviations from the DBI specification" section.
+
+Moved the FAQ entries from ODBC.pm to a new FAQ document. You can view
+the FAQ with perldoc DBD::ODBC::FAQ.
+
+Added provisional README.windows document.
+
+Rewrote the pod for odbc_query_timeout.
+
+Added a README.osx.
+
+=head3 Internal Changes
+
+More tracing in dbdimp.c for named parameters.
+
+#ifdeffed out odbc_get_primary_keys in dbdimp.c as it is no longer
+used. $h->func($catalog, $schema, $table, 'GetPrimaryKeys') ends up
+in dbdimp.c/dbd_st_primary_keys now.
+
+Reformatted dbdimp.c to avoid going over 80 columns.
+
+Tracing changed.
+Levels were reviewed and changed in many cases, avoiding levels 1
+and 2 which are reserved for DBI. Now using the DBIc_TRACE macro
+internally. Also tracing SQL when the 'SQL' flag is set.
+
+=head3 Build Changes
+
+Changes to Makefile.PL to fix a newly introduced bug with 'tr', remove
+easysoft OOB detection and to try and use odbc_config and odbcinst if
+we find them to aid automatic configuration. This latter change also
+adds "odbc_config --cflags" to the CC line when building DBD::ODBC.
+
+Avoid warning when testing the ExtUtils::MakeMaker version and it is a
+test release with an underscore in the version.
+
+=head3 Functionality Changes
+
+Added support for parse_trace_flag and parse_trace_flags methods and
+defined a DBD::ODBC private flag 'odbcdev' as a test case.
+
+Add support for the 'SQL' trace type. Added private trace type odbcdev
+as an experimental start.
+
+Change odbc_query_timeout attribute handling so that if it is set to 0
+after having been set to a non-zero value, the default of no timeout
+is restored.
+
+Added support for DBI's statistics_info method.
+
+=head3 Bug Fixes
+
+Fix bug in support for named placeholders leading to the error "Can't
+rebind placeholder" when there is more than one named placeholder.
+
+Guard against scripts attempting to use a named placeholder more than
+once in a single SQL statement.
+
+If you called some methods after disconnecting (e.g., prepare/do and
+any of the DBD::ODBC specific methods via "func") then no error was
+generated.
+
+Fixed issue with use of true/false as field names in a structure on
+Mac OS X 10.5 (Leopard) thanks to Hayden Stainsby.
+
+Remove tracing of bound wide characters as it relies on
+null-terminated strings that don't exist.
+
+Fix issue causing a problem with repeatedly executing a stored
+procedure which returns no result-set. SQLMoreResults was only called
+on the first execute and some drivers (SQL Server) insist a procedure
+is not finished until SQLMoreResults returns SQL_NO_DATA.
+
+=head2 Changes in DBD::ODBC 1.15 January 29, 2008
+
+1.15 final release.
+
+Fixed bug reported by Toni Salomaki where DBD::ODBC may call
+SQLNumResultCols after SQLMoreResults returns SQL_NO_DATA. It led to
+the error:
+
+Describe failed during DBI::st=HASH(0x19c2048)->FETCH(NUM_OF_FIELDS,0)
+
+when NUM_OF_FIELDS was referenced in the Perl script.
+
+Updated the odbc_exec_direct documentation to describe its requirement
+when creating temporary objects in SQL Server.
+
+Added FAQ on SQL Server temporary tables.
+
+Fixed bug in dbdimp.c which was using SQL_WCHAR without testing it was
+defined - thanks Jergen Dutch.
+
+Fixed use of "our" in UCHelp.pm which fails on older Perls.
+
+Minor changes to 02simple.t and 03dbatt.t to fix diagnostics output
+and help debug any DBD which does not handle long data properly.
+
+Further changes to Makefile.PL to avoid a change in behavior of
+ExtUtils::MakeMaker wrt order of execution of PREREQ_PM and CONFIGURE.
+Now if DBI::DBD is not installed we just warn and exit 0 to avoid a
+cpan-testers failure.
+
+=head2 Changes in DBD::ODBC 1.15_2 November 14, 2007
+
+Fix bug in DBD::ODBC's private data_sources function which was
+returning data sources prefixed with "DBI:ODBC" instead of "dbi:ODBC".
+
+If you don't have at least DBI 1.21 it is now a fatal error instead of
+just a warning.
+
+DBI->connect changed so informational diagnostics like "Changed
+database context to 'master'" from SQL Server are available in
+errstr/state.
+These don't cause DBI->connect to die but you can test
+$h->err eq "" after connect and obtain the informational diagnostics
+from errstr/state if you want them.
+
+Fixed problem in 41Unicode.t where utf8 was used before testing we had
+a recent enough Perl - thank you cpan testers.
+
+Changed "our" back to "my" in Makefile.PL - thank you cpan testers.
+
+Removed all calls to DBIh_EVENT2 in dbdimp.c as it is no longer used
+(see posts on dbi-dev).
+
+Changed the text output when a driver manager is not found to stop
+referring to iodbcsrc which is no longer included with DBD::ODBC.
+
+Changed Makefile.PL to attempt to find unixODBC if -o or ODBCHOME is
+not specified.
+
+Updated META.yml based on the new 1.2 spec.
+
+Changed Makefile.PL so if an ODBC driver manager is not found then we
+issue a warning and exit cleanly without generating a Makefile. This
+should stop cpan-testers from flagging a fail because they haven't got
+an ODBC driver manager.
+
+Changed Makefile.PL so it no longer "use"s DBI/DBI::DBD because this
+makes cpan-testers log a fail if DBI is not installed. Changed to put
+the DBI::DBD use in the CONFIGURE sub so PREREQ_PM will filter out
+machines where DBI is not installed.
+
+Fix a possible typo, used once, in 10handler.t.
+
+=head2 Changes in DBD::ODBC 1.15_1 November 6, 2007
+
+Minor changes to the 20SqlServer.t test for SQL Server 2008 (Katmai).
+Timestamps now return an extra 4 digits of precision (all 0000) and
+the driver reported in dbcc messages has a '.' in the version which
+was not handled.
+
+New FAQ entry and test code for the "Datetime field overflow" problem
+in Oracle.
+
+Changed all ODBC code to use the new SQLLEN/SQLULEN types where
+Microsoft's headers indicate, principally so DBD::ODBC builds and
+works on win64. NOTE: you will need an ODBC Driver Manager on UNIX
+which knows the SQLLEN/SQLULEN types. The unixODBC driver manager uses
+SQLLEN/SQLULEN in versions from at least 2.2.12. Thanks to Nelson
+Oliveira for finding, patching and testing this and then fixing
+problems with bound parameters on 64 bit Windows.
+
+Added the private_attribute_info method DBI introduced (see DBI docs)
+and test cases to 02simple.t.
+
+Fairly major changes to dbd_describe in dbdimp.c to reduce ODBC calls
+by 1 SQLDescribeCol call per column when describing result
+sets. Instead of calculating the amount of memory required to hold
+every column name we work on the basis that (num_columns + 1) *
+SQL_MAX_COLUMN_NAME_LEN can hold all column names. However, to avoid
+using a large amount of memory unnecessarily if an ODBC driver
+supports massive column name lengths, the maximum size per column is
+restricted to 256.
+
+Changed to avoid explicit use of DBIc_ERRXXX in favour of the newish
+(ok, DBD::ODBC is a bit behind the times in this respect)
+DBIh_SET_ERR_CHAR. This involved a reworking of the error handling
+and although all test cases still pass I cannot guarantee it has no
+other effects - please let me know if you spot differences in error
+messages.
+
+Fixed bug in the 20SqlServer test for multiple results that was
+passing but for the wrong reason (namely, that the odbc_err_handler
+was being called when it should not have been).
+
+Fixed bug in odbc_err_handler that prevented it from being reset so
+you don't have an error handler. It looks like the problem was in
+dbd_db_STORE_attrib where "if(valuesv == &PL_sv_undef)" was used to
+detect undef and I think it should be "if (!SvOK(valuesv))".
+
+Improvements to the odbc_err_handler documentation.
+
+Added 10handler.t test cases.
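+
+(For reference, a sketch of installing and then removing an error
+handler, based on the odbc_err_handler description elsewhere in this
+file; the NativeError argument was added in 1.07, and resetting the
+handler is exactly what the fix above repaired.)
+
+  $dbh->{odbc_err_handler} = sub {
+      my ($sqlstate, $msg, $nativeerr) = @_;
+      warn "state=$sqlstate native=$nativeerr msg=$msg";
+      return 1;   # non-zero: process the error normally, 0: ignore it
+  };
+
+  $dbh->{odbc_err_handler} = undef;  # back to normal error processing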
+
+More tests in 02simple.t to check NUM_OF_FIELDS and NAMES_uc.
+
+Bit of a tidy up:
+
+Removed some unused variable declarations from dbdimp.c.
+
+Lots of changes to DBD::ODBC tracing, particularly in dbd_describe,
+dbd_error2 and login6.
+
+Removed a lot of tracing code in comments or #if 0 as it never gets
+built.
+
+Changed dual tests on SQL_SUCCESS and SQL_SUCCESS_WITH_INFO to use
+SQL_SUCCEEDED.
+
+=head2 Changes in DBD::ODBC 1.14 July 17, 2007
+
+Fix bug reported by Steffen Goeldner where ping crashes after
+disconnect.
+
+Fix bug in dbd_bind_ph which leads to the error Can't change param 1
+maxlen (51->50) after first bind in the 20SqlServer test. This is
+caused by svGROW in Perl 5.8.8 being changed to possibly grow by more
+than you asked (e.g. up to the next longword boundary).
+
+Fix problem with binding undef as an output parameter. Reported by
+Stephen More with IBM's ODBC driver for iSeries.
+
+Removed comment delimiters in comments in dbdimp.h leading to
+warnings.
+
+Removed some unused variable declarations leading to warnings.
+
+Removed PerlIO_flush calls as it is believed they are not required.
+
+Add logging for whether SQLDescribeParam is supported.
+
+Fixed use of an uninitialised variable in dbd_bind_ph where an undef
+is bound and tracing is enabled.
+
+Fixed issue with the TRACESTATUS change in 20SqlServer.t tests 28, 31,
+32 and 33 leading to those tests failing when testing with SQL Server
+2005 or Express.
+
+Many compiler warnings fixed - especially for incompatible types.
+
+Add provisional Unicode support - thanks to Alexander Foken. This
+change is very experimental (especially on UNIX). Please see the
+ODBC.pm documentation. Also see README.unicode and README.af. New
+database attribute odbc_has_unicode to test if DBD::ODBC was built
+with UNICODE support. New tests for Unicode. New requirement for Perl
+5.8.1 if Unicode support is required. New -[no]u argument to
+Makefile.PL. New warning in Makefile.PL if Unicode support is built
+for UNIX.
+
+Fix use of an uninitialised variable in Makefile.PL.
+
+Fix use of scalar with no effect in Makefile.PL.
+
+Added warning to Makefile.PL about building/running with LANG using
+UTF8.
+
+Added warning to Makefile.PL about using thread-safe ODBC drivers.
+
+Updated MANIFEST to include more test code from mytest and remove
+MANIFEST.SKIP etc.
+
+Removed calls to get ODBC errors when SQLMoreResults returns
+SQL_NO_DATA. These are a waste of time since SQL_NO_DATA is expected
+and there is no error diagnostic to retrieve.
+
+Changes to test 17 of 02simple.t which got "not ok 17 - Col count
+matches correct col count" errors with some Postgres ODBC
+drivers. Caused by the test expecting column names to come back
+uppercase. Fixed by uppercasing the returned column names.
+
+Changes to tests in 03batt.t which correctly expect an ODBC 3 driver
+to return the column names in the SQLTables result-set as per the ODBC
+3.0 spec. Postgres, which reports itself as an ODBC 3.0 driver, seems
+to return the ODBC 2 defined column names. Changed the tests to catch
+the ODBC 2.0 names and pass the test but issue a warning.
+
+For Postgres, skip the test (with a warning) which checks $sth->{NAME}
+returns an empty list after execute on an update.
+
+Updated FAQ, added a few more questions etc.
+
+DBD::ODBC requires at least Perl 5.6.0.
+
+Many updates to the pod documentation.
+
+Removed some dead HTTP links in the pod I could not find equivalents
+for - let me know if you have working replacements for the ones
+removed.
+
+Add some HTTP links to useful tutorials on DBD::ODBC.
+
+=head2 Changes in DBD::ODBC 1.13 November 8, 2004
+
+Fix inconsistency/bug with the odbc_exec_direct vs. odbc_execdirect
+settings. Now made consistent with odbc_exec_direct. For now, prepare
+will still look for odbc_execdirect as a backup (which is what it was
+doing), but not as a DBH attribute; that support will be dropped at
+some time in the future. Please use odbc_exec_direct from now on...
+
+Fix handling of print statements for SQL Server thanks to Martin
+Evans! Thanks for all your work on this! Due to a bug in SQL Server,
+you must use odbc_exec_direct. See t/20SqlServer.t for an example. You
+will need to call $sth->{odbc_more_results} to clear out any trailing
+messages.
+
+Change tests to use Test::More. Whew, that's much nicer!
+
+Fix Oracle integral/numeric output params so that a warning is not
+printed about the value not being numeric (even though it is!)
+
+=head2 Changes in DBD::ODBC 1.12 October 26, 2004
+
+Fix bug with the odbc_execdirect attribute thanks to Martin Evans.
+Fix bug(s) with odbc_query_timeout and tested with SQL*Server.
+Oracle tests failed with setting timeout. Probably not handled by
+Oracle's ODBC driver.
+
+=head2 Changes in DBD::ODBC 1.11 October 11, 2004
+
+Added odbc_timeout, but untested.
+
+=head2 Changes in DBD::ODBC 1.10 September 8, 2004
+
+Fixed bug in Makefile.PL.
+Added pod.t test, taken from DBI.
+Fixed various small POD issues, discovered during the pod test.
+Fixed bug in bind_param_inout.
+
+=head2 Changes in DBD::ODBC 1.09 March 10, 2004
+
+Duh. I forgot to add the new dbivport.h to MANIFEST and SVN before
+submitting. Fixed.
+
+=head2 Changes in DBD::ODBC 1.08 March 6, 2004
+
+Added check in Makefile.PL to detect if the environment variable LANG
+is set. If so, print a warning about potential makefile generation
+issues.
+Change to use dbivport.h per the new DBI spec.
+Add ability to set the cursor type during the connect. This may allow
+some servers which do not support multiple concurrent statements to
+permit them -- tested with SQL Server. Thanks to Martin Busik!
+See the odbc_cursortype information in the ODBC POD.
+
+
+=head2 Changes in DBD::ODBC 1.07 February 19, 2004
+
+Added to Subversion version control hosted by perl.org. Thanks Robert!
+See the ODBC.pm POD for more information.
+Added contributing section to the ODBC.pm POD -- see more details
+there!
+Added parameter to odbc_errhandler for the NativeError -- thanks to
+Martin Busik.
+Fix for Makefile.PL not having a tab in front of $(NOOP) (finally).
+Fix for SQLForeignKeys thanks to Kevin Shepherd.
+
+=head2 Changes in DBD::ODBC 1.06 June 19, 2003
+
+Fixed test in t/02simple.t to skip if the DSN defined by the user has
+DSN= in it.
+Added tests for a wrong DSN, ensuring DBI::errstr is appropriately
+set.
+Fixed small issue in Makefile.PL for Unix systems thanks to H.Merijn
+Brand.
+Update to NOT copy user id and password to the connect string if the
+UID or PWD parameter is in the connect string.
+Updated Makefile.PL for dmake, per patch by Steffen Goldner. Thanks
+Steffen!
+
+=head2 Changes in DBD::ODBC 1.05 March 14, 2003
+
+Cleaned up Makefile.PL and added Informix support thanks to Jonathan
+Leffler (see README.informix).
+Added a nicer error message when attempting to do anything while the
+database is disconnected.
+Fixed fetchrow_hashref('NAME_uc | NAME_lc') with odbc_more_results.
+Added exporter to allow the perl -MDBD::ODBC=9999 command line to
+determine the version.
+Fixed for building with DBI 1.33 and greater.
+Removed all C++ style comments.
+Ensured files are in Unix format, with the exception of the README
+type information and Makefile.PL.
+
+=head2 Changes in DBD::ODBC 1.04 January 24, 2003
+
+It seems that case insensitive string comparison with a limit causes
+problems for multiple platforms. strncmpi, strncasecmp, _strcmpin are
+all functions hit and it seems to be hit-or-miss. Hence, I rewrote it
+to upper case the string then do strncmp, which should be
+safe...sheesh. A simple thing turned into a headache...
+
+=head2 Changes in DBD::ODBC 1.03 January 17, 2003
+
+Add automatic detection of DRIVER= or DSN= to add user id and password
+to the connect string.
+
+
+=head2 Changes in DBD::ODBC 1.02 January 6, 2003
+
+Fix to call finish() automatically if execute is re-called in a loop
+(and a test in t/02simple.t to ensure it's fixed).
+
+Augmented the error message when longs are truncated to help users
+determine where to look for help.
+
+Fixes for build under Win32 with Perl 5.8.
+
+
+=head2 Changes in DBD::ODBC 1.01 December 9, 2002
+
+Forgot to fix require DBI 1.201 in ODBC.pm to work for perl 5.8.
+Fixed.
+
+=head2 Changes in DBD::ODBC 1.00 December 8, 2002
+
+(Please see all changes since version 0.43)
+
+Updated Makefile.PL to handle SQL_Wxxx types correctly with unixODBC
+and linking directly with EasySoft OOB. Note that I could not find
+where iODBC defines SQL_WLONGVARCHAR, so I'm not sure it's fixed on
+all other platforms. Should not have been a problem under Win32...
+
+Found that the fix in _18 was only enabled if debug was enabled and it
+broke something else. Removed the fix.
+
+Updated Makefile.PL to use DBI version 1.21 instead of 1.201 to
+facilitate builds under the latest development versions of Perl.
+
+Updated code to use the *greater* of the column display size and the
+column length for allocating column buffers. This *should* work around
+a problem with DBD::ODBC and the Universe database.
+
+Added code thanks to Michael Riber to handle SQLExecDirect instead of
+SQLPrepare. There are two ways to get this:
+
+  $dbh->prepare($sql, { odbc_execdirect => 1});
+  and
+  $dbh->{odbc_execdirect} = 1;
+
+When $dbh->prepare() is called with the attribute "ExecDirect" set to
+a non-zero value, dbd_st_prepare does NOT call SQLPrepare, but sets
+the sth flag odbc_exec_direct to 1.
+
+Fixed numeric value binding when binding non-integral values. Now lets
+the driver or the database handle the conversion.
+
+Fixed Makefile.PL generation of the Makefile to force the ODBC
+directory first in the include list to help those installing ODBC
+driver managers on systems which already have ODBC drivers in their
+standard include path.
+
+=head2 Changes in DBD::ODBC 0.45_18 September 26, 2002
+
+Updated MANIFEST to include more of the mytest/* files (examples,
+tests).
+Fixed problem when attempting to get NUM_OF_FIELDS after execute
+returns no rows/columns.
+
+=head2 Changes in DBD::ODBC 0.45_17 August 26, 2002
+
+More fixes for multiple result sets. Needed to clear the
+DBIc_FIELDS_AV when re-executing the multiple-result set stored
+procedure/query.
+
+=head2 Changes in DBD::ODBC 0.45_16 August 26, 2002
+
+Updated to fix output parameters with multiple result sets. The output
+parameters are not set until the last result set has been retrieved.
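+
+(A sketch of this, with a made-up procedure name: an output parameter
+bound with bind_param_inout is only reliable once every result set has
+been retrieved.)
+
+  my $ret;
+  my $sth = $dbh->prepare('{? = call some_proc_returning_result_sets}');
+  $sth->bind_param_inout(1, \$ret, 30);
+  $sth->execute;
+  do {
+      while (my @row = $sth->fetchrow_array) {
+          print "@row\n";
+      }
+  } while ($sth->{odbc_more_results});  # move on to the next result set
+  print "return value: $ret\n";         # only now is $ret reliable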
+
+=head2 Changes in DBD::ODBC 0.45_15 August 20, 2002
+
+Updated for the new DBIc_STATE macros (all debug, as it turned out) to
+be more thread-safe in the long run.
+
+Updated for the new DBIc_LOGFP macros.
+
+Added CLONE method.
+
+Fix for SQL Server where multiple result sets are returned from a
+stored proc and one of the result sets is empty (insert/update).
+
+Added new attribute odbc_force_rebind, which forces DBD::ODBC to
+recheck for new result sets every execute. This is only
+really necessary if you have a stored procedure which returns
+different result sets with each execute, given the same
+"prepare". Many times this will be automatically set by DBD::ODBC,
+however, if there is only one result set in the stored proc, but it
+can differ with each call, then DBD::ODBC will not know to set it.
+
+Updated the DBD::ODBC POD documentation to document DBD::ODBC
+private attributes and usage.
+
+=head2 Changes in DBD::ODBC 0.45_14 August 13, 2002
+
+Added support to handle (better) DBI's begin_work().
+
+Fix for binding undef parameters on SQL Server.
+
+Fix bug when connecting twice in the same script. Trying to set the
+environment ODBC version twice against the same henv caused an error.
+
+=head2 Changes in DBD::ODBC 0.45_13 August 9, 2002
+
+Worked around a problem where SQLAllocHandleStd is not present in
+iODBC.
+Made the Changes file accessible via perldoc DBD::ODBC::Changes. In
+the near future the change log will be removed from here and put in
+the Changes file to tidy up a bit.
+
+=head2 Changes in DBD::ODBC 0.45_12 August 9, 2002
+
+Fixed global destruction access violation (which was seemingly
+random).
+
+=head2 Changes in DBD::ODBC 0.45_11 August 8, 2002
+
+Updated MANIFEST to include more samples.
+Working on checking for leaks on Linux, where I might get more
+information about the process memory.
+
+Working on fixing problems with MS SQL Server binding parameters. It
+seems that SQL Server gets "confused" if you bind a NULL first. In
+"older" (SQL Server 2000 initial release) versions of the driver, it
+would truncate char fields. In "newer" versions of the SQL Server
+driver, it seems to only truncate dates (actually, round them to the
+nearest minute). If you have problems in the SQL Server tests, please
+upgrade your driver to the latest version on Microsoft's website
+(MDAC 2.7 or above): http://www.microsoft.com/data
+
+=head2 Changes in DBD::ODBC 0.45_10 July 30, 2002
+
+Added database specific tests to ensure things are working. Some of
+the tests may not work for all people or may not be desirable. I have
+tried to keep them as safe as possible, but if they don't work, please
+let me know.
+
+Added support for the internal function GetFunctions to handle ODBC
+3's SQL_API_ODBC3_ALL_FUNCTIONS. Would have caused a memory overwrite
+on the stack if it was called.
+
+
+=head2 Changes in DBD::ODBC 0.45_9 July 30, 2002
+
+Fixed bug in procedure handling for SQL Server. Was not re-describing
+the result sets if the SQLMoreResults in the execute needed to be
+called.
+
+=head2 Changes in DBD::ODBC 0.45_8 July 25, 2002
+
+Fixed bug in tracing code when binding an undef parameter which did
+not happen to have a valid buffer with tracing level >= 2.
+
+Fixed bug when binding undef after a valid data bind on a timestamp.
+The Scale value was being calculated based upon the string that had
+been bound prior to the bind of the undef and, if that had a
+sub-second value, the scale would be set to the wrong value, i.e.
+
+  bind_param(1, '2000-05-17 00:01:00.250', SQL_TYPE_TIMESTAMP) then
+  execute then
+  bind_param(1, undef, SQL_TYPE_TIMESTAMP) then
+  execute
+
+Fixed SQL Server issue when binding a null and the length was set to 0
+instead of 1.
+
+=head2 Changes in DBD::ODBC 0.45_7 July 25, 2002
+
+Adding support for array binding, but not finished.
+
+Fixed bug where SQL Server stored procedures which perform an INSERT
+would not correctly return a result set. Thanks to Joe Tebelskis for
+finding it and Martin Evans for supplying a fix.
+
+Fixed bug where binding the empty string would cause a problem. Fixed
+and added a test in t/07bind.t.
+
+=head2 Changes in DBD::ODBC 0.45_6 July 24, 2002
+
+Added support for the new DBI ParamValues feature.
+
+=head2 Changes in DBD::ODBC 0.45_5 July 23, 2002
+
+Added odbc_err_handler and odbc_async_exec thanks to patches by David
+L. Good. See the example in mytest/testerrhandler.pl.
+
+Here are the notes about it:
+
+  I've implemented two separate functions. The first is an "error
+  handler" similar to that in DBD::Sybase. The error handler can be
+  used to intercept error and status messages from the server. It is
+  the only way (at least currently) that you can retrieve non-error
+  status messages when execution is successful.
+
+  To use the error handler, set the "odbc_err_handler" attribute on
+  your database handle to a reference to a subroutine that will act
+  as the error handler. This subroutine will be passed two args, the
+  SQLSTATE and the error message. If the subroutine returns 0, the
+  error message will be ignored. If it returns non-zero, the error
+  message will be processed normally.
+
+  The second function implemented is asynchronous execution. It's only
+  useful for retrieving server messages with an error handler during
+  an execute() that takes a long time (such as a DBCC on a large
+  database). ODBC doesn't have the concept of a callback routine like
+  Sybase's DBlib/CTlib does, so asynchronous execution is needed to be
+  able to get the server messages before the SQL statement is done
+  processing.
+
+  To use asynchronous execution, set the "odbc_async_exec" attribute
+  on your database handle to 1. Not all ODBC drivers support
+  asynchronous execution. To see if yours does, set odbc_async_exec to
+  1 and then check its value. If the value is 1, your ODBC driver can
+  do asynchronous execution. If the value is 0, your ODBC driver
+  cannot.
+
+=head2 Changes in DBD::ODBC 0.45_4 July 22, 2002
+
+More fixes for DB2 tests and timestamp handling.
+
+=head2 Changes in DBD::ODBC 0.45_3 July 22, 2002
+
+Changes to internal timestamp type handling and test structure to
+ensure the tests work for all platforms. DB2 was giving me fits due to
+bad assumptions. Thanks to Martin Evans (again) for help in
+identifying the problems and helping research solutions. This includes
+the scale/precision values to correctly store full timestamps.
+
+=head2 Changes in DBD::ODBC 0.45_2 July 19, 2002
+
+Moving API usage to the ODBC 3.0 specifications. With lots of help
+from Martin Evans (again!). Thanks Martin!!!!!
+
+=head2 Changes in DBD::ODBC 0.44 July 18, 2002
+
+0.44 was never officially released.
+Fix for do() and execute to handle DB2 correctly. Patch/discovery
+thanks to Martin Evans.
+Partly moving towards defaulting to ODBC 3.x standards.
+
+=head2 Changes in DBD::ODBC 0.43 July 18, 2002
+
+Fix for FoxPro (and potentially other) Drivers!!!!!
+
+Add support for DBI's column_info.
+
+Fix for binding an undef value which comes from dereferencing a hash.
+
+Fix to make all bound columns word (int) aligned in the buffer.
+
+=head2 Changes in DBD::ODBC 0.42 July 8, 2002
+
+Added patches to the tests to support ActiveState's automated build
+process.
+
+Fix ping() to try SQLTables for a test, instead of a strange query.
+
+=head2 Changes in DBD::ODBC 0.41 April 15, 2002
+
+Fixed problem where SQLDescribeParam would fail (probably
+a bug in the ODBC driver). Now reverts to SQL_VARCHAR if that
+happens, instead of failing the query.
+
+Fixed error report when using Oracle's driver. There is
+a known problem. Left the error in the test, but added a
+warning indicating it's a known problem.
+
+=head2 Changes in DBD::ODBC 0.40 April 12, 2002
+
+The most significant change is the change in the default binding
+type which forces DBD::ODBC to attempt to determine the bind
+type if one is not passed. I decided to make this the default
+behavior to make things as simple as possible.
+
+Fixed connection code put in 0.39 to work correctly.
+
+Two minor patches for building, one for Cygwin and one for
+when both iODBC and unixODBC libraries are installed.
+Probably need a better command line for this, but if
+someone has the problem, please let me know (and
+hopefully send a patch with it).
+
+=head2 Changes in DBD::ODBC 0.39 March 12, 2002
+
+See mytest/longbin.pl for a demonstration of inserting and retrieving
+long binary files to/from the db. Uses the MD5 algorithm to verify the
+data. Please do some similar test(s) with your database before using
+it in production. The various bind types are different for each
+database!
+
+Finally removed distribution of the old iODBC. See www.iodbc.org or
+www.unixodbc.org for newer/better versions of the ODBC driver
+manager for Unix (and others?).
+
+Added ability to force the ODBC environment version.
+
+Fix to SQLColAttributes.
+
+Changes to the connect sequence to provide better error
+messages for those using DSN-less connections.
+
+=head2 Changes in DBD::ODBC 0.38 February 12, 2002
+
+Fixed do function (again) thanks to work by Martin Evans.
+
+=head2 Changes in DBD::ODBC 0.37 February 10, 2002
+
+Patches for get_info where the return type is a string. Patches
+thanks to Steffen Goldner. Thanks Steffen!
+
+Patched get_info to NOT attempt to get data for SQL_DRIVER_HSTMT
+and SQL_DRIVER_HDESC as they expect data in and have limited value
+(IMHO).
+
+Further fixed the build for ODBC 2.x drivers. The new SQLExecDirect
+code had SQLAllocHandle which is a 3.x function, not a 2.x function.
+Sigh. I should have caught that the first time. Signed, the Mad-and-
+not-thorough-enough-patcher.
+
+Additionally, a random core dump occurred in the tests, based upon the
+new SQLExecDirect code. This has been fixed.
+
+
+=head2 Changes in DBD::ODBC 0.36 February 10, 2002
+
+Fixed build for ODBC 2.x drivers. The new SQLExecDirect code
+had SQLFreeHandle which is a 3.x function, not a 2.x function.
+
+=head2 Changes in DBD::ODBC 0.35 February 9, 2002
+
+Fixed (finally) multiple result sets with differing
+numbers of columns. The final fix was to call
+SQLFreeStmt(SQL_UNBIND) before repreparing
+the statement for the next query.
+
+Added more to the multi-statement tests to ensure
+the data retrieved was what was expected.
+
+Now, DBD::ODBC overrides DBI's do to call SQLExecDirect
+for simple statements (those without parameters).
+Please advise if you run into problems. Hopefully,
+this will provide some small speed improvement for
+simple "do" statements.
+You can also call
+$dbh->func($stmt, ExecDirect). I'm not sure this has
+great value unless you need to ensure SQLExecDirect
+is being called. Patches thanks to Merijn Broeren.
+Thanks Merijn!
+
+=head2 Changes in DBD::ODBC 0.34 February 7, 2002
+
+Further revamped the tests to attempt to determine if SQLDescribeParam
+will work to handle the binding types. The t/08bind.t test attempts
+to determine if SQLDescribeParam is supported. Note that Oracle's
+ODBC driver under NT doesn't work correctly when binding dates
+using the ODBC date formatting {d } or {ts }. So, test #3 will
+fail in t/08bind.t.
+
+New support for primary_key_info thanks to patches by Martin Evans.
+New support for catalog, schema, table and table_type in table_info
+thanks to Martin Evans. Thanks Martin for your work and your
+continuing testing, suggestions and general support!
+
+Support for the upcoming DBI get_info.
+
+=head2 Changes in DBD::ODBC 0.33_3 February 4, 2002
+
+Revamped tests to include tests for multiple result sets.
+The tests are ODBC driver platform specific and will be skipped
+for drivers which do not support multiple result sets.
+
+=head2 Changes in DBD::ODBC 0.33_2 February 4, 2002
+
+Finally tested the new binding techniques with SQL Server 2000,
+but there is a nice little bug in their MDAC and ODBC
+drivers according to knowledge base article # Q273813, titled
+
+  "FIX: "Incorrect Syntax near the Keyword 'by' "
+  Error Message with Column Names of "C", "CA" or "CAS" (Q273813)
+
+DBD::ODBC now does not name any of the columns A, B, C, or D;
+they are now COL_A, COL_B, COL_C, COL_D.
+
+  *** NOTE: *** I AM STRONGLY CONSIDERING MAKING THE NEW
+  BINDING the default for future versions. I do not believe
+  it will break much existing code (if any) as anyone binding
+  to non VARCHAR (without the ODBC driver doing a good conversion
+  from the VARCHAR) will have a problem. It may be subtle, however,
+  since much code will work, but say, binding dates may not with
+  some drivers.
+
+  Please comment soon...
+
+=head2 Changes in DBD::ODBC 0.33_1 February 4, 2002
+
+*** WARNING: ***
+
+  Changes to the binding code to allow the use of SQLDescribeParam
+  to determine the type of the column being bound. This is
+  experimental and activated by setting
+
+  $dbh->{odbc_default_bind_type} = 0; # before creating the query...
+
+Currently the default value of odbc_default_bind_type = SQL_VARCHAR
+which mimics the previous behavior. If you set
+odbc_default_bind_type to 0, then SQLDescribeParam will be
+called to determine the column type. Not ALL databases
+handle this correctly. For example, Oracle returns
+SQL_VARCHAR for all types and attempts to convert to the
+correct type for us. However, if you use the ODBC escaped
+date/time format such as {ts '1998-05-13 00:01:00'} then
+Oracle complains. If you bind this with a SQL_TIMESTAMP type,
+however, Oracle's ODBC driver will parse the time/date correctly.
+Use at your own risk! (A short sketch follows the quoted identifier
+examples below.)
+
+Fix to dbdimp.c to allow quoted identifiers to begin/end
+with either " or '.
+The following will not be treated as if they have a bind placeholder:
+
+  "isEstimated?"
+  '01-JAN-1987 00:00:00'
+  'Does anyone insert a ?'
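+
+(To make the binding experiment above concrete, here is a sketch; the
+table is made up and, as warned above, not every driver describes
+parameters correctly.)
+
+  $dbh->{odbc_default_bind_type} = 0;  # 0 = use SQLDescribeParam
+  my $sth = $dbh->prepare('insert into some_table (created) values (?)');
+  # with Oracle's driver the ODBC escaped timestamp may still need an
+  # explicit type to be parsed correctly
+  $sth->bind_param(1, "{ts '1998-05-13 00:01:00'}", DBI::SQL_TIMESTAMP);
+  $sth->execute;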
+
+
+=head2 Changes in DBD::ODBC 0.32 January 22, 2002
+
+More SAP patches to Makefile.PL to eliminate the call to Data Sources.
+
+A patch to the test (for SAP and potentially others), to allow
+fallback to SQL_TYPE_DATE in the tests.
+
+=head2 Changes in DBD::ODBC 0.31 January 18, 2002
+
+Added SAP patches to build directly against the SAP driver instead of
+the driver manager thanks to Flemming Frandsen (thanks!).
+
+Added support to fix ping for Oracle8. May break other databases,
+so please report this as soon as possible. The downside is that
+we need to actually execute the dummy query.
+
+
+=head2 Changes in DBD::ODBC 0.30 January 8, 2002
+
+Added ping patch for Solid courtesy of Marko Asplund.
+
+Updated disconnect to rollback if autocommit is not on.
+This should silence some errors when disconnecting.
+
+Updated SQL_ROWSET_SIZE attribute. Needed to force it to
+odbc_SQL_ROWSET_SIZE to obey the DBI rules.
+
+Added odbc_SQL_DRIVER_ODBC_VER, which obtains the version of
+the driver upon connect. This internal capture of the version is
+a read-only attribute and is used during array binding of parameters.
+
+Added odbc_ignore_named_placeholders attribute to facilitate
+creating triggers within SAPDB and Oracle, to name two. The
+syntax in these DBs is to allow use of :old and :new to
+access column values before and after updates. Example:
+
+  $dbh->{odbc_ignore_named_placeholders} = 1; # set it for all future statements
+                                              # ignores :foo, :new, etc, but not :1 or ?
+  $dbh->do("create or replace etc :new.D = sysdate etc");
+
+
+=head2 Changes in DBD::ODBC 0.29 August 22, 2001
+
+Cygwin patches from Neil Lunn (untested by me). Thanks Neil!
+
+SQL_ROWSET_SIZE attribute patch from Andrew Brown:
+
+  There are only 2 additional lines allowing for the setting of
+  SQL_ROWSET_SIZE as a db handle option.
+
+  The purpose of my madness is simple. SqlServer (7 anyway) by default
+  supports only one select statement at once (using std ODBC cursors).
+  According to the SqlServer documentation you can alter the default
+  setting of three values to force the use of server cursors - in
+  which case multiple selects are possible.
+
+  The code change allows for:
+  $dbh->{SQL_ROWSET_SIZE} = 2; # Any value > 1
+
+  For this very purpose.
+
+  The setting of SQL_ROWSET_SIZE only affects the extended fetch
+  command as far as I can work out and thus setting this option
+  shouldn't affect DBD::ODBC operations directly in any way.
+
+  Andrew
+
+
+VMS and other patches from Martin Evans (thanks!)
+
+[1] a fix for Makefile.PL to build DBD::ODBC on OpenVMS.
+
+[2] fix trace message coredumping after SQLDriverConnect.
+
+[3] fix call to SQLCancel which fails to pass the statement handle
+properly.
+
+[4] consume diagnostics after SQLDriverConnect/SQLConnect call or they
+    remain until the next error occurs and it then looks confusing
+    (this is due to the ODBC spec for SQLError). e.g. test 02simple
+    returns a data truncated error only now instead of all the
+    informational diags that are left from the connect call, like the
+    "database changed", "language changed" messages you get from MS
+    SQL Server.
+
+Replaced C++ style comments with C style to support more platforms
+more easily.
+
+Fixed bug which used the single quote (') instead of a double quote
+(") for "literal" column names. This helped when having a colon (:) in
+the column name.
+
+Fixed bug which would cause DBD::ODBC to core-dump (crash) if the DBI
+tracing level was greater than 3.
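+
+(As an aside, a sketch of the tracing involved; the DSN is made up.
+DBI->trace sets the global trace level and levels above 3 used to
+trigger the crash fixed above.)
+
+  use DBI;
+  DBI->trace(4);    # global trace level > 3 used to crash DBD::ODBC
+  my $dbh = DBI->connect('dbi:ODBC:mydsn', $user, $pass);
+  $dbh->trace(4);   # or trace just this handle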
+
+Fixed problem where ODBC.pm would have a "use of uninitialized
+variable" if calling DBI's type_info.
+
+Fixed problem where ODBC.xs *may* have an overrun when calling
+SQLDataSources.
+
+Fixed problem with DBI 1.14, where fprintf was being called instead of
+PerlIO_printf for debug information.
+
+Fixed problem building with unixODBC per patch from Nick Gorham.
+
+Added ability to bind_param_inout() via patches from Jeremy Cooper.
+Haven't figured out a good, non-db specific way to test. My current
+test platform attempts to determine the connected database type via
+ugly hacks and will test, if it thinks it can. Feel free to patch and
+send me something...Also, my current Oracle ODBC driver fails
+miserably and dies.
+
+Updated t/02simple.t to not print an error when there is not one.
+
+=head2 Changes in DBD::ODBC 0.28 March 23, 2000
+
+Added support for SQLSpecialColumns thanks to a patch provided by
+Martin J. Evans [martin@easysoft.com].
+
+Fixed bug introduced in 0.26 which occurred if SQLMoreResults was not
+supported by the driver.
+
+=head2 Changes in DBD::ODBC 0.27 March 8, 2000
+
+Examined patch for ping method to repair problem reported by Chris
+Bezil. Thanks Chris!
+
+Added simple test for the ping method working which should identify
+this in the future.
+
+=head2 Changes in DBD::ODBC 0.26 March 5, 2000
+
+Put in patch for returning only positive rowcounts from
+dbd_st_execute. The original patch was submitted by Jon Smirl and put
+back in by David Good. The reasoning seems sound, so I put it back
+in. However, any databases that return negative rowcounts for specific
+reasons will no longer do so.
+
+Put in David Good's patch for multiple result sets. Thanks David! See
+mytest\moreresults.pl for an example of usage.
+
+Added readme.txt in iodbcsrc explaining an issue there with iODBC
+2.50.3 and C<data_sources>.
+
+Put in rudimentary cancel support via SQLCancel. Call $sth->cancel to
+utilize. However, it is largely untested by me, as I do not have a
+good sample for this yet. It may come in handy with threaded perl
+someday, or it may work in a signal handler.
+
+=head2 Changes in DBD::ODBC 0.25 March 4, 2000
+
+Added conditional compilation for SQL_WVARCHAR and SQL_WLONGVARCHAR.
+If they are not defined by your driver manager, they will not be
+compiled into the code. If you would like to support these types on
+some platforms, you may be able to
+#define SQL_WVARCHAR (-9)
+#define SQL_WLONGVARCHAR (-10)
+
+Added more long tests with binding in t\09bind.t. Note the use of
+bind_param!
+
+=head2 Changes in DBD::ODBC 0.24 February 24, 2000
+
+Fixed Test #13 in 02simple.t. It would fail, improperly, if there was
+only one data source defined.
+
+Fixed (hopefully) SQL Server 7 and ntext type "Out of Memory!" errors
+via a patch from Thomas Lowery. Thanks Thomas!
+
+Added more support for Solid to handle the fact that it does not
+support data_sources nor SQLDriverConnect. Patch supplied by Samuli
+Karkkainen [skarkkai@woods.iki.fi]. Thanks! It's untested by me,
+however.
+
+Added some information from Adam Curtin about a bug in iodbc 2.50.3's
+data_sources. See iodbcsrc\readme.txt.
+
+Added information in this pod from Stephen Arehart regarding DSNLess
+connections.
+
+Added fix for the sp_prepare/sp_execute bug reported by Paul G. Weiss.
+
+Added some code for handling a hint on disconnect where the user gets
+an error for not committing.
+
+=head2 Changes in DBD::ODBC 0.22 September 8, 1999
+
+Fixed for threaded perl builds.
+Note that this was tested only on Win32, with no threads in use and
+using DBI 1.13.
+Note, for ActiveState/PERL_OBJECT builds, DBI 1.13_01 is required as
+of 9/8/99. If you are using ActiveState's perl, this can be installed
+by using PPM.
+
+
+=head2 Changes in DBD::ODBC 0.21
+
+Thanks to all who provided patches!
+
+Added ability to connect to an ODBC source without prior creation of a
+DSN. See mytest/contest.pl for an example with MS Access.
+(Also note that you will need documentation for your ODBC driver --
+which, sadly, can be difficult to find.)
+
+Fixed case sensitivity in tests.
+
+Hopefully fixed test #4 in t/09bind.t. Updated it to insert the date
+column and updated it to find the right type of the column. However,
+it doesn't seem to work on my Linux test machine, using the OpenLink
+drivers with MS-SQL Server (6.5). It complains about binding the date
+time. The same test works under Win32 with SQL Server 6.5, Oracle
+8.0.3 and MS Access 97 ODBC drivers. Hmmph.
+
+Fixed some binary type issues (patches from Jon Smirl).
+
+Added SQLStatistics, SQLForeignKeys, SQLPrimaryKeys (patches from Jon
+Smirl). Thanks (again), Jon, for providing the build_results function
+to help reduce duplicate code!
+
+Worked on LongTruncOk for OpenLink drivers.
+
+Note: those trying to bind variables need to remember that you should
+use the following syntax:
+
+  use DBI;
+  ...
+  $sth->bind_param(1, $str, DBI::SQL_LONGVARCHAR);
+
+Added support for unixODBC (per Nick Gorham).
+Added support for OpenLink's udbc (per Patrick van Kleef).
+Added support for esodbc (per Martin Evans).
+Added support for Easysoft (per Bob Kline).
+
+Changed table_info to produce a list of views, too.
+Fixed bug in the SQLColumns call.
+Fixed blob handling via patches from Jochen Wiedmann.
+Added data_sources capability via snarfing code from DBD::Adabas
+(Jochen Wiedmann).
+
+=head2 Changes in DBD::ODBC 0.20 August 14, 1998
+
+SQLColAttributes fixes for SQL Server and MySQL. Fixed the tables
+method by renaming it to the new table_info method. Added new
+type_info_all method. Improved Makefile.PL support for Adabas.
+
+=head2 Changes in DBD::ODBC 0.19
+
+Added iODBC source code to the distribution. Fall back to using iODBC
+header files in some cases.
+
+=head2 Changes in DBD::ODBC 0.18
+
+Enhancements to the build process. Better handling of errors in the
+error handling code.
+
+=head2 Changes in DBD::ODBC 0.17
+
+This release is mostly due to the good work of Jeff Urlwin.
+My eternal thanks to you Jeff.
+
+Fixed "SQLNumResultCols err" on joins and 'order by' with some
+drivers (see Microsoft Knowledge Base article #Q124899).
+Thanks to Paul O'Fallon for that one.
+
+Added more (probably incomplete) support for unix ODBC in Makefile.PL.
+
+Increased default SQL_COLUMN_DISPLAY_SIZE and SQL_COLUMN_LENGTH to
+2000 for drivers that don't provide a way to query them dynamically.
+Was 100!
+
+When fetch reaches the end-of-data it automatically frees the internal
+ODBC statement handle and marks the DBI statement handle as inactive
+(thus an explicit 'finish' is *not* required).
+
+Also:
+
+LongTruncOk for Oracle ODBC (where fbh->datalen < 0).
+
+Added tracing into SQLBindParameter (to help diagnose an Oracle ODBC
+bug).
+
+Fixed/worked around a bug in the latest Oracle ODBC driver where in
+SQLColAttribute cbInfoValue was changed to 0 to indicate fDesc had a
+value.
+
+Added workaround for compiling w/ActiveState PRK (PERL_OBJECT).
+
+Updated tests to include date insert and type.
+
+Added more "backup" SQL_xxx types for tests.
+
+Updated bind test to test binding select.
+
+  NOTE: bind insert fails on the Paradox driver (don't know why)
+
+Added support for: (see notes below)
+
+  SQLGetInfo via $dbh->func(xxx, GetInfo)
+  SQLGetTypeInfo via $dbh->func(xxx, GetTypeInfo)
+  SQLDescribeCol via $sth->func(colno, DescribeCol)
+  SQLColAttributes via $sth->func(xxx, colno, ColAttributes)
+  SQLGetFunctions via $dbh->func(xxx, GetFunctions)
+  SQLColumns via $dbh->func(catalog, schema, table, column, 'columns')
+
+Fixed $DBI::err to reflect the real ODBC error code
+which is a 5 char code, not necessarily numeric.
+
+Fixed fetches when LongTruncOk == 1.
+
+Updated tests to pass more often (hopefully 100% <G>).
+
+Updated tests to test long reading, inserting and the LongTruncOk
+attribute.
+
+Updated tests to be less driver specific.
+
+They now rely upon SQLGetTypeInfo I<heavily> in order to create the
+tables. The tests use this function to "ask" the driver for the name
+of the SQL type to correctly create long, varchar, etc. types. For
+example, in Oracle the SQL_VARCHAR type is VARCHAR2, while MS Access
+uses TEXT for the SQL name. Again, in Oracle the SQL_LONGVARCHAR is
+LONG, while in Access it's MEMO. The tests currently handle this
+correctly (at least with Access and Oracle; MS SQL Server will be
+tested also).
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/DBD/ODBC/FAQ.pm b/Master/tlpkg/tlperl/lib/DBD/ODBC/FAQ.pm
new file mode 100755
index 00000000000..15714ea92f4
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/ODBC/FAQ.pm
@@ -0,0 +1,681 @@
+=head1 NAME
+
+DBD::ODBC::FAQ - Frequently Asked Questions for DBD::ODBC
+
+=head1 SYNOPSIS
+
+  perldoc DBD::ODBC::FAQ
+
+=head1 VERSION
+
+($Revision: 13204 $)
+
+=head1 QUESTIONS
+
+=head2 How do I read more than N characters from a Memo | BLOB | LONG field?
+
+See LongReadLen in the DBI docs.
+
+Example:
+
+  $dbh->{LongReadLen} = 20000;
+  $sth = $dbh->prepare("select long_col from big_table");
+  $sth->execute;
+  etc
+
+=head2 What is DBD::ODBC?
+
+=head2 Why can't I connect?
+
+=head2 Do I need an ODBC driver?
+
+=head2 What is the ODBC driver manager?
+
+These general questions lead to needing some definitions.
+
+=over 4
+
+=item ODBC Driver
+
+The ODBC Driver is the driver that the ODBC manager uses to connect
+and interact with the RDBMS. You B<DEFINITELY> need this to connect to
+any database. For Win32, they are plentiful and installed with many
+applications. For Linux/Unix, you can find a fairly comprehensive list
+at L<http://www.unixodbc.org/drivers.html>.
+
+=item ODBC Driver Manager
+
+The ODBC driver manager is the interface between an ODBC application
+(DBD::ODBC in this case) and the ODBC driver. The driver manager
+principally provides the ODBC API so ODBC applications may link with a
+single shared object (or dll) and be able to talk to a range of ODBC
+drivers. At run time the application provides a connection string
+which defines the ODBC data source it wants to connect to and this in
+turn defines the ODBC driver which will handle this data source.
+
+=head2 Where do I get an ODBC driver manager for Unix/Linux?
+
+DBD::ODBC used to come bundled with a driver manager, but this became
+inconvenient when the driver manager was updated.
+
+The two main ODBC Driver Managers for Unix are unixODBC
+(L<http://www.unixodbc.org>) and iODBC (L<http://www.iodbc.org>).
+
+If you are running a packaged Linux like RedHat, Ubuntu, Fedora, Suse
+etc. you'll usually find it packaged with unixODBC, and using the
+package manager to install it is fairly straightforward. However, if
+the driver manager is split into multiple packages, make sure you also
+install the development package, as that contains the C header files
+required by DBD::ODBC.
+
+If you cannot find an ODBC Driver Manager package for your OS you can
+download the source tar files for either of the driver managers above
+and build it yourself.
+
+=head2 How do I access a MS SQL Server database from Linux/UNIX?
+
+You have loads of choices (in no particular order):
+
+* using DBI::ProxyServer or DBD::Gofer. You'll need the former if you
+  use transactions.
+
+* using a commercial ODBC driver or bridge like the ones from Easysoft
+or OpenLink.
+
+* using FreeTDS, an open source TDS library which includes an ODBC
+driver (see the sketch after this list).
+
+* using DBD::Sybase and the Sybase libraries.
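+
+As an illustration of the FreeTDS route, a hedged sketch (it assumes
+you have already defined a data source called C<mssql> in odbc.ini
+which points at the FreeTDS ODBC driver; the name is hypothetical):
+
+ use DBI;
+
+ # 'mssql' must name a DSN configured to use the FreeTDS ODBC driver
+ my $dbh = DBI->connect('dbi:ODBC:mssql', 'user', 'pass')
+     or die $DBI::errstr;
+ my ($version) = $dbh->selectrow_array('select @@version');
+ print "$version\n";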
+
+=head2 How do I access a MS-Access database from Linux?
+
+There are basically two choices:
+
+* a commercial ODBC bridge like the ones from Easysoft or OpenLink.
+
+* using mdbtools, although as of writing it has not been updated since
+June 2004, only provides read access and seems to be a little buggy.
+
+=head2 Almost all of my tests for DBD::ODBC fail. They complain about not being able to connect or the DSN is not found.
+
+Please, please test your configuration of ODBC and driver before
+trying to test DBD::ODBC. Most of the time, this stems from the fact
+that the DSN (or ODBC) is not configured properly. unixODBC comes with
+a small program called isql and iODBC comes with odbctest, and you
+should use these to check that your ODBC configuration is working
+properly first.
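+
+Once isql or odbctest can connect, a one-liner (C<mydsn>, user and
+password are placeholders for your own tested values) confirms that
+DBD::ODBC sees the same configuration:
+
+ perl -MDBI -e 'DBI->connect("dbi:ODBC:mydsn","user","pass",{RaiseError=>1}) and print "ok\n"'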
+
+=head2 I'm attempting to bind a Long Var char (or other specific type)
+and the binding is not working.
+
+The code I'm using is below:
+
+ $sth->bind_param(1, $str, $DBI::SQL_LONGVARCHAR);
+                           ^^^
+
+The problem is that DBI::SQL_LONGVARCHAR is not the same as
+$DBI::SQL_LONGVARCHAR, and $DBI::SQL_LONGVARCHAR is an error!
+
+It should be:
+
+ $sth->bind_param(1, $str, DBI::SQL_LONGVARCHAR);
+
+=head2 Does DBD::ODBC support Multiple Active Statements?
+
+Multiple Active Statements (MAS) are concurrent statements created
+from the same database handle which both have pending actions on them
+(e.g. they have both executed a select statement but not yet retrieved
+all the available rows).
+
+DBD::ODBC does support MAS, but whether you can actually use MAS is
+down to the ODBC driver.
+
+By default, MS SQL Server did not support multiple active statements
+if any of them were select statements. You could get around this (with
+caution) by changing to a dynamic cursor. There is a "hack" in
+DBD::ODBC which can be used to enable MAS, but you have to fully
+understand the implications of doing so (see
+L</DBD/ODBC/odbc_SQL_ROWSET_SIZE> and L</DBD/ODBC/odbc_cursortype>).
+
+In MS SQL Server 2005, there is a new thing called MARS (Multiple
+Active Result Sets) which allows multiple active select statements,
+but it has some nasty implications if you are also doing transactions.
+To enable MARS from DBD::ODBC add "MARS_Connection=Yes" to the
+connection string, as in:
+
+ my $h = DBI->connect('dbi:ODBC:DSN=mydsn;MARS_Connection=Yes;');
+
+For other drivers it depends. I believe various Oracle ODBC drivers do
+support multiple active statements, as does myodbc.
+
+Think carefully before using multiple active statements. It is
+probably not portable and there is nearly always a better way of doing
+it.
+
+If anyone wants to report success with a particular driver and
+multiple active statements I will collect them here.
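+
+To make the idea concrete, a sketch of what two active statements look
+like in DBI terms (whether this actually runs is entirely down to your
+driver, as described above; the DSN and table names are placeholders):
+
+ use DBI;
+
+ my $h = DBI->connect('dbi:ODBC:DSN=mydsn;MARS_Connection=Yes;',
+                      'user', 'pass', { RaiseError => 1 });
+ my $s1 = $h->prepare('select a from table1');
+ my $s2 = $h->prepare('select b from table2');
+ $s1->execute;
+ $s2->execute;                       # second active statement
+ my $row1 = $s1->fetchrow_arrayref;  # both have rows pending
+ my $row2 = $s2->fetchrow_arrayref;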
+
+=head2 Why do I get "Datetime field overflow" when attempting to insert a
+date into Oracle?
+
+If you are using the Oracle or Microsoft ODBC drivers then you may get
+the following error when inserting dates into an Oracle database:
+
+ [Oracle][ODBC]Datetime field overflow. (SQL-22008)
+
+If you do, then check v$nls_parameters and v$parameter to see if you
+are using a date format containing the RR format. e.g.,
+
+ select * from v$nls_parameters where parameter = 'NLS_DATE_FORMAT'
+ select * from v$parameter where name = 'nls_date_format'
+
+If you see a date format like 'DD-MON-RR' (i.e., one containing an RR)
+then all I can suggest is that you change the date format for your
+session, as I have never been able to bind a date using this format.
+You can do this with:
+
+ alter session set nls_date_format='YYYY/MM/DD'
+
+and use any format you like, but keep away from 'RR'.
+
+You can find some test code in the file examples/rtcpan_28821.pl which
+demonstrates this problem. This was originally an rt.cpan issue which
+can be found at L<http://rt.cpan.org/Ticket/Display.html?id=28821>.
+
+As an aside, if anyone is reading this and can shed some light on the
+problem, I'd love to hear from you. The technical details are:
+
+ create table rtcpan28821 (a date)
+ insert into rtcpan28821 values('23-MAR-62') fails
+
+Looking at the ODBC trace, SQLDescribeParam returns:
+
+ data type: 93, SQL_TYPE_TIMESTAMP
+ size: 19
+ decimal digits: 0
+ nullable: 1
+
+and DBD::ODBC calls SQLBindParameter with:
+
+ ValueType: SQL_C_CHAR
+ ParameterType: SQL_TYPE_TIMESTAMP
+ ColumnSize: 9
+ DecimalDigits: 0
+ Data: 23-MAR-62
+ BufferLength: 9
+
+=head2 Why do my SQL Server temporary objects disappear?
+
+If you are creating temporary objects (e.g., temporary tables) in
+SQL Server, you may find they have disappeared when you attempt to use
+them. Temporary objects only have a lifetime of the session they
+are created in, but in addition they cannot be created using
+prepare/execute. e.g., the following fails:
+
+ $s = $h->prepare('select * into #tmp from mytable');
+ $s->execute;
+ $s = $h->selectall_arrayref('select * from #tmp');
+
+with "Invalid object name '#tmp'". You should read
+L<http://technet.microsoft.com/en-US/library/ms131667.aspx>, which
+basically says I<Prepared statements cannot be used to create
+temporary objects on SQL Server 2000 or later...>. The proper way to
+avoid this is to use the C<do> method, but if you cannot do that then
+you need to add the L</odbc_exec_direct> attribute to your prepare as
+follows:
+
+ my $s = $h->prepare('select * into #tmp from mytable',
+                     { odbc_exec_direct => 1});
+
+See L</odbc_exec_direct>.
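+
+For completeness, a sketch of the C<do> route mentioned above (the
+table names are placeholders):
+
+ # do() sends the SQL straight to the server, so the temporary
+ # table survives for the rest of this session
+ $h->do('select * into #tmp from mytable');
+ my $rows = $h->selectall_arrayref('select * from #tmp');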
+
+=head2 Why can't I connect to my data source on Windows 64?
+
+If you are running a 32-bit Perl on a 64-bit Windows machine, you will
+need to be aware that there are two ODBC administrators and you need
+to create your DSNs with the right one. The ODBC Administrator you get
+to from Control Panel, Administrative Tools, Data Sources is the
+64-bit one, and data sources created here will not be visible or
+usable from 32-bit applications. The ODBC administrator you need to
+use for 32-bit applications can be found at
+X:\windows\syswow64\odbcad32.exe.
+
+=head2 How do I use DBD::ODBC with web servers under Win32?
+
+=over 4
+
+=item General Commentary re web database access
+
+This should be a DBI FAQ, actually, but it has somewhat of a
+Win32/ODBC twist to it.
+
+Typically, the Web server is installed as an NT service or a Windows
+95/98 service. This typically means that the web server itself does
+not have the same environment and permissions the web developer does.
+This situation, of course, can and does apply to Unix web servers.
+Under Win32, however, the problems are usually slightly different.
+
+=item Defining your DSN -- which type should I use?
+
+Under Win32 take care to define your DSN as a system DSN, not as a
+user DSN. The system DSN is a "global" one, while a user DSN is local
+to one user. Typically, as stated above, the web server is "logged in"
+as a different user than the web developer. This helps cause the
+situation where someone asks why a script succeeds from the command
+line, but fails when called from the web server.
+
+=item Defining your DSN -- careful selection of the file itself is important!
+
+For file-based drivers, rather than client-server drivers, the file
+path is VERY important. There are a few things to keep in mind. This
+applies to, for example, MS Access databases.
+
+1) If the file is on an NTFS partition, check to make sure that the
+Web B<service> user has permissions to access that file.
+
+2) If the file is on a remote computer, check to make sure the Web
+B<service> user has permissions to access the file.
+
+3) If the file is on a remote computer, try using a UNC path to the
+file, rather than an X:\ notation. This can be VERY important, as
+services don't quite get the same access permissions to the mapped
+drive letters B<and>, more importantly, the drive letters themselves
+are GLOBAL to the machine. That means that if the service tries to
+access Z:, the Z: it gets can depend upon the user who is logged into
+the machine at the time. (I've tested this while I was developing a
+service -- it's ugly and worth avoiding at all costs.)
+
+Unfortunately, the Access ODBC driver that I have does not allow one
+to specify the UNC path, only the X:\ notation. There is at least one
+way around that. The simplest is probably to use Regedit and go to
+(assuming it's a system DSN, of course)
+HKEY_LOCAL_MACHINE\SOFTWARE\ODBC\"YOUR DSN". You will see a few
+settings which are typically driver specific. The important value to
+change for the Access driver, for example, is the DBQ value. That's
+actually the file name of the Access database.
+
+=back
+
+=head2 How do I connect without a DSN?
+
+The ability to connect without a full DSN was introduced in version
+0.21.
+
+Example (using MS Access):
+
+ my $DSN = 'driver=Microsoft Access Driver(*.mdb);dbq=\\\\cheese\\g$\\perltest.mdb';
+ my $dbh = DBI->connect("dbi:ODBC:$DSN", '','') or die "$DBI::errstr\n";
+
+The above sample uses Microsoft's UNC naming convention to point to
+the MS Access file (\\cheese\g$\perltest.mdb). The dbq parameter tells
+the Access driver which file to use for the database.
+
+Example (using MS SQL Server):
+
+ my $DSN = 'driver={SQL Server};Server=server_name;database=database_name;uid=user;pwd=password;';
+ my $dbh = DBI->connect("dbi:ODBC:$DSN") or die "$DBI::errstr\n";
+
+=head2 Why do I get a data truncated error from SQL Server when inserting with parameters?
+
+DBD::ODBC attempts to use the ODBC API C<SQLDescribeParam> to obtain
+information about parameters in parameterised SQL. e.g.,
+
+ insert into mytable (column1) values(?)
+
+The C<?> is a parameter marker. You supply the parameter value (in
+this case parameter 1) with a call to the C<bind_param> method or by
+adding the parameter to the C<execute> method call. When DBD::ODBC
+sees the parameter marker in the SQL it will call C<SQLDescribeParam>
+to obtain information about the parameter size and type etc. (assuming
+your ODBC driver supports C<SQLDescribeParam>).
+
+When you call C<SQLDescribeParam> in the MS SQL Server ODBC driver,
+the driver will scan your SQL attempting to discover the columns in
+your database the parameters align with. e.g., in the above case the
+parameter to be bound is linked with "column1", so C<SQLDescribeParam>
+should return information about "column1". The SQL Server ODBC driver
+finds information about "column1" (in this example) by creating SQL
+such as:
+
+ select column1 from mytable where 1 = 2
+
+and then looking at the column details. Unfortunately, some SQL
+confuses SQL Server, and it will generate SQL to find out about your
+parameters which examines the wrong columns; on rare occasions it may
+even generate totally incorrect SQL. The test case F<t/rt_39841.t>
+demonstrates a couple of these.
+
+The upshot of this is that DBD::ODBC is sometimes lied to about
+parameters and will then bind your parameters incorrectly. This can
+lead to later errors when C<execute> is called. This happens most
+commonly when using parameters in SQL with sub-selects. For example:
+
+ create table one (a1 integer, a2 varchar(10))
+ create table two (b1 varchar(10), b2 varchar(20))
+
+ insert into one values(1, 'aaaaaaaaaa')
+ insert into two values('aaaaaaaaaa','bbbbbbbbbbbbbbbbbbbb')
+
+ select b1, (select a2 from one where a2 = b1) from two where b2 = ?
+
+ param 1 bound as 'bbbbbbbbbbbbbbbbbbbb'
+
+Clearly in this example the one and only parameter is for two.b2,
+which is a varchar(20), but when SQL Server rearranges your SQL to
+describe the parameter it issues:
+
+ select a2 from one where 1 = 0
+
+and DBD::ODBC is told the parameter is a VARCHAR(10). In DBD::ODBC
+1.17 this would then lead to a data truncation error, because
+parameter 1 would be bound as 'bbbbbbbbbbbbbbbbbbbb' but with a column
+size of 10, as that is what SQLDescribeParam returned. DBD::ODBC
+1.17_1 (and later) works around this problem for VARCHAR columns
+because it is obvious a VARCHAR parameter of length 20 cannot have a
+column size of 10, so the column size is increased to the length of
+the parameter.
+
+However, a more difficult error can occur when SQL Server describes
+the parameter as totally the wrong type. The first example in
+F<t/rt_39841.t> demonstrates this. SQL Server describes a VARCHAR
+parameter as an integer, which DBD::ODBC has little choice but to
+believe; but when something like 'bbbbbbbbbb' is bound as an integer,
+SQL Server will then return an error like "invalid value for cast
+specification". The only way around this is to specifically name the
+parameter type. e.g.,
+
+ create table one (a1 integer, a2 varchar(20))
+ create table two (b1 double precision, b2 varchar(8))
+
+ insert into one values(1, 'aaaaaaaaaa')
+ insert into two values(1, 'bbbbbbbb')
+
+ select b1, ( select a2 from one where a1 = b1 ) from two where b2 = ?
+
+ param 1 bound as 'bbbbbbbbbb'
+
+Clearly parameter 1 is a varchar(8), but SQL Server rearranges the SQL
+to:
+
+ select a1 from one where 1 = 2
+
+when it should have run
+
+ select b2 from two where 1 = 2
+
+As a result parameter 1 is described as an integer, and this leads to
+the problem. To work around this problem you would need to bind
+parameter 1 naming the SQL type of the parameter, using something
+like:
+
+ use DBI qw(:sql_types);
+
+ $sth->bind_param(1, 'bbbbbbbbbb', SQL_VARCHAR);
+
+as omitting SQL_VARCHAR will cause DBD::ODBC to use the type
+C<SQLDescribeParam> returned.
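+
+Tying those snippets together as one hedged sketch, using the tables
+and data from the example above:
+
+ use DBI qw(:sql_types);
+
+ my $s = $h->prepare(
+     'select b1, (select a2 from one where a1 = b1) from two where b2 = ?');
+ # name the type explicitly so the incorrect description (integer)
+ # returned by SQLDescribeParam is never used
+ $s->bind_param(1, 'bbbbbbbb', SQL_VARCHAR);
+ $s->execute;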
+
+=head2 Why do I get invalid value for cast specification (22018) from SQL Server when inserting with parameters?
+
+See L<http://support.microsoft.com/kb/269011> on the Microsoft web
+site for a bug you may have hit.
+
+In Perl the most common reason for this is that you have bound column
+data in SQL which does not match the column type in the database, and
+the ODBC driver cannot perform the necessary conversion. DBD::ODBC
+mostly binds all column data as strings and lets the ODBC driver
+convert the string to the right column type. If you supply a string
+which cannot be converted to the native column type you will get this
+error, e.g., if you attempt to bind a non-datetime string to a
+datetime column or a non-numeric string to a numeric column.
+
+=head2 Why do I get strange results with SQL Server and named parameters?
+
+If you are using a MS SQL Server driver and named parameters to
+procedures, be very careful to use them in the same order they are
+defined in the procedure. i.e., if you have a procedure like this:
+
+ create procedure test
+     @param1 varchar(50),
+     @param2 smallint
+ as
+ begin
+   ..
+ end
+
+then ensure that if you call it using named parameters you specify
+them in the same order they are declared:
+
+ exec test @param1=?,@param2=?
+
+and not
+
+ exec test @param2=?,@param1=?
+
+The reason for this is that all SQL Server drivers we have seen
+describe procedure parameters in the order they are declared and
+ignore the order they are used in the SQL. If you specify them out of
+order, DBD::ODBC will get details for parameter 1 which really apply
+to parameter 2, etc. This can lead to data truncation errors and all
+sorts of other problems that are impossible for DBD::ODBC to spot or
+work around.
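+
+A sketch of calling the procedure above safely (the procedure and
+parameter names are the ones from the example; the values are made
+up):
+
+ # parameters named in the same order they are declared
+ my $s = $h->prepare('exec test @param1=?,@param2=?');
+ $s->bind_param(1, 'some text');   # @param1 varchar(50)
+ $s->bind_param(2, 42);            # @param2 smallint
+ $s->execute;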
+
+=head2 Why do I get "Numeric value out of range" when binding dates in Oracle?
+
+Also see "Why do I get "Datetime field overflow" when attempting to
+insert a date into Oracle?".
+
+Here is some example code binding dates; some work, some don't, see
+the comments.
+
+ use DBI;
+ use strict;
+
+ # table is "create table martin (a date, b int)"
+
+ my $h = DBI->connect;
+
+ $h->do(q{alter session set nls_date_format='DD-MON-YY'});
+
+ my $s = $h->prepare(q{select * from v$nls_parameters where parameter = 'NLS_DATE_FORMAT'});
+ $s->execute;
+ print DBI::dump_results($s);
+
+ my $date = '30-DEC-99';
+ my $dateodbc = qq/{ d '1999-12-30'}/;
+
+ # the following works ok - resulting in 2099-12-30 being inserted
+ $h->do(qq{insert into martin values ('$date', 1)});
+
+ # the following works - resulting in 1999-12-30 being inserted
+ $h->do(qq{insert into martin values ($dateodbc, 2)});
+
+ # fails
+ eval {
+     my $s = $h->prepare(q{insert into martin values(?,3)});
+     $s->bind_param(1, $date);
+     # fails:
+     # Numeric value out of range: invalid character in date/time string (SQL-22003)
+     $s->execute;
+ };
+
+ # works - resulting in 2099-12-30 being inserted
+ eval {
+     my $s = $h->prepare(q{insert into martin values(?,4)});
+     $s->bind_param(1, $date, DBI::SQL_VARCHAR);
+     $s->execute;
+ };
+
+ # works - resulting in 1999-12-30 being inserted
+ eval {
+     my $s = $h->prepare(q{insert into martin values(?,5)});
+     $s->bind_param(1, $dateodbc);
+     $s->execute;
+ };
+
+In general, when using an ODBC driver you should use the ODBC syntax
+for dates, times and timestamps, as those are the only formats an ODBC
+driver has to support.
+
+In the above case with Oracle, the date parameter is described as a
+SQL_TYPE_DATE SQL type, so by default DBD::ODBC binds your parameter
+as a SQL_TYPE_DATE. If you use '30-DEC-99' then that means the C type
+is SQL_CHAR and the SQL type is SQL_TYPE_DATE, so the driver is forced
+to parse the date before sending it to Oracle (that would mean knowing
+what your NLS_DATE_FORMAT is, and it would also mean knowing all the
+magic special characters Oracle can use to define date formats).
+
+If you override the bind type to SQL_VARCHAR, then the driver sees
+SQL_CHAR => SQL_VARCHAR, has nothing to do, and hence Oracle itself
+does the translation - that is why the SQL_VARCHAR works.
+
+=head2 CWB0111 error with System i Access ODBC Driver
+
+The full error this relates to is:
+
+ [unixODBC][IBM][System i Access ODBC Driver]Column 1: CWB0111 - A buffer passed to a system call is too small to hold return data (SQL-22018)
+
+The iSeries ODBC driver looks at your environment and decides that if
+you have UTF-8 set it will encode data returned from the database in
+UTF-8.
+
+e.g., LC_CTYPE=fr_FR.UTF-8
+
+If you then select data from your database containing non-ASCII
+characters, e.g., accented characters, the iSeries ODBC driver will
+encode the data in UTF-8. UTF-8 encoding increases the size of strings
+containing characters with codes > 127.
+
+DBD::ODBC uses the SQLDescribeCol and SQLColAttribute ODBC calls to
+work out the size of the columns you are retrieving and allocate space
+for them. As the ODBC API specifies that the sizes returned are in
+bytes, when the driver says a string column is of size N, DBD::ODBC
+allocates N+1 (for the terminating NUL) bytes. If the driver then
+encodes N characters in UTF-8, the size will be too big to fit into
+DBD::ODBC's buffer and you will get the error above. This is most
+often seen with char(N) columns, as the trailing spaces are returned
+by the driver, so you are bound to overflow the buffer as soon as a
+non-ASCII character is found.
+
+What are your possible solutions?
+
+You can attempt to trim the data to leave room for the encoding.
+e.g., RTRIM(column_name) in your select on char(N) columns but this is +a poor choice and only going to work in a few circumstances. + +You can increase the sizes of your columns in the database but this is +another hack. + +You can rearrange your SQL to cast the columns in question to larger +types. + +Remove UTF-8 from your locale. This is the best solution as it is +guaranteed to stop this error but if you have data which cannot be +represented in 8 bit characters this won't help. + +=head1 AUTHOR + +Parts of this document were written by Tim Bunce, +Jeff Urlwin and Martin J. Evans. + +=head1 LICENSE + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.8.7 or, +at your option, any later version of Perl 5 you may have available. + + +=cut + diff --git a/Master/tlpkg/tlperl/lib/DBD/Pg.pm b/Master/tlpkg/tlperl/lib/DBD/Pg.pm new file mode 100755 index 00000000000..0218e222a07 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Pg.pm @@ -0,0 +1,4208 @@ +# -*-cperl-*- +# $Id: Pg.pm 13752 2010-01-20 19:19:06Z turnstep $ +# +# Copyright (c) 2002-2010 Greg Sabino Mullane and others: see the Changes file +# Portions Copyright (c) 2002 Jeffrey W. Baker +# Portions Copyright (c) 1997-2001 Edmund Mergl +# Portions Copyright (c) 1994-1997 Tim Bunce +# +# You may distribute under the terms of either the GNU General Public +# License or the Artistic License, as specified in the Perl README file. + + +use strict; +use warnings; +use 5.006001; + +{ + package DBD::Pg; + + use version; our $VERSION = qv('2.16.1'); + + use DBI (); + use DynaLoader (); + use Exporter (); + use vars qw(@ISA %EXPORT_TAGS $err $errstr $sqlstate $drh $dbh $DBDPG_DEFAULT @EXPORT); + @ISA = qw(DynaLoader Exporter); + + + %EXPORT_TAGS = + ( + async => [qw(PG_ASYNC PG_OLDQUERY_CANCEL PG_OLDQUERY_WAIT)], + pg_types => [qw( + PG_ABSTIME PG_ABSTIMEARRAY PG_ACLITEM PG_ACLITEMARRAY PG_ANY + PG_ANYARRAY PG_ANYELEMENT PG_ANYENUM PG_ANYNONARRAY PG_BIT + PG_BITARRAY PG_BOOL PG_BOOLARRAY PG_BOX PG_BOXARRAY + PG_BPCHAR PG_BPCHARARRAY PG_BYTEA PG_BYTEAARRAY PG_CHAR + PG_CHARARRAY PG_CID PG_CIDARRAY PG_CIDR PG_CIDRARRAY + PG_CIRCLE PG_CIRCLEARRAY PG_CSTRING PG_CSTRINGARRAY PG_DATE + PG_DATEARRAY PG_FLOAT4 PG_FLOAT4ARRAY PG_FLOAT8 PG_FLOAT8ARRAY + PG_GTSVECTOR PG_GTSVECTORARRAY PG_INET PG_INETARRAY PG_INT2 + PG_INT2ARRAY PG_INT2VECTOR PG_INT2VECTORARRAY PG_INT4 PG_INT4ARRAY + PG_INT8 PG_INT8ARRAY PG_INTERNAL PG_INTERVAL PG_INTERVALARRAY + PG_LANGUAGE_HANDLER PG_LINE PG_LINEARRAY PG_LSEG PG_LSEGARRAY + PG_MACADDR PG_MACADDRARRAY PG_MONEY PG_MONEYARRAY PG_NAME + PG_NAMEARRAY PG_NUMERIC PG_NUMERICARRAY PG_OID PG_OIDARRAY + PG_OIDVECTOR PG_OIDVECTORARRAY PG_OPAQUE PG_PATH PG_PATHARRAY + PG_PG_ATTRIBUTE PG_PG_CLASS PG_PG_PROC PG_PG_TYPE PG_POINT + PG_POINTARRAY PG_POLYGON PG_POLYGONARRAY PG_RECORD PG_RECORDARRAY + PG_REFCURSOR PG_REFCURSORARRAY PG_REGCLASS PG_REGCLASSARRAY PG_REGCONFIG + PG_REGCONFIGARRAY PG_REGDICTIONARY PG_REGDICTIONARYARRAY PG_REGOPER PG_REGOPERARRAY + PG_REGOPERATOR PG_REGOPERATORARRAY PG_REGPROC PG_REGPROCARRAY PG_REGPROCEDURE + PG_REGPROCEDUREARRAY PG_REGTYPE PG_REGTYPEARRAY PG_RELTIME PG_RELTIMEARRAY + PG_SMGR PG_TEXT PG_TEXTARRAY PG_TID PG_TIDARRAY + PG_TIME PG_TIMEARRAY PG_TIMESTAMP PG_TIMESTAMPARRAY PG_TIMESTAMPTZ + PG_TIMESTAMPTZARRAY PG_TIMETZ PG_TIMETZARRAY PG_TINTERVAL PG_TINTERVALARRAY + PG_TRIGGER PG_TSQUERY PG_TSQUERYARRAY PG_TSVECTOR PG_TSVECTORARRAY + PG_TXID_SNAPSHOT PG_TXID_SNAPSHOTARRAY PG_UNKNOWN PG_UUID PG_UUIDARRAY + PG_VARBIT 
PG_VARBITARRAY PG_VARCHAR PG_VARCHARARRAY PG_VOID + PG_XID PG_XIDARRAY PG_XML PG_XMLARRAY + )] + ); + + { + package DBD::Pg::DefaultValue; + sub new { my $self = {}; return bless $self, shift; } + } + $DBDPG_DEFAULT = DBD::Pg::DefaultValue->new(); + Exporter::export_ok_tags('pg_types', 'async'); + @EXPORT = qw($DBDPG_DEFAULT PG_ASYNC PG_OLDQUERY_CANCEL PG_OLDQUERY_WAIT PG_BYTEA); + + require_version DBI 1.52; + + bootstrap DBD::Pg $VERSION; + + $err = 0; # holds error code for DBI::err + $errstr = ''; # holds error string for DBI::errstr + $sqlstate = ''; # holds five character SQLSTATE code + $drh = undef; # holds driver handle once initialized + + ## These two methods are here to allow calling before connect() + sub parse_trace_flag { + my ($class, $flag) = @_; + return (0x7FFFFF00 - 0x08000000) if $flag eq 'DBD'; ## all but the prefix + return 0x01000000 if $flag eq 'pglibpq'; + return 0x02000000 if $flag eq 'pgstart'; + return 0x04000000 if $flag eq 'pgend'; + return 0x08000000 if $flag eq 'pgprefix'; + return 0x10000000 if $flag eq 'pglogin'; + return 0x20000000 if $flag eq 'pgquote'; + return DBI::parse_trace_flag($class, $flag); + } + sub parse_trace_flags { + my ($class, $flags) = @_; + return DBI::parse_trace_flags($class, $flags); + } + + sub CLONE { + $drh = undef; + return; + } + + ## Deprecated + sub _pg_use_catalog { + return 'pg_catalog.'; + } + + sub driver { + return $drh if defined $drh; + my($class, $attr) = @_; + + $class .= '::dr'; + + $drh = DBI::_new_drh($class, { + 'Name' => 'Pg', + 'Version' => $VERSION, + 'Err' => \$DBD::Pg::err, + 'Errstr' => \$DBD::Pg::errstr, + 'State' => \$DBD::Pg::sqlstate, + 'Attribution' => "DBD::Pg $VERSION by Greg Sabino Mullane and others", + }); + + + DBD::Pg::db->install_method('pg_cancel'); + DBD::Pg::db->install_method('pg_endcopy'); + DBD::Pg::db->install_method('pg_getline'); + DBD::Pg::db->install_method('pg_getcopydata'); + DBD::Pg::db->install_method('pg_getcopydata_async'); + DBD::Pg::db->install_method('pg_notifies'); + DBD::Pg::db->install_method('pg_putcopydata'); + DBD::Pg::db->install_method('pg_putcopyend'); + DBD::Pg::db->install_method('pg_ping'); + DBD::Pg::db->install_method('pg_putline'); + DBD::Pg::db->install_method('pg_ready'); + DBD::Pg::db->install_method('pg_release'); + DBD::Pg::db->install_method('pg_result'); + DBD::Pg::db->install_method('pg_rollback_to'); + DBD::Pg::db->install_method('pg_savepoint'); + DBD::Pg::db->install_method('pg_server_trace'); + DBD::Pg::db->install_method('pg_server_untrace'); + DBD::Pg::db->install_method('pg_type_info'); + + DBD::Pg::st->install_method('pg_cancel'); + DBD::Pg::st->install_method('pg_result'); + DBD::Pg::st->install_method('pg_ready'); + + DBD::Pg::db->install_method('pg_lo_creat'); + DBD::Pg::db->install_method('pg_lo_open'); + DBD::Pg::db->install_method('pg_lo_write'); + DBD::Pg::db->install_method('pg_lo_read'); + DBD::Pg::db->install_method('pg_lo_lseek'); + DBD::Pg::db->install_method('pg_lo_tell'); + DBD::Pg::db->install_method('pg_lo_close'); + DBD::Pg::db->install_method('pg_lo_unlink'); + DBD::Pg::db->install_method('pg_lo_import'); + DBD::Pg::db->install_method('pg_lo_export'); + + return $drh; + + } ## end of driver + + + 1; + +} ## end of package DBD::Pg + + +{ + package DBD::Pg::dr; + + use strict; + + ## Returns an array of formatted database names from the pg_database table + sub data_sources { + + my $drh = shift; + my $attr = shift || ''; + ## Future: connect to "postgres" when the minimum version we support is 8.0 + my $connstring = 
'dbname=template1'; + if ($ENV{DBI_DSN}) { + ($connstring = $ENV{DBI_DSN}) =~ s/dbi:Pg://; + } + if (length $attr) { + $connstring .= ";$attr"; + } + + my $dbh = DBD::Pg::dr::connect($drh, $connstring) or return undef; + $dbh->{AutoCommit}=1; + my $SQL = 'SELECT pg_catalog.quote_ident(datname) FROM pg_catalog.pg_database ORDER BY 1'; + my $sth = $dbh->prepare($SQL); + $sth->execute() or die $DBI::errstr; + $attr and $attr = ";$attr"; + my @sources = map { "dbi:Pg:dbname=$_->[0]$attr" } @{$sth->fetchall_arrayref()}; + $dbh->disconnect; + return @sources; + } + + + sub connect { ## no critic (ProhibitBuiltinHomonyms) + my ($drh, $dbname, $user, $pass, $attr) = @_; + + ## Allow "db" and "database" as synonyms for "dbname" + $dbname =~ s/\b(?:db|database)\s*=/dbname=/; + + my $name = $dbname; + if ($dbname =~ m{dbname\s*=\s*[\"\']([^\"\']+)}) { + $name = "'$1'"; + $dbname =~ s/\"/\'/g; + } + elsif ($dbname =~ m{dbname\s*=\s*([^;]+)}) { + $name = $1; + } + + $user = defined($user) ? $user : defined $ENV{DBI_USER} ? $ENV{DBI_USER} : ''; + $pass = defined($pass) ? $pass : defined $ENV{DBI_PASS} ? $ENV{DBI_PASS} : ''; + + my ($dbh) = DBI::_new_dbh($drh, { + 'Name' => $dbname, + 'Username' => $user, + 'CURRENT_USER' => $user, + }); + + # Connect to the database.. + DBD::Pg::db::_login($dbh, $dbname, $user, $pass) or return undef; + + my $version = $dbh->{pg_server_version}; + $dbh->{private_dbdpg}{version} = $version; + + if ($attr) { + if ($attr->{dbd_verbose}) { + $dbh->trace('DBD'); + } + } + + return $dbh; + } + + sub private_attribute_info { + return { + }; + } + +} ## end of package DBD::Pg::dr + + +{ + package DBD::Pg::db; + + use DBI qw(:sql_types); + + use strict; + + sub parse_trace_flag { + my ($h, $flag) = @_; + return DBD::Pg->parse_trace_flag($flag); + } + + sub prepare { + my($dbh, $statement, @attribs) = @_; + + return undef if ! defined $statement; + + # Create a 'blank' statement handle: + my $sth = DBI::_new_sth($dbh, { + 'Statement' => $statement, + }); + + DBD::Pg::st::_prepare($sth, $statement, @attribs) || 0; + + return $sth; + } + + sub last_insert_id { + + my ($dbh, $catalog, $schema, $table, $col, $attr) = @_; + + ## Our ultimate goal is to get a sequence + my ($sth, $count, $SQL, $sequence); + + ## Cache all of our table lookups? Default is yes + my $cache = 1; + + ## Catalog and col are not used + $schema = '' if ! defined $schema; + $table = '' if ! defined $table; + my $cachename = "lii$table$schema"; + + if (defined $attr and length $attr) { + ## If not a hash, assume it is a sequence name + if (! ref $attr) { + $attr = {sequence => $attr}; + } + elsif (ref $attr ne 'HASH') { + $dbh->set_err(1, 'last_insert_id must be passed a hashref as the final argument'); + return undef; + } + ## Named sequence overrides any table or schema settings + if (exists $attr->{sequence} and length $attr->{sequence}) { + $sequence = $attr->{sequence}; + } + if (exists $attr->{pg_cache}) { + $cache = $attr->{pg_cache}; + } + } + + if (! defined $sequence and exists $dbh->{private_dbdpg}{$cachename} and $cache) { + $sequence = $dbh->{private_dbdpg}{$cachename}; + } + elsif (! defined $sequence) { + ## At this point, we must have a valid table name + if (! 
length $table) { + $dbh->set_err(1, 'last_insert_id needs at least a sequence or table name'); + return undef; + } + my @args = ($table); + ## Make sure the table in question exists and grab its oid + my ($schemajoin,$schemawhere) = ('',''); + if (length $schema) { + $schemajoin = "\n JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace)"; + $schemawhere = "\n AND n.nspname = ?"; + push @args, $schema; + } + $SQL = "SELECT c.oid FROM pg_catalog.pg_class c $schemajoin\n WHERE relname = ?$schemawhere"; + if (! length $schema) { + $SQL .= ' AND pg_catalog.pg_table_is_visible(c.oid)'; + } + $sth = $dbh->prepare_cached($SQL); + $count = $sth->execute(@args); + if (!defined $count or $count eq '0E0') { + $sth->finish(); + my $message = qq{Could not find the table "$table"}; + length $schema and $message .= qq{ in the schema "$schema"}; + $dbh->set_err(1, $message); + return undef; + } + my $oid = $sth->fetchall_arrayref()->[0][0]; + $oid =~ /(\d+)/ or die qq{OID was not numeric?!?\n}; + $oid = $1; + ## This table has a primary key. Is there a sequence associated with it via a unique, indexed column? + $SQL = "SELECT a.attname, i.indisprimary, pg_catalog.pg_get_expr(adbin,adrelid)\n". + "FROM pg_catalog.pg_index i, pg_catalog.pg_attribute a, pg_catalog.pg_attrdef d\n ". + "WHERE i.indrelid = $oid AND d.adrelid=a.attrelid AND d.adnum=a.attnum\n". + " AND a.attrelid = $oid AND i.indisunique IS TRUE\n". + " AND a.atthasdef IS TRUE AND i.indkey[0]=a.attnum\n". + q{ AND d.adsrc ~ '^nextval'}; + $sth = $dbh->prepare($SQL); + $count = $sth->execute(); + if (!defined $count or $count eq '0E0') { + $sth->finish(); + $dbh->set_err(1, qq{No suitable column found for last_insert_id of table "$table"}); + return undef; + } + my $info = $sth->fetchall_arrayref(); + + ## We have at least one with a default value. See if we can determine sequences + my @def; + for (@$info) { + next unless $_->[2] =~ /^nextval\(+'([^']+)'::/o; + push @$_, $1; + push @def, $_; + } + if (!@def) { + $dbh->set_err(1, qq{No suitable column found for last_insert_id of table "$table"\n}); + } + ## Tiebreaker goes to the primary keys + if (@def > 1) { + my @pri = grep { $_->[1] } @def; + if (1 != @pri) { + $dbh->set_err(1, qq{No suitable column found for last_insert_id of table "$table"\n}); + } + @def = @pri; + } + $sequence = $def[0]->[3]; + ## Cache this information for subsequent calls + $dbh->{private_dbdpg}{$cachename} = $sequence; + } + + $sth = $dbh->prepare_cached('SELECT currval(?)'); + $count = $sth->execute($sequence); + return undef if ! defined $count; + return $sth->fetchall_arrayref()->[0][0]; + + } ## end of last_insert_id + + sub ping { + my $dbh = shift; + local $SIG{__WARN__} = sub { } if $dbh->FETCH('PrintError'); + my $ret = DBD::Pg::db::_ping($dbh); + return $ret < 1 ? 0 : $ret; + } + + sub pg_ping { + my $dbh = shift; + local $SIG{__WARN__} = sub { } if $dbh->FETCH('PrintError'); + return DBD::Pg::db::_ping($dbh); + } + + sub pg_type_info { + my($dbh,$pg_type) = @_; + local $SIG{__WARN__} = sub { } if $dbh->FETCH('PrintError'); + my $ret = DBD::Pg::db::_pg_type_info($pg_type); + return $ret; + } + + # Column expected in statement handle returned. + # table_cat, table_schem, table_name, column_name, data_type, type_name, + # column_size, buffer_length, DECIMAL_DIGITS, NUM_PREC_RADIX, NULLABLE, + # REMARKS, COLUMN_DEF, SQL_DATA_TYPE, SQL_DATETIME_SUB, CHAR_OCTET_LENGTH, + # ORDINAL_POSITION, IS_NULLABLE + # The result set is ordered by TABLE_SCHEM, TABLE_NAME and ORDINAL_POSITION. 
+ + sub column_info { + my $dbh = shift; + my ($catalog, $schema, $table, $column) = @_; + + my @search; + ## If the schema or table has an underscore or a %, use a LIKE comparison + if (defined $schema and length $schema) { + push @search, 'n.nspname ' . ($schema =~ /[_%]/ ? 'LIKE ' : '= ') . + $dbh->quote($schema); + } + if (defined $table and length $table) { + push @search, 'c.relname ' . ($table =~ /[_%]/ ? 'LIKE ' : '= ') . + $dbh->quote($table); + } + if (defined $column and length $column) { + push @search, 'a.attname ' . ($column =~ /[_%]/ ? 'LIKE ' : '= ') . + $dbh->quote($column); + } + + my $whereclause = join "\n\t\t\t\tAND ", '', @search; + + my $schemajoin = 'JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace)'; + + my $remarks = 'pg_catalog.col_description(a.attrelid, a.attnum)'; + + my $column_def = $dbh->{private_dbdpg}{version} >= 80000 + ? 'pg_catalog.pg_get_expr(af.adbin, af.adrelid)' + : 'af.adsrc'; + + my $col_info_sql = qq! + SELECT + NULL::text AS "TABLE_CAT" + , quote_ident(n.nspname) AS "TABLE_SCHEM" + , quote_ident(c.relname) AS "TABLE_NAME" + , quote_ident(a.attname) AS "COLUMN_NAME" + , a.atttypid AS "DATA_TYPE" + , pg_catalog.format_type(a.atttypid, NULL) AS "TYPE_NAME" + , a.attlen AS "COLUMN_SIZE" + , NULL::text AS "BUFFER_LENGTH" + , NULL::text AS "DECIMAL_DIGITS" + , NULL::text AS "NUM_PREC_RADIX" + , CASE a.attnotnull WHEN 't' THEN 0 ELSE 1 END AS "NULLABLE" + , $remarks AS "REMARKS" + , $column_def AS "COLUMN_DEF" + , NULL::text AS "SQL_DATA_TYPE" + , NULL::text AS "SQL_DATETIME_SUB" + , NULL::text AS "CHAR_OCTET_LENGTH" + , a.attnum AS "ORDINAL_POSITION" + , CASE a.attnotnull WHEN 't' THEN 'NO' ELSE 'YES' END AS "IS_NULLABLE" + , pg_catalog.format_type(a.atttypid, a.atttypmod) AS "pg_type" + , '?' AS "pg_constraint" + , n.nspname AS "pg_schema" + , c.relname AS "pg_table" + , a.attname AS "pg_column" + , a.attrelid AS "pg_attrelid" + , a.attnum AS "pg_attnum" + , a.atttypmod AS "pg_atttypmod" + , t.typtype AS "_pg_type_typtype" + , t.oid AS "_pg_type_oid" + FROM + pg_catalog.pg_type t + JOIN pg_catalog.pg_attribute a ON (t.oid = a.atttypid) + JOIN pg_catalog.pg_class c ON (a.attrelid = c.oid) + LEFT JOIN pg_catalog.pg_attrdef af ON (a.attnum = af.adnum AND a.attrelid = af.adrelid) + $schemajoin + WHERE + a.attnum >= 0 + AND c.relkind IN ('r','v') + $whereclause + ORDER BY "TABLE_SCHEM", "TABLE_NAME", "ORDINAL_POSITION" + !; + + my $data = $dbh->selectall_arrayref($col_info_sql) or return undef; + + # To turn the data back into a statement handle, we need + # to fetch the data as an array of arrays, and also have a + # a matching array of all the column names + my %col_map = (qw/ + TABLE_CAT 0 + TABLE_SCHEM 1 + TABLE_NAME 2 + COLUMN_NAME 3 + DATA_TYPE 4 + TYPE_NAME 5 + COLUMN_SIZE 6 + BUFFER_LENGTH 7 + DECIMAL_DIGITS 8 + NUM_PREC_RADIX 9 + NULLABLE 10 + REMARKS 11 + COLUMN_DEF 12 + SQL_DATA_TYPE 13 + SQL_DATETIME_SUB 14 + CHAR_OCTET_LENGTH 15 + ORDINAL_POSITION 16 + IS_NULLABLE 17 + pg_type 18 + pg_constraint 19 + pg_schema 20 + pg_table 21 + pg_column 22 + pg_enum_values 23 + /); + + for my $row (@$data) { + my $typoid = pop @$row; + my $typtype = pop @$row; + my $typmod = pop @$row; + my $attnum = pop @$row; + my $aid = pop @$row; + + $row->[$col_map{COLUMN_SIZE}] = + _calc_col_size($typmod,$row->[$col_map{COLUMN_SIZE}]); + + # Replace the Pg type with the SQL_ type + $row->[$col_map{DATA_TYPE}] = DBD::Pg::db::pg_type_info($dbh,$row->[$col_map{DATA_TYPE}]); + + # Add pg_constraint + my $SQL = q{SELECT consrc FROM pg_catalog.pg_constraint WHERE 
contype = 'c' AND }. + qq{conrelid = $aid AND conkey = '{$attnum}'}; + my $info = $dbh->selectall_arrayref($SQL); + if (@$info) { + $row->[19] = $info->[0][0]; + } + else { + $row->[19] = undef; + } + + if ( $typtype eq 'e' ) { + $SQL = "SELECT enumlabel FROM pg_catalog.pg_enum WHERE enumtypid = $typoid ORDER BY oid"; + $row->[23] = $dbh->selectcol_arrayref($SQL); + } + else { + $row->[23] = undef; + } + } + + # Since we've processed the data in Perl, we have to jump through a hoop + # To turn it back into a statement handle + # + return _prepare_from_data + ( + 'column_info', + $data, + [ sort { $col_map{$a} <=> $col_map{$b} } keys %col_map] + ); + } + + sub _prepare_from_data { + my ($statement, $data, $names, %attr) = @_; + my $sponge = DBI->connect('dbi:Sponge:', '', '', { RaiseError => 1 }); + my $sth = $sponge->prepare($statement, { rows=>$data, NAME=>$names, %attr }); + return $sth; + } + + sub statistics_info { + + my $dbh = shift; + my ($catalog, $schema, $table, $unique_only, $quick, $attr) = @_; + + ## Catalog is ignored, but table is mandatory + return undef unless defined $table and length $table; + + my $schema_where = ''; + my @exe_args = ($table); + + my $input_schema = (defined $schema and length $schema) ? 1 : 0; + + if ($input_schema) { + $schema_where = 'AND n.nspname = ? AND n.oid = d.relnamespace'; + push(@exe_args, $schema); + } + else { + $schema_where = 'AND n.oid = d.relnamespace'; + } + + my $table_stats_sql = qq{ + SELECT d.relpages, d.reltuples, n.nspname + FROM pg_catalog.pg_class d, pg_catalog.pg_namespace n + WHERE d.relname = ? $schema_where + }; + + my $colnames_sql = qq{ + SELECT + a.attnum, a.attname + FROM + pg_catalog.pg_attribute a, pg_catalog.pg_class d, pg_catalog.pg_namespace n + WHERE + a.attrelid = d.oid AND d.relname = ? $schema_where + }; + + my $stats_sql = qq{ + SELECT + c.relname, i.indkey, i.indisunique, i.indisclustered, a.amname, + n.nspname, c.relpages, c.reltuples, i.indexprs, + pg_get_expr(i.indpred,i.indrelid) as predicate + FROM + pg_catalog.pg_index i, pg_catalog.pg_class c, + pg_catalog.pg_class d, pg_catalog.pg_am a, + pg_catalog.pg_namespace n + WHERE + d.relname = ? $schema_where AND d.oid = i.indrelid + AND i.indexrelid = c.oid AND c.relam = a.oid + ORDER BY + i.indisunique desc, a.amname, c.relname + }; + + my @output_rows; + + # Table-level stats + if (!$unique_only) { + my $table_stats_sth = $dbh->prepare($table_stats_sql); + $table_stats_sth->execute(@exe_args) or return undef; + my $tst = $table_stats_sth->fetchrow_hashref or return undef; + push(@output_rows, [ + undef, # TABLE_CAT + $tst->{nspname}, # TABLE_SCHEM + $table, # TABLE_NAME + undef, # NON_UNIQUE + undef, # INDEX_QUALIFIER + undef, # INDEX_NAME + 'table', # TYPE + undef, # ORDINAL_POSITION + undef, # COLUMN_NAME + undef, # ASC_OR_DESC + $tst->{reltuples},# CARDINALITY + $tst->{relpages}, # PAGES + undef, # FILTER_CONDITION + ]); + } + + # Fetch the column names for later use + my $colnames_sth = $dbh->prepare($colnames_sql); + $colnames_sth->execute(@exe_args) or return undef; + my $colnames = $colnames_sth->fetchall_hashref('attnum'); + + # Fetch the index definitions + my $sth = $dbh->prepare($stats_sql); + $sth->execute(@exe_args) or return undef; + + STAT_ROW: + #use Data::Dumper; + #warn Dumper $stats_sql; + while (my $row = $sth->fetchrow_hashref) { + #warn Dumper $row; + next if $row->{indexprs}; # We can't return these accurately via this interface ... + next if $unique_only and !$row->{indisunique}; + + my $indtype = $row->{indisclustered} + ? 
'clustered' + : ( $row->{amname} eq 'btree' ) + ? 'btree' + : ($row->{amname} eq 'hash' ) + ? 'hashed' : 'other'; + + my $nonunique = $row->{indisunique} ? 0 : 1; + + my @index_row = ( + undef, # TABLE_CAT + $row->{nspname}, # TABLE_SCHEM + $table, # TABLE_NAME + $nonunique, # NON_UNIQUE + undef, # INDEX_QUALIFIER + $row->{relname}, # INDEX_NAME + $indtype, # TYPE + undef, # ORDINAL_POSITION + undef, # COLUMN_NAME + 'A', # ASC_OR_DESC + $row->{reltuples}, # CARDINALITY + $row->{relpages}, # PAGES + $row->{predicate}, # FILTER_CONDITION + ); + + my $col_nums = $row->{indkey}; + $col_nums =~ s/^\s+//; + my @col_nums = split(/\s+/, $col_nums); + + my $ord_pos = 1; + for my $col_num (@col_nums) { + my @copy = @index_row; + $copy[7] = $ord_pos++; # ORDINAL_POSITION + $copy[8] = $colnames->{$col_num}->{attname}; # COLUMN_NAME + push(@output_rows, \@copy); + } + } + + my @output_colnames = qw/ TABLE_CAT TABLE_SCHEM TABLE_NAME NON_UNIQUE INDEX_QUALIFIER + INDEX_NAME TYPE ORDINAL_POSITION COLUMN_NAME ASC_OR_DESC + CARDINALITY PAGES FILTER_CONDITION /; + + return _prepare_from_data('statistics_info', \@output_rows, \@output_colnames); + } + + sub primary_key_info { + + my $dbh = shift; + my ($catalog, $schema, $table, $attr) = @_; + + ## Catalog is ignored, but table is mandatory + return undef unless defined $table and length $table; + + my $whereclause = 'AND c.relname = ' . $dbh->quote($table); + + if (defined $schema and length $schema) { + $whereclause .= "\n\t\t\tAND n.nspname = " . $dbh->quote($schema); + } + + my $TSJOIN = 'pg_catalog.pg_tablespace t ON (t.oid = c.reltablespace)'; + if ($dbh->{private_dbdpg}{version} < 80000) { + $TSJOIN = '(SELECT 0 AS oid, 0 AS spcname, 0 AS spclocation LIMIT 0) AS t ON (t.oid=1)'; + } + + my $pri_key_sql = qq{ + SELECT + c.oid + , quote_ident(n.nspname) + , quote_ident(c.relname) + , quote_ident(c2.relname) + , i.indkey, quote_ident(t.spcname), quote_ident(t.spclocation) + , n.nspname, c.relname, c2.relname + FROM + pg_catalog.pg_class c + JOIN pg_catalog.pg_index i ON (i.indrelid = c.oid) + JOIN pg_catalog.pg_class c2 ON (c2.oid = i.indexrelid) + LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace) + LEFT JOIN $TSJOIN + WHERE + i.indisprimary IS TRUE + $whereclause + }; + + my $sth = $dbh->prepare($pri_key_sql) or return undef; + $sth->execute(); + my $info = $sth->fetchall_arrayref()->[0]; + return undef if ! 
defined $info; + + # Get the attribute information + my $indkey = join ',', split /\s+/, $info->[4]; + my $sql = qq{ + SELECT a.attnum, pg_catalog.quote_ident(a.attname) AS colname, + pg_catalog.quote_ident(t.typname) AS typename + FROM pg_catalog.pg_attribute a, pg_catalog.pg_type t + WHERE a.attrelid = '$info->[0]' + AND a.atttypid = t.oid + AND attnum IN ($indkey); + }; + $sth = $dbh->prepare($sql) or return undef; + $sth->execute(); + my $attribs = $sth->fetchall_hashref('attnum'); + + my $pkinfo = []; + + ## Normal way: complete "row" per column in the primary key + if (!exists $attr->{'pg_onerow'}) { + my $x=0; + my @key_seq = split/\s+/, $info->[4]; + for (@key_seq) { + # TABLE_CAT + $pkinfo->[$x][0] = undef; + # SCHEMA_NAME + $pkinfo->[$x][1] = $info->[1]; + # TABLE_NAME + $pkinfo->[$x][2] = $info->[2]; + # COLUMN_NAME + $pkinfo->[$x][3] = $attribs->{$_}{colname}; + # KEY_SEQ + $pkinfo->[$x][4] = $_; + # PK_NAME + $pkinfo->[$x][5] = $info->[3]; + # DATA_TYPE + $pkinfo->[$x][6] = $attribs->{$_}{typename}; + $pkinfo->[$x][7] = $info->[5]; + $pkinfo->[$x][8] = $info->[6]; + $pkinfo->[$x][9] = $info->[7]; + $pkinfo->[$x][10] = $info->[8]; + $pkinfo->[$x][11] = $info->[9]; + $x++; + } + } + else { ## Nicer way: return only one row + + # TABLE_CAT + $info->[0] = undef; + # TABLESPACES + $info->[7] = $info->[5]; + $info->[8] = $info->[6]; + # Unquoted names + $info->[9] = $info->[7]; + $info->[10] = $info->[8]; + $info->[11] = $info->[9]; + # PK_NAME + $info->[5] = $info->[3]; + # COLUMN_NAME + $info->[3] = 2==$attr->{'pg_onerow'} ? + [ map { $attribs->{$_}{colname} } split /\s+/, $info->[4] ] : + join ', ', map { $attribs->{$_}{colname} } split /\s+/, $info->[4]; + # DATA_TYPE + $info->[6] = 2==$attr->{'pg_onerow'} ? + [ map { $attribs->{$_}{typename} } split /\s+/, $info->[4] ] : + join ', ', map { $attribs->{$_}{typename} } split /\s+/, $info->[4]; + # KEY_SEQ + $info->[4] = 2==$attr->{'pg_onerow'} ? + [ split /\s+/, $info->[4] ] : + join ', ', split /\s+/, $info->[4]; + + $pkinfo = [$info]; + } + + my @cols = (qw(TABLE_CAT TABLE_SCHEM TABLE_NAME COLUMN_NAME + KEY_SEQ PK_NAME DATA_TYPE)); + push @cols, 'pg_tablespace_name', 'pg_tablespace_location'; + push @cols, 'pg_schema', 'pg_table', 'pg_column'; + + return _prepare_from_data('primary_key_info', $pkinfo, \@cols); + + } + + sub primary_key { + my $sth = primary_key_info(@_[0..3], {pg_onerow => 2}); + return defined $sth ? @{$sth->fetchall_arrayref()->[0][3]} : (); + } + + + sub foreign_key_info { + + my $dbh = shift; + + ## PK: catalog, schema, table, FK: catalog, schema, table, attr + + my $oldname = $dbh->{FetchHashKeyName}; + + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + + ## Each of these may be undef or empty + my $pschema = $_[1] || ''; + my $ptable = $_[2] || ''; + my $fschema = $_[4] || ''; + my $ftable = $_[5] || ''; + my $args = $_[6]; + + ## No way to currently specify it, but we are ready when there is + my $odbc = 0; + + ## Must have at least one named table + return undef if !$ptable and !$ftable; + + ## If only the primary table is given, we return only those columns + ## that are used as foreign keys, even if that means that we return + ## unique keys but not primary one. We also return all the foreign + ## tables/columns that are referencing them, of course. 
+ + ## The first step is to find the oid of each specific table in the args: + ## Return undef if no matching relation found + my %oid; + for ([$ptable, $pschema, 'P'], [$ftable, $fschema, 'F']) { + if (length $_->[0]) { + my $SQL = "SELECT c.oid AS schema FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n\n". + 'WHERE c.relnamespace = n.oid AND c.relname = ' . $dbh->quote($_->[0]); + if (length $_->[1]) { + $SQL .= ' AND n.nspname = ' . $dbh->quote($_->[1]); + } + my $info = $dbh->selectall_arrayref($SQL); + return undef if ! @$info; + $oid{$_->[2]} = $info->[0][0]; + } + } + + ## We now need information about each constraint we care about. + ## Foreign table: only 'f' / Primary table: only 'p' or 'u' + my $WHERE = $odbc ? q{((contype = 'p'} : q{((contype IN ('p','u')}; + if (length $ptable) { + $WHERE .= " AND conrelid=$oid{'P'}::oid"; + } + else { + $WHERE .= " AND conrelid IN (SELECT DISTINCT confrelid FROM pg_catalog.pg_constraint WHERE conrelid=$oid{'F'}::oid)"; + if (length $pschema) { + $WHERE .= ' AND n2.nspname = ' . $dbh->quote($pschema); + } + } + + $WHERE .= ")\n \t\t\t\tOR \n \t\t\t\t(contype = 'f'"; + if (length $ftable) { + $WHERE .= " AND conrelid=$oid{'F'}::oid"; + if (length $ptable) { + $WHERE .= " AND confrelid=$oid{'P'}::oid"; + } + } + else { + $WHERE .= " AND confrelid = $oid{'P'}::oid"; + if (length $fschema) { + $WHERE .= ' AND n2.nspname = ' . $dbh->quote($fschema); + } + } + $WHERE .= '))'; + + ## Grab everything except specific column names: + my $fk_sql = qq{ + SELECT conrelid, confrelid, contype, conkey, confkey, + pg_catalog.quote_ident(c.relname) AS t_name, pg_catalog.quote_ident(n2.nspname) AS t_schema, + pg_catalog.quote_ident(n.nspname) AS c_schema, pg_catalog.quote_ident(conname) AS c_name, + CASE + WHEN confupdtype = 'c' THEN 0 + WHEN confupdtype = 'r' THEN 1 + WHEN confupdtype = 'n' THEN 2 + WHEN confupdtype = 'a' THEN 3 + WHEN confupdtype = 'd' THEN 4 + ELSE -1 + END AS update, + CASE + WHEN confdeltype = 'c' THEN 0 + WHEN confdeltype = 'r' THEN 1 + WHEN confdeltype = 'n' THEN 2 + WHEN confdeltype = 'a' THEN 3 + WHEN confdeltype = 'd' THEN 4 + ELSE -1 + END AS delete, + CASE + WHEN condeferrable = 'f' THEN 7 + WHEN condeferred = 't' THEN 6 + WHEN condeferred = 'f' THEN 5 + ELSE -1 + END AS defer + FROM pg_catalog.pg_constraint k, pg_catalog.pg_class c, pg_catalog.pg_namespace n, pg_catalog.pg_namespace n2 + WHERE $WHERE + AND k.connamespace = n.oid + AND k.conrelid = c.oid + AND c.relnamespace = n2.oid + ORDER BY conrelid ASC + }; + + my $sth = $dbh->prepare($fk_sql); + $sth->execute(); + my $info = $sth->fetchall_arrayref({}); + return undef if ! defined $info or ! @$info; + + ## Return undef if just ptable given but no fk found + return undef if ! length $ftable and ! 
grep { $_->{'contype'} eq 'f'} @$info; + + ## Figure out which columns we need information about + my %colnum; + for my $row (@$info) { + for (@{$row->{'conkey'}}) { + $colnum{$row->{'conrelid'}}{$_}++; + } + if ($row->{'contype'} eq 'f') { + for (@{$row->{'confkey'}}) { + $colnum{$row->{'confrelid'}}{$_}++; + } + } + } + ## Get the information about the columns computed above + my $SQL = qq{ + SELECT a.attrelid, a.attnum, pg_catalog.quote_ident(a.attname) AS colname, + pg_catalog.quote_ident(t.typname) AS typename + FROM pg_catalog.pg_attribute a, pg_catalog.pg_type t + WHERE a.atttypid = t.oid + AND (\n}; + + $SQL .= join "\n\t\t\t\tOR\n" => map { + my $cols = join ',' => keys %{$colnum{$_}}; + "\t\t\t\t( a.attrelid = '$_' AND a.attnum IN ($cols) )" + } sort keys %colnum; + + $sth = $dbh->prepare(qq{$SQL \)}); + $sth->execute(); + my $attribs = $sth->fetchall_arrayref({}); + + ## Make a lookup hash + my %attinfo; + for (@$attribs) { + $attinfo{"$_->{'attrelid'}"}{"$_->{'attnum'}"} = $_; + } + + ## This is an array in case we have identical oid/column combos. Lowest oid wins + my %ukey; + for my $c (grep { $_->{'contype'} ne 'f' } @$info) { + ## Munge multi-column keys into sequential order + my $multi = join ' ' => sort @{$c->{'conkey'}}; + push @{$ukey{$c->{'conrelid'}}{$multi}}, $c; + } + + ## Finally, return as a SQL/CLI structure: + my $fkinfo = []; + my $x=0; + for my $t (sort { $a->{'c_name'} cmp $b->{'c_name'} } grep { $_->{'contype'} eq 'f' } @$info) { + ## We need to find which constraint row (if any) matches our confrelid-confkey combo + ## by checking out ukey hash. We sort for proper matching of { 1 2 } vs. { 2 1 } + ## No match means we have a pure index constraint + my $u; + my $multi = join ' ' => sort @{$t->{'confkey'}}; + if (exists $ukey{$t->{'confrelid'}}{$multi}) { + $u = $ukey{$t->{'confrelid'}}{$multi}->[0]; + } + else { + ## Mark this as an index so we can fudge things later on + $multi = 'index'; + ## Grab the first one found, modify later on as needed + $u = ((values %{$ukey{$t->{'confrelid'}}})[0]||[])->[0]; + ## Bail in case there was no match + next if ! ref $u; + } + + ## ODBC is primary keys only + next if $odbc and ($u->{'contype'} ne 'p' or $multi eq 'index'); + + my $conkey = $t->{'conkey'}; + my $confkey = $t->{'confkey'}; + for (my $y=0; $conkey->[$y]; $y++) { + # UK_TABLE_CAT + $fkinfo->[$x][0] = undef; + # UK_TABLE_SCHEM + $fkinfo->[$x][1] = $u->{'t_schema'}; + # UK_TABLE_NAME + $fkinfo->[$x][2] = $u->{'t_name'}; + # UK_COLUMN_NAME + $fkinfo->[$x][3] = $attinfo{$t->{'confrelid'}}{$confkey->[$y]}{'colname'}; + # FK_TABLE_CAT + $fkinfo->[$x][4] = undef; + # FK_TABLE_SCHEM + $fkinfo->[$x][5] = $t->{'t_schema'}; + # FK_TABLE_NAME + $fkinfo->[$x][6] = $t->{'t_name'}; + # FK_COLUMN_NAME + $fkinfo->[$x][7] = $attinfo{$t->{'conrelid'}}{$conkey->[$y]}{'colname'}; + # ORDINAL_POSITION + $fkinfo->[$x][8] = $conkey->[$y]; + # UPDATE_RULE + $fkinfo->[$x][9] = "$t->{'update'}"; + # DELETE_RULE + $fkinfo->[$x][10] = "$t->{'delete'}"; + # FK_NAME + $fkinfo->[$x][11] = $t->{'c_name'}; + # UK_NAME (may be undef if an index with no named constraint) + $fkinfo->[$x][12] = $multi eq 'index' ? undef : $u->{'c_name'}; + # DEFERRABILITY + $fkinfo->[$x][13] = "$t->{'defer'}"; + # UNIQUE_OR_PRIMARY + $fkinfo->[$x][14] = ($u->{'contype'} eq 'p' and $multi ne 'index') ? 
'PRIMARY' : 'UNIQUE'; + # UK_DATA_TYPE + $fkinfo->[$x][15] = $attinfo{$t->{'confrelid'}}{$confkey->[$y]}{'typename'}; + # FK_DATA_TYPE + $fkinfo->[$x][16] = $attinfo{$t->{'conrelid'}}{$conkey->[$y]}{'typename'}; + $x++; + } ## End each column in this foreign key + } ## End each foreign key + + my @CLI_cols = (qw( + UK_TABLE_CAT UK_TABLE_SCHEM UK_TABLE_NAME UK_COLUMN_NAME + FK_TABLE_CAT FK_TABLE_SCHEM FK_TABLE_NAME FK_COLUMN_NAME + ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME UK_NAME + DEFERABILITY UNIQUE_OR_PRIMARY UK_DATA_TYPE FK_DATA_TYPE + )); + + my @ODBC_cols = (qw( + PKTABLE_CAT PKTABLE_SCHEM PKTABLE_NAME PKCOLUMN_NAME + FKTABLE_CAT FKTABLE_SCHEM FKTABLE_NAME FKCOLUMN_NAME + KEY_SEQ UPDATE_RULE DELETE_RULE FK_NAME PK_NAME + DEFERABILITY UNIQUE_OR_PRIMARY PK_DATA_TYPE FKDATA_TYPE + )); + + if ($oldname eq 'NAME_lc') { + if ($odbc) { + for my $col (@ODBC_cols) { + $col = lc $col; + } + } + else { + for my $col (@CLI_cols) { + $col = lc $col; + } + } + } + + return _prepare_from_data('foreign_key_info', $fkinfo, $odbc ? \@ODBC_cols : \@CLI_cols); + + } + + + sub table_info { + + my $dbh = shift; + my ($catalog, $schema, $table, $type) = @_; + + my $tbl_sql = (); + + my $extracols = q{,NULL::text AS pg_schema, NULL::text AS pg_table}; + if ( # Rule 19a + (defined $catalog and $catalog eq '%') + and (defined $schema and $schema eq '') + and (defined $table and $table eq '') + ) { + $tbl_sql = qq{ + SELECT + NULL::text AS "TABLE_CAT" + , NULL::text AS "TABLE_SCHEM" + , NULL::text AS "TABLE_NAME" + , NULL::text AS "TABLE_TYPE" + , NULL::text AS "REMARKS" $extracols + }; + } + elsif (# Rule 19b + (defined $catalog and $catalog eq '') + and (defined $schema and $schema eq '%') + and (defined $table and $table eq '') + ) { + $extracols = q{,n.nspname AS pg_schema, NULL::text AS pg_table}; + $tbl_sql = qq{SELECT + NULL::text AS "TABLE_CAT" + , quote_ident(n.nspname) AS "TABLE_SCHEM" + , NULL::text AS "TABLE_NAME" + , NULL::text AS "TABLE_TYPE" + , CASE WHEN n.nspname ~ '^pg_' THEN 'system schema' ELSE 'owned by ' || pg_get_userbyid(n.nspowner) END AS "REMARKS" $extracols + FROM pg_catalog.pg_namespace n + ORDER BY "TABLE_SCHEM" + }; + } + elsif (# Rule 19c + (defined $catalog and $catalog eq '') + and (defined $schema and $schema eq '') + and (defined $table and $table eq '') + and (defined $type and $type eq '%') + ) { + $tbl_sql = qq{ + SELECT + NULL::text AS "TABLE_CAT" + , NULL::text AS "TABLE_SCHEM" + , NULL::text AS "TABLE_NAME" + , 'TABLE' AS "TABLE_TYPE" + , 'relkind: r' AS "REMARKS" $extracols + UNION + SELECT + NULL::text AS "TABLE_CAT" + , NULL::text AS "TABLE_SCHEM" + , NULL::text AS "TABLE_NAME" + , 'VIEW' AS "TABLE_TYPE" + , 'relkind: v' AS "REMARKS" $extracols + }; + } + else { + # Default SQL + $extracols = q{,n.nspname AS pg_schema, c.relname AS pg_table}; + my @search; + my $showtablespace = ', quote_ident(t.spcname) AS "pg_tablespace_name", quote_ident(t.spclocation) AS "pg_tablespace_location"'; + + ## If the schema or table has an underscore or a %, use a LIKE comparison + if (defined $schema and length $schema) { + push @search, 'n.nspname ' . ($schema =~ /[_%]/ ? 'LIKE ' : '= ') . $dbh->quote($schema); + } + if (defined $table and length $table) { + push @search, 'c.relname ' . ($table =~ /[_%]/ ? 'LIKE ' : '= ') . $dbh->quote($table); + } + ## All we can see is "table" or "view". 
Default is both + my $typesearch = q{IN ('r','v')}; + if (defined $type and length $type) { + if ($type =~ /\btable\b/i and $type !~ /\bview\b/i) { + $typesearch = q{= 'r'}; + } + elsif ($type =~ /\bview\b/i and $type !~ /\btable\b/i) { + $typesearch = q{= 'v'}; + } + } + push @search, "c.relkind $typesearch"; + + my $TSJOIN = 'pg_catalog.pg_tablespace t ON (t.oid = c.reltablespace)'; + if ($dbh->{private_dbdpg}{version} < 80000) { + $TSJOIN = '(SELECT 0 AS oid, 0 AS spcname, 0 AS spclocation LIMIT 0) AS t ON (t.oid=1)'; + } + my $whereclause = join "\n\t\t\t\t\t AND " => @search; + $tbl_sql = qq{ + SELECT NULL::text AS "TABLE_CAT" + , quote_ident(n.nspname) AS "TABLE_SCHEM" + , quote_ident(c.relname) AS "TABLE_NAME" + , CASE + WHEN c.relkind = 'v' THEN + CASE WHEN quote_ident(n.nspname) ~ '^pg_' THEN 'SYSTEM VIEW' ELSE 'VIEW' END + ELSE + CASE WHEN quote_ident(n.nspname) ~ '^pg_' THEN 'SYSTEM TABLE' ELSE 'TABLE' END + END AS "TABLE_TYPE" + , d.description AS "REMARKS" $showtablespace $extracols + FROM pg_catalog.pg_class AS c + LEFT JOIN pg_catalog.pg_description AS d + ON (c.oid = d.objoid AND c.tableoid = d.classoid AND d.objsubid = 0) + LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace) + LEFT JOIN $TSJOIN + WHERE $whereclause + ORDER BY "TABLE_TYPE", "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME" + }; + } + my $sth = $dbh->prepare( $tbl_sql ) or return undef; + $sth->execute(); + + return $sth; + } + + sub tables { + my ($dbh, @args) = @_; + my $attr = $args[4]; + my $sth = $dbh->table_info(@args) or return; + my $tables = $sth->fetchall_arrayref() or return; + my @tables = map { (! (ref $attr eq 'HASH' and $attr->{pg_noprefix})) ? + "$_->[1].$_->[2]" : $_->[2] } @$tables; + return @tables; + } + + sub table_attributes { + my ($dbh, $table) = @_; + + my $sth = $dbh->column_info(undef,undef,$table,undef); + + my %convert = ( + COLUMN_NAME => 'NAME', + DATA_TYPE => 'TYPE', + COLUMN_SIZE => 'SIZE', + NULLABLE => 'NOTNULL', + REMARKS => 'REMARKS', + COLUMN_DEF => 'DEFAULT', + pg_constraint => 'CONSTRAINT', + ); + + my $attrs = $sth->fetchall_arrayref(\%convert); + + for my $row (@$attrs) { + # switch the column names + for my $name (keys %$row) { + $row->{ $convert{$name} } = $row->{$name}; + + ## Keep some original columns + delete $row->{$name} unless ($name eq 'REMARKS' or $name eq 'NULLABLE'); + + } + # Moved check outside of loop as it was inverting the NOTNULL value for + # attribute. + # NOTNULL inverts the sense of NULLABLE + $row->{NOTNULL} = ($row->{NOTNULL} ? 0 : 1); + + my @pri_keys = (); + @pri_keys = $dbh->primary_key( undef, undef, $table ); + $row->{PRIMARY_KEY} = scalar(grep { /^$row->{NAME}$/i } @pri_keys) ? 
1 : 0; + } + + return $attrs; + + } + + sub _calc_col_size { + + my $mod = shift; + my $size = shift; + + + if ((defined $size) and ($size > 0)) { + return $size; + } elsif ($mod > 0xffff) { + my $prec = ($mod & 0xffff) - 4; + $mod >>= 16; + my $dig = $mod; + return "$prec,$dig"; + } elsif ($mod >= 4) { + return $mod - 4; + } # else { + # $rtn = $mod; + # $rtn = undef; + # } + + return; + } + + + sub type_info_all { + my ($dbh) = @_; + + my $names = + { + TYPE_NAME => 0, + DATA_TYPE => 1, + COLUMN_SIZE => 2, + LITERAL_PREFIX => 3, + LITERAL_SUFFIX => 4, + CREATE_PARAMS => 5, + NULLABLE => 6, + CASE_SENSITIVE => 7, + SEARCHABLE => 8, + UNSIGNED_ATTRIBUTE => 9, + FIXED_PREC_SCALE => 10, + AUTO_UNIQUE_VALUE => 11, + LOCAL_TYPE_NAME => 12, + MINIMUM_SCALE => 13, + MAXIMUM_SCALE => 14, + SQL_DATA_TYPE => 15, + SQL_DATETIME_SUB => 16, + NUM_PREC_RADIX => 17, + INTERVAL_PRECISION => 18, + }; + + ## This list is derived from dbi_sql.h in DBI, from types.c and types.h, and from the PG docs + + ## Aids to make the list more readable: + my $GIG = 1073741824; + my $PS = 'precision/scale'; + my $LEN = 'length'; + my $UN = undef; + my $ti = + [ + $names, +# name sql_type size pfx/sfx crt n/c/s +-/P/I local min max sub rdx itvl + +['unknown', SQL_UNKNOWN_TYPE, 0, $UN,$UN, $UN, 1,0,0, $UN,0,0, 'UNKNOWN', $UN,$UN, + SQL_UNKNOWN_TYPE, $UN, $UN, $UN ], +['bytea', SQL_VARBINARY, $GIG, q{'},q{'}, $UN, 1,0,3, $UN,0,0, 'BYTEA', $UN,$UN, + SQL_VARBINARY, $UN, $UN, $UN ], +['bpchar', SQL_CHAR, $GIG, q{'},q{'}, $LEN, 1,1,3, $UN,0,0, 'CHARACTER', $UN,$UN, + SQL_CHAR, $UN, $UN, $UN ], +['numeric', SQL_DECIMAL, 1000, $UN,$UN, $PS, 1,0,2, 0,0,0, 'FLOAT', 0,1000, + SQL_DECIMAL, $UN, $UN, $UN ], +['numeric', SQL_NUMERIC, 1000, $UN,$UN, $PS, 1,0,2, 0,0,0, 'FLOAT', 0,1000, + SQL_NUMERIC, $UN, $UN, $UN ], +['int4', SQL_INTEGER, 10, $UN,$UN, $UN, 1,0,2, 0,0,0, 'INTEGER', 0,0, + SQL_INTEGER, $UN, $UN, $UN ], +['int2', SQL_SMALLINT, 5, $UN,$UN, $UN, 1,0,2, 0,0,0, 'SMALLINT', 0,0, + SQL_SMALLINT, $UN, $UN, $UN ], +['float4', SQL_FLOAT, 6, $UN,$UN, $PS, 1,0,2, 0,0,0, 'FLOAT', 0,6, + SQL_FLOAT, $UN, $UN, $UN ], +['float8', SQL_REAL, 15, $UN,$UN, $PS, 1,0,2, 0,0,0, 'REAL', 0,15, + SQL_REAL, $UN, $UN, $UN ], +['int8', SQL_DOUBLE, 20, $UN,$UN, $UN, 1,0,2, 0,0,0, 'LONGINT', 0,0, + SQL_DOUBLE, $UN, $UN, $UN ], +['date', SQL_DATE, 10, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'DATE', 0,0, + SQL_DATE, $UN, $UN, $UN ], +['tinterval',SQL_TIME, 18, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TINTERVAL', 0,6, + SQL_TIME, $UN, $UN, $UN ], +['timestamp',SQL_TIMESTAMP, 29, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TIMESTAMP', 0,6, + SQL_TIMESTAMP, $UN, $UN, $UN ], +['text', SQL_VARCHAR, $GIG, q{'},q{'}, $LEN, 1,1,3, $UN,0,0, 'TEXT', $UN,$UN, + SQL_VARCHAR, $UN, $UN, $UN ], +['bool', SQL_BOOLEAN, 1, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'BOOLEAN', $UN,$UN, + SQL_BOOLEAN, $UN, $UN, $UN ], +['array', SQL_ARRAY, 1, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'ARRAY', $UN,$UN, + SQL_ARRAY, $UN, $UN, $UN ], +['date', SQL_TYPE_DATE, 10, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'DATE', 0,0, + SQL_TYPE_DATE, $UN, $UN, $UN ], +['time', SQL_TYPE_TIME, 18, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TIME', 0,6, + SQL_TYPE_TIME, $UN, $UN, $UN ], +['timestamp',SQL_TYPE_TIMESTAMP,29, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TIMESTAMP', 0,6, + SQL_TYPE_TIMESTAMP, $UN, $UN, $UN ], +['timetz', SQL_TYPE_TIME_WITH_TIMEZONE, + 29, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TIMETZ', 0,6, + SQL_TYPE_TIME_WITH_TIMEZONE, $UN, $UN, $UN ], +['timestamptz',SQL_TYPE_TIMESTAMP_WITH_TIMEZONE, + 29, q{'},q{'}, $UN, 1,0,2, $UN,0,0, 'TIMESTAMPTZ',0,6, + 
SQL_TYPE_TIMESTAMP_WITH_TIMEZONE, $UN, $UN, $UN ], + # + # intentionally omitted: char, all geometric types, internal types + ]; + return $ti; + } + + + # Characters that need to be escaped by quote(). + my %esc = ( + q{'} => '\\047', # '\\' . sprintf("%03o", ord("'")), # ISO SQL 2 + '\\' => '\\134', # '\\' . sprintf("%03o", ord("\\")), + ); + + # Set up lookup for SQL types we don't want to escape. + my %no_escape = map { $_ => 1 } + DBI::SQL_INTEGER, DBI::SQL_SMALLINT, DBI::SQL_DECIMAL, + DBI::SQL_FLOAT, DBI::SQL_REAL, DBI::SQL_DOUBLE, DBI::SQL_NUMERIC; + + sub get_info { + + my ($dbh,$type) = @_; + + return undef unless defined $type and length $type; + + my %type = ( + +## Driver information: + + 116 => ['SQL_ACTIVE_ENVIRONMENTS', 0 ], ## unlimited + 10021 => ['SQL_ASYNC_MODE', 2 ], ## SQL_AM_STATEMENT + 120 => ['SQL_BATCH_ROW_COUNT', 2 ], ## SQL_BRC_EXPLICIT + 121 => ['SQL_BATCH_SUPPORT', 3 ], ## 12 SELECT_PROC + ROW_COUNT_PROC + 2 => ['SQL_DATA_SOURCE_NAME', "dbi:Pg:$dbh->{Name}" ], + 3 => ['SQL_DRIVER_HDBC', 0 ], ## not applicable + 135 => ['SQL_DRIVER_HDESC', 0 ], ## not applicable + 4 => ['SQL_DRIVER_HENV', 0 ], ## not applicable + 76 => ['SQL_DRIVER_HLIB', 0 ], ## not applicable + 5 => ['SQL_DRIVER_HSTMT', 0 ], ## not applicable + ## Not clear what should go here. Some things suggest 'Pg', others 'Pg.pm'. We'll use DBD::Pg for now + 6 => ['SQL_DRIVER_NAME', 'DBD::Pg' ], + 77 => ['SQL_DRIVER_ODBC_VERSION', '03.00' ], + 7 => ['SQL_DRIVER_VER', 'DBDVERSION' ], ## magic word + 144 => ['SQL_DYNAMIC_CURSOR_ATTRIBUTES1', 0 ], ## we can FETCH, but not via methods + 145 => ['SQL_DYNAMIC_CURSOR_ATTRIBUTES2', 0 ], ## same as above + 84 => ['SQL_FILE_USAGE', 0 ], ## SQL_FILE_NOT_SUPPORTED (this is good) + 146 => ['SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1', 519 ], ## not clear what this refers to in DBD context + 147 => ['SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2', 5209 ], ## see above + 81 => ['SQL_GETDATA_EXTENSIONS', 15 ], ## 1+2+4+8 + 149 => ['SQL_INFO_SCHEMA_VIEWS', 3932149 ], ## not: assert, charset, collat, trans + 150 => ['SQL_KEYSET_CURSOR_ATTRIBUTES1', 0 ], ## applies to us? + 151 => ['SQL_KEYSET_CURSOR_ATTRIBUTES2', 0 ], ## see above + 10022 => ['SQL_MAX_ASYNC_CONCURRENT_STATEMENTS', 0 ], ## unlimited, probably + 0 => ['SQL_MAX_DRIVER_CONNECTIONS', 'MAXCONNECTIONS' ], ## magic word + 152 => ['SQL_ODBC_INTERFACE_CONFORMANCE', 1 ], ## SQL_OIC_LEVEL_1 + 10 => ['SQL_ODBC_VER', '03.00.0000' ], + 153 => ['SQL_PARAM_ARRAY_ROW_COUNTS', 2 ], ## correct? + 154 => ['SQL_PARAM_ARRAY_SELECTS', 3 ], ## PAS_NO_SELECT + 11 => ['SQL_ROW_UPDATES', 'N' ], + 14 => ['SQL_SEARCH_PATTERN_ESCAPE', '\\' ], + 13 => ['SQL_SERVER_NAME', 'CURRENTDB' ], ## magic word + 166 => ['SQL_STANDARD_CLI_CONFORMANCE', 2 ], ## ?? + 167 => ['SQL_STATIC_CURSOR_ATTRIBUTES1', 519 ], ## ?? + 168 => ['SQL_STATIC_CURSOR_ATTRIBUTES2', 5209 ], ## ?? + +## DBMS Information + + 16 => ['SQL_DATABASE_NAME', 'CURRENTDB' ], ## magic word + 17 => ['SQL_DBMS_NAME', 'PostgreSQL' ], + 18 => ['SQL_DBMS_VERSION', 'ODBCVERSION' ], ## magic word + +## Data source information + + 20 => ['SQL_ACCESSIBLE_PROCEDURES', 'Y' ], ## is this really true? + 19 => ['SQL_ACCESSIBLE_TABLES', 'Y' ], ## is this really true? 
+ 82 => ['SQL_BOOKMARK_PERSISTENCE', 0 ], + 42 => ['SQL_CATALOG_TERM', '' ], ## empty = catalogs are not supported + 10004 => ['SQL_COLLATION_SEQ', 'ENCODING' ], ## magic word + 22 => ['SQL_CONCAT_NULL_BEHAVIOR', 0 ], ## SQL_CB_NULL + 23 => ['SQL_CURSOR_COMMIT_BEHAVIOR', 1 ], ## SQL_CB_CLOSE + 24 => ['SQL_CURSOR_ROLLBACK_BEHAVIOR', 1 ], ## SQL_CB_CLOSE + 10001 => ['SQL_CURSOR_SENSITIVITY', 1 ], ## SQL_INSENSITIVE + 25 => ['SQL_DATA_SOURCE_READ_ONLY', 'READONLY' ], ## magic word + 26 => ['SQL_DEFAULT_TXN_ISOLATION', 'DEFAULTTXN' ], ## magic word (2 or 8) + 10002 => ['SQL_DESCRIBE_PARAMETER', 'Y' ], + 36 => ['SQL_MULT_RESULT_SETS', 'Y' ], + 37 => ['SQL_MULTIPLE_ACTIVE_TXN', 'Y' ], + 111 => ['SQL_NEED_LONG_DATA_LEN', 'N' ], + 85 => ['SQL_NULL_COLLATION', 0 ], ## SQL_NC_HIGH + 40 => ['SQL_PROCEDURE_TERM', 'function' ], ## for now + 39 => ['SQL_SCHEMA_TERM', 'schema' ], + 44 => ['SQL_SCROLL_OPTIONS', 8 ], ## not really for DBD? + 45 => ['SQL_TABLE_TERM', 'table' ], + 46 => ['SQL_TXN_CAPABLE', 2 ], ## SQL_TC_ALL + 72 => ['SQL_TXN_ISOLATION_OPTION', 10 ], ## 2+8 + 47 => ['SQL_USER_NAME', $dbh->{CURRENT_USER} ], + +## Supported SQL + + 169 => ['SQL_AGGREGATE_FUNCTIONS', 127 ], ## all of 'em + 117 => ['SQL_ALTER_DOMAIN', 31 ], ## all but deferred + 86 => ['SQL_ALTER_TABLE', 32639 ], ## no collate + 114 => ['SQL_CATALOG_LOCATION', 0 ], + 10003 => ['SQL_CATALOG_NAME', 'N' ], + 41 => ['SQL_CATALOG_NAME_SEPARATOR', '' ], + 92 => ['SQL_CATALOG_USAGE', 0 ], + 87 => ['SQL_COLUMN_ALIAS', 'Y' ], + 74 => ['SQL_CORRELATION_NAME', 2 ], ## SQL_CN_ANY + 127 => ['SQL_CREATE_ASSERTION', 0 ], + 128 => ['SQL_CREATE_CHARACTER_SET', 0 ], + 129 => ['SQL_CREATE_COLLATION', 0 ], + 130 => ['SQL_CREATE_DOMAIN', 23 ], ## no collation, no defer + 131 => ['SQL_CREATE_SCHEMA', 3 ], ## 1+2 schema + authorize + 132 => ['SQL_CREATE_TABLE', 13845 ], ## no collation + 133 => ['SQL_CREATE_TRANSLATION', 0 ], + 134 => ['SQL_CREATE_VIEW', 9 ], ## local + create? + 119 => ['SQL_DATETIME_LITERALS', 65535 ], ## all? + 170 => ['SQL_DDL_INDEX', 3 ], ## create + drop + 136 => ['SQL_DROP_ASSERTION', 0 ], + 137 => ['SQL_DROP_CHARACTER_SET', 0 ], + 138 => ['SQL_DROP_COLLATION', 0 ], + 139 => ['SQL_DROP_DOMAIN', 7 ], + 140 => ['SQL_DROP_SCHEMA', 7 ], + 141 => ['SQL_DROP_TABLE', 7 ], + 142 => ['SQL_DROP_TRANSLATION', 0 ], + 143 => ['SQL_DROP_VIEW', 7 ], + 27 => ['SQL_EXPRESSIONS_IN_ORDERBY', 'Y' ], + 88 => ['SQL_GROUP_BY', 2 ], ## GROUP_BY_CONTAINS_SELECT + 28 => ['SQL_IDENTIFIER_CASE', 2 ], ## SQL_IC_LOWER + 29 => ['SQL_IDENTIFIER_QUOTE_CHAR', q{"} ], + 148 => ['SQL_INDEX_KEYWORDS', 0 ], ## not needed for Pg + 172 => ['SQL_INSERT_STATEMENT', 7 ], ## 1+2+4 = all + 73 => ['SQL_INTEGRITY', 'Y' ], ## e.g. ON DELETE CASCADE? + 89 => ['SQL_KEYWORDS', 'KEYWORDS' ], ## magic word + 113 => ['SQL_LIKE_ESCAPE_CLAUSE', 'Y' ], + 75 => ['SQL_NON_NULLABLE_COLUMNS', 1 ], ## NNC_NOT_NULL + 115 => ['SQL_OJ_CAPABILITIES', 127 ], ## all + 90 => ['SQL_ORDER_BY_COLUMNS_IN_SELECT', 'N' ], + 38 => ['SQL_OUTER_JOINS', 'Y' ], + 21 => ['SQL_PROCEDURES', 'Y' ], + 93 => ['SQL_QUOTED_IDENTIFIER_CASE', 3 ], ## SQL_IC_SENSITIVE + 91 => ['SQL_SCHEMA_USAGE', 31 ], ## all + 94 => ['SQL_SPECIAL_CHARACTERS', '$' ], ## there are actually many more... + 118 => ['SQL_SQL_CONFORMANCE', 4 ], ## SQL92_INTERMEDIATE ?? 
+ 95 => ['SQL_SUBQUERIES', 31 ], ## all + 96 => ['SQL_UNION', 3 ], ## 1+2 = all + +## SQL limits + + 112 => ['SQL_MAX_BINARY_LITERAL_LEN', 0 ], + 34 => ['SQL_MAX_CATALOG_NAME_LEN', 0 ], + 108 => ['SQL_MAX_CHAR_LITERAL_LEN', 0 ], + 30 => ['SQL_MAX_COLUMN_NAME_LEN', 'NAMEDATALEN' ], ## magic word + 97 => ['SQL_MAX_COLUMNS_IN_GROUP_BY', 0 ], + 98 => ['SQL_MAX_COLUMNS_IN_INDEX', 0 ], + 99 => ['SQL_MAX_COLUMNS_IN_ORDER_BY', 0 ], + 100 => ['SQL_MAX_COLUMNS_IN_SELECT', 0 ], + 101 => ['SQL_MAX_COLUMNS_IN_TABLE', 250 ], ## 250-1600 (depends on column types) + 31 => ['SQL_MAX_CURSOR_NAME_LEN', 'NAMEDATALEN' ], ## magic word + 10005 => ['SQL_MAX_IDENTIFIER_LEN', 'NAMEDATALEN' ], ## magic word + 102 => ['SQL_MAX_INDEX_SIZE', 0 ], + 102 => ['SQL_MAX_PROCEDURE_NAME_LEN', 'NAMEDATALEN' ], ## magic word + 104 => ['SQL_MAX_ROW_SIZE', 0 ], ## actually 1.6 TB, but too big to represent here + 103 => ['SQL_MAX_ROW_SIZE_INCLUDES_LONG', 'Y' ], + 32 => ['SQL_MAX_SCHEMA_NAME_LEN', 'NAMEDATALEN' ], ## magic word + 105 => ['SQL_MAX_STATEMENT_LEN', 0 ], + 35 => ['SQL_MAX_TABLE_NAME_LEN', 'NAMEDATALEN' ], ## magic word + 106 => ['SQL_MAX_TABLES_IN_SELECT', 0 ], + 107 => ['SQL_MAX_USER_NAME_LEN', 'NAMEDATALEN' ], ## magic word + +## Scalar function information + + 48 => ['SQL_CONVERT_FUNCTIONS', 2 ], ## CVT_CAST only? + 49 => ['SQL_NUMERIC_FUNCTIONS', 16777215 ], ## ?? all but some naming clashes: rand(om), trunc(ate), log10=ln, etc. + 50 => ['SQL_STRING_FUNCTIONS', 16280984 ], ## ?? + 51 => ['SQL_SYSTEM_FUNCTIONS', 0 ], ## ?? + 109 => ['SQL_TIMEDATE_ADD_INTERVALS', 0 ], ## ?? no explicit timestampadd? + 110 => ['SQL_TIMEDATE_DIFF_INTERVALS', 0 ], ## ?? + 52 => ['SQL_TIMEDATE_FUNCTIONS', 1966083 ], + +## Conversion information - all but BIT, LONGVARBINARY, and LONGVARCHAR + + 53 => ['SQL_CONVERT_BIGINT', 1830399 ], + 54 => ['SQL_CONVERT_BINARY', 1830399 ], + 55 => ['SQL_CONVERT_BIT', 0 ], + 56 => ['SQL_CONVERT_CHAR', 1830399 ], + 57 => ['SQL_CONVERT_DATE', 1830399 ], + 58 => ['SQL_CONVERT_DECIMAL', 1830399 ], + 59 => ['SQL_CONVERT_DOUBLE', 1830399 ], + 60 => ['SQL_CONVERT_FLOAT', 1830399 ], + 61 => ['SQL_CONVERT_INTEGER', 1830399 ], + 123 => ['SQL_CONVERT_INTERVAL_DAY_TIME', 1830399 ], + 124 => ['SQL_CONVERT_INTERVAL_YEAR_MONTH', 1830399 ], + 71 => ['SQL_CONVERT_LONGVARBINARY', 0 ], + 62 => ['SQL_CONVERT_LONGVARCHAR', 0 ], + 63 => ['SQL_CONVERT_NUMERIC', 1830399 ], + 64 => ['SQL_CONVERT_REAL', 1830399 ], + 65 => ['SQL_CONVERT_SMALLINT', 1830399 ], + 66 => ['SQL_CONVERT_TIME', 1830399 ], + 67 => ['SQL_CONVERT_TIMESTAMP', 1830399 ], + 68 => ['SQL_CONVERT_TINYINT', 1830399 ], + 69 => ['SQL_CONVERT_VARBINARY', 0 ], + 70 => ['SQL_CONVERT_VARCHAR', 1830399 ], + 122 => ['SQL_CONVERT_WCHAR', 0 ], + 125 => ['SQL_CONVERT_WLONGVARCHAR', 0 ], + 126 => ['SQL_CONVERT_WVARCHAR', 0 ], + + ); ## end of %type + + ## Put both numbers and names into a hash + my %t; + for (keys %type) { + $t{$_} = $type{$_}->[1]; + $t{$type{$_}->[0]} = $type{$_}->[1]; + } + + return undef unless exists $t{$type}; + + my $ans = $t{$type}; + + if ($ans eq 'NAMEDATALEN') { + return $dbh->selectall_arrayref('SHOW max_identifier_length')->[0][0]; + } + elsif ($ans eq 'ODBCVERSION') { + my $version = $dbh->{private_dbdpg}{version}; + return '00.00.0000' unless $version =~ /^(\d\d?)(\d\d)(\d\d)$/o; + return sprintf '%02d.%02d.%.2d00', $1,$2,$3; + } + elsif ($ans eq 'DBDVERSION') { + my $simpleversion = $DBD::Pg::VERSION; + $simpleversion =~ s/_/./g; + return sprintf '%02d.%02d.%1d%1d%1d%1d', split (/\./, "$simpleversion.0.0.0.0.0.0"); + } + elsif ($ans eq 
'MAXCONNECTIONS') { + return $dbh->selectall_arrayref('SHOW max_connections')->[0][0]; + } + elsif ($ans eq 'ENCODING') { + return $dbh->selectall_arrayref('SHOW server_encoding')->[0][0]; + } + elsif ($ans eq 'KEYWORDS') { + ## http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html + ## Basically, we want ones that are 'reserved' for PostgreSQL but not 'reserved' in SQL:2003 + ## + return join ',' => (qw(ANALYSE ANALYZE ASC DEFERRABLE DESC DO FREEZE ILIKE INITIALLY ISNULL LIMIT NOTNULL OFF OFFSET PLACING RETURNING VERBOSE)); + } + elsif ($ans eq 'CURRENTDB') { + return $dbh->selectall_arrayref('SELECT pg_catalog.current_database()')->[0][0]; + } + elsif ($ans eq 'READONLY') { + my $SQL = q{SELECT CASE WHEN setting = 'on' THEN 'Y' ELSE 'N' END FROM pg_settings WHERE name = 'transaction_read_only'}; + my $info = $dbh->selectall_arrayref($SQL); + return defined $info->[0] ? $info->[0][0] : 'N'; + } + elsif ($ans eq 'DEFAULTTXN') { + my $SQL = q{SELECT CASE WHEN setting = 'read committed' THEN 2 ELSE 8 END FROM pg_settings WHERE name = 'default_transaction_isolation'}; + my $info = $dbh->selectall_arrayref($SQL); + return defined $info->[0] ? $info->[0][0] : 2; + } + + return $ans; + } # end of get_info + + sub private_attribute_info { + return { + pg_async_status => undef, + pg_bool_tf => undef, + pg_db => undef, + pg_default_port => undef, + pg_enable_utf8 => undef, + pg_errorlevel => undef, + pg_expand_array => undef, + pg_host => undef, + pg_INV_READ => undef, + pg_INV_WRITE => undef, + pg_lib_version => undef, + pg_options => undef, + pg_pass => undef, + pg_pid => undef, + pg_placeholder_dollaronly => undef, + pg_port => undef, + pg_prepare_now => undef, + pg_protocol => undef, + pg_server_prepare => undef, + pg_server_version => undef, + pg_socket => undef, + pg_standard_conforming_strings => undef, + pg_user => undef, + }; + } +} + + +{ + package DBD::Pg::st; + + sub parse_trace_flag { + my ($h, $flag) = @_; + return DBD::Pg->parse_trace_flag($flag); + } + + sub bind_param_array { + + ## Binds an array of data to a specific placeholder in a statement + ## The DBI version is broken, so we implement a near-copy here + + my $sth = shift; + my ($p_id, $value_array, $attr) = @_; + + ## Bail if the second arg is not undef or an an arrayref + return $sth->set_err(1, "Value for parameter $p_id must be a scalar or an arrayref, not a ".ref($value_array)) + if defined $value_array and ref $value_array and ref $value_array ne 'ARRAY'; + + ## Bail if the first arg is not a number + return $sth->set_err(1, q{Can't use named placeholders for non-driver supported bind_param_array}) + unless DBI::looks_like_number($p_id); # because we rely on execute(@ary) here + + ## Store the list of items in the hash (will be undef or an arayref) + $sth->{ParamArrays}{$p_id} = $value_array; + + ## If any attribs were passed in, we need to call bind_param + return $sth->bind_param($p_id, '', $attr) if $attr; ## This is the big change so -w does not complain + + return 1; + } ## end bind_param_array + + sub private_attribute_info { + return { + pg_async => undef, + pg_bound => undef, + pg_current_row => undef, + pg_direct => undef, + pg_numbound => undef, + pg_cmd_status => undef, + pg_oid_status => undef, + pg_placeholder_dollaronly => undef, + pg_prepare_name => undef, + pg_prepare_now => undef, + pg_segments => undef, + pg_server_prepare => undef, + pg_size => undef, + pg_type => undef, + }; + } + +} ## end st section + +1; + +__END__ + +=head1 NAME + +DBD::Pg - PostgreSQL database driver for the 
DBI module
+
+=head1 SYNOPSIS
+
+  use DBI;
+
+  $dbh = DBI->connect("dbi:Pg:dbname=$dbname", '', '', {AutoCommit => 0});
+  # The AutoCommit attribute should always be explicitly set
+
+  # For some advanced uses you may need PostgreSQL type values:
+  use DBD::Pg qw(:pg_types);
+
+  # For asynchronous calls, import the async constants:
+  use DBD::Pg qw(:async);
+
+  $dbh->do('INSERT INTO mytable(a) VALUES (1)');
+
+  $sth = $dbh->prepare('INSERT INTO mytable(a) VALUES (?)');
+  $sth->execute(1);
+
+=head1 VERSION
+
+This documents version 2.16.1 of the DBD::Pg module.
+
+=head1 DESCRIPTION
+
+DBD::Pg is a Perl module that works with the DBI module to provide access to
+PostgreSQL databases.
+
+=head1 MODULE DOCUMENTATION
+
+This documentation describes driver-specific behavior and restrictions. It is
+not supposed to be used as the only reference for the user. In any case
+consult the B<DBI> documentation first!
+
+=for html <a href="http://search.cpan.org/~timb/DBI/DBI.pm">Latest DBI documentation.</a>
+
+=head1 THE DBI CLASS
+
+=head2 DBI Class Methods
+
+=head3 B<connect>
+
+This method creates a database handle by connecting to a database, and is the DBI
+equivalent of the "new" method. To connect to a Postgres database with a minimum of parameters,
+use the following syntax:
+
+  $dbh = DBI->connect("dbi:Pg:dbname=$dbname", '', '', {AutoCommit => 0});
+
+This connects to the database named in the C<$dbname> variable on the default port (usually 5432)
+without any user authentication.
+
+The following connect statement shows almost all possible parameters:
+
+  $dbh = DBI->connect("dbi:Pg:dbname=$dbname;host=$host;port=$port;options=$options",
+                      $username,
+                      $password,
+                      {AutoCommit => 0, RaiseError => 1, PrintError => 0}
+                     );
+
+If a parameter is not given, the connect() method will first look for
+specific environment variables, and then fall back to hard-coded defaults:
+
+  parameter   environment variable   hard coded default
+  ------------------------------------------------------
+  host        PGHOST                 local domain socket
+  hostaddr    PGHOSTADDR             local domain socket
+  port        PGPORT                 5432
+  dbname*     PGDATABASE             current userid
+  username    PGUSER                 current userid
+  password    PGPASSWORD             (none)
+  options     PGOPTIONS              (none)
+  service     PGSERVICE              (none)
+  sslmode     PGSSLMODE              (none)
+
+* May also use the aliases C<db> or C<database>
+
+If the username and password values passed via C<connect()> are undefined (as opposed
+to merely being empty strings), DBI will use the environment variables I<DBI_USER>
+and I<DBI_PASS> if they exist.
+
+You can also connect by using a service connection file, which is named
+F<pg_service.conf>. The location of this file can be controlled by
+setting the I<PGSYSCONFDIR> environment variable. To use one of the named
+services within the file, set the name by using either the I<service> parameter
+or the environment variable I<PGSERVICE>. Note that when connecting this way,
+only the minimum parameters should be used. For example, to connect to a
+service named "zephyr", you could use:
+
+  $dbh = DBI->connect("dbi:Pg:service=zephyr", '', '');
+
+You could also set C<$ENV{PGSERVICE}> to "zephyr" and connect like this:
+
+  $dbh = DBI->connect("dbi:Pg:", '', '');
+
+The format of the F<pg_service.conf> file is simply a bracketed service
+name, followed by one parameter per line in the format name=value.
+For example:
+
+  [zephyr]
+  dbname=winds
+  user=wisp
+  password=W$2Hc00YSgP
+  port=6543
+
+There are four valid arguments to the I<sslmode> parameter, which controls
+whether to use SSL to connect to the database:
+
+=over 4
+
+=item * disable: SSL connections are never used
+
+=item * allow: try non-SSL, then SSL
+
+=item * prefer: try SSL, then non-SSL
+
+=item * require: connect only with SSL
+
+=back
+
+You can also connect using sockets in a specific directory. This
+may be needed if the server you are connecting to has a different
+default socket directory from the one used to compile DBD::Pg.
+Use the complete path to the socket directory as the name of the
+host, like this:
+
+  $dbh = DBI->connect('dbi:Pg:dbname=foo;host=/var/tmp/socket',
+                      $username,
+                      $password,
+                      {AutoCommit => 0, RaiseError => 1});
+
+The attribute hash can also contain a key named C<dbd_verbose>, which
+simply calls C<< $dbh->trace('DBD') >> after the handle is created. This attribute
+is not recommended, as it is clearer to simply call C<trace> explicitly
+in your script.
+
+=head3 B<connect_cached>
+
+  $dbh = DBI->connect_cached("dbi:Pg:dbname=$dbname", $username, $password, \%options);
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<data_sources>
+
+  @data_sources = DBI->data_sources('Pg');
+  @data_sources = $dbh->data_sources();
+
+Returns a list of available databases. Unless the environment variable C<DBI_DSN> is set,
+a connection will be attempted to the database C<template1>. The normal connection
+environment variables also apply, such as C<PGHOST>, C<PGPORT>, C<DBI_USER>,
+C<DBI_PASS>, and C<PGSERVICE>.
+
+You can also pass in options to add to the connection string. For example, to specify
+an alternate port and host:
+
+  @data_sources = DBI->data_sources('Pg', 'port=5824;host=example.com');
+
+or:
+
+  @data_sources = $dbh->data_sources('port=5824;host=example.com');
+
+=head2 Methods Common To All Handles
+
+For all of the methods below, B<$h> can be either a database handle (B<$dbh>)
+or a statement handle (B<$sth>). Note that I<$dbh> and I<$sth> can be replaced with
+any variable name you choose: these are just the names most often used. Another
+common variable used in this documentation is I<$rv>, which stands for "return value".
+
+=head3 B<err>
+
+  $rv = $h->err;
+
+Returns the error code from the last method called. For the connect method it returns
+C<PQstatus>, which is a number used by I<libpq> (the Postgres connection library). A value of 0
+indicates no error (CONNECTION_OK), while any other number indicates a failed connection. The
+only other number commonly seen is 1 (CONNECTION_BAD). See the libpq documentation for the
+complete list of return codes.
+
+In all other non-connect methods C<< $h->err >> returns the C<PQresultStatus> of the current
+handle. This is a number used by libpq and is one of:
+
+  0  Empty query string
+  1  A command that returns no data successfully completed.
+  2  A command that returns data successfully completed.
+  3  A COPY OUT command is still in progress.
+  4  A COPY IN command is still in progress.
+  5  A bad response was received from the backend.
+  6  A nonfatal error occurred (a notice or warning message)
+  7  A fatal error was returned: the last query failed.
+
+=head3 B<errstr>
+
+  $str = $h->errstr;
+
+Returns the last error that was reported by Postgres. This message is affected
+by the L</pg_errorlevel> setting.
+
+=head3 B<state>
+
+  $str = $h->state;
+
+Returns a five-character "SQLSTATE" code.
+Success is indicated by a C<00000> code, which
+gets mapped to an empty string by DBI. A code of C<S8006> indicates a connection failure,
+usually because the connection to the Postgres server has been lost.
+
+While this method can be called as either C<< $sth->state >> or C<< $dbh->state >>, it
+is usually clearer to always use C<< $dbh->state >>.
+
+The list of codes used by PostgreSQL can be found at:
+L<http://www.postgresql.org/docs/current/static/errcodes-appendix.html>
+
+Note that these codes are part of the SQL standard and only a small number
+of them will be used by PostgreSQL.
+
+Common codes:
+
+  00000 Successful completion
+  25P01 No active SQL transaction
+  25P02 In failed SQL transaction
+  S8006 Connection failure
+
+=head3 B<trace>
+
+  $h->trace($trace_settings);
+  $h->trace($trace_settings, $trace_filename);
+  $trace_settings = $h->trace;
+
+Changes the trace settings on a database or statement handle.
+The optional second argument specifies a file to write the
+trace information to. If no filename is given, the information
+is written to F<STDERR>. Note that tracing can be set globally as
+well by setting C<< DBI->trace >>, or by using the environment
+variable I<DBI_TRACE>.
+
+The value is either a numeric level or a named flag. For the
+flags that DBD::Pg uses, see L<parse_trace_flag|/parse_trace_flag and parse_trace_flags>.
+
+=head3 B<trace_msg>
+
+  $h->trace_msg($message_text);
+  $h->trace_msg($message_text, $min_level);
+
+Writes a message to the current trace output (as set by the L</trace> method). If a second argument
+is given, the message is only written if the current tracing level is equal to or greater than
+the C<$min_level>.
+
+=head3 B<parse_trace_flag> and B<parse_trace_flags>
+
+  $h->trace($h->parse_trace_flags('SQL|pglibpq'));
+  $h->trace($h->parse_trace_flags('1|pgstart'));
+
+  ## Simpler:
+  $h->trace('SQL|pglibpq');
+  $h->trace('1|pgstart');
+
+  my $value = DBD::Pg->parse_trace_flag('pglibpq');
+  DBI->trace($value);
+
+The parse_trace_flags method is used to convert one or more named
+flags to a number which can be passed to the L</trace> method.
+DBD::Pg currently supports the DBI-specific flag, C<SQL>,
+as well as the ones listed below.
+
+Flags can be combined by using the parse_trace_flags method,
+which simply calls C<parse_trace_flag> on each item and
+combines them.
+
+Sometimes you may wish to turn the tracing on before you connect
+to the database. The final example above shows a way of doing this:
+the call to C<< DBD::Pg->parse_trace_flag >> provides a number that can
+be fed to C<< DBI->trace >> before you create a database handle.
+
+DBD::Pg supports the following trace flags:
+
+=over 4
+
+=item SQL
+
+Outputs all SQL statements. Note that the output provided will not
+necessarily be in a form suitable for passing directly to Postgres,
+as server-side prepared statements are used extensively by DBD::Pg.
+For maximum portability of output (but with a potential performance
+hit), use with C<< $dbh->{pg_server_prepare} = 0 >>.
+
+=item DBD
+
+Turns on all non-DBI flags, in other words, only the ones that are specific
+to DBD::Pg (all those below which start with the letters 'pg').
+
+=item pglibpq
+
+Outputs the name of each libpq function (without arguments) immediately
+before running it. This is a good way to trace the flow of your program
+at a low level. This information is also output if the trace level
+is set to 4 or greater.
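+
+For instance, a minimal sketch (any simple query will do):
+
+  $dbh->trace('pglibpq');
+  $dbh->do('SELECT 123');  ## the trace output now names each libpq call made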
+
+=item pgstart
+
+Outputs the name of each internal DBD::Pg function, and other information such as
+the function arguments or important global variables, as each function starts. This
+information is also output if the trace level is set to 4 or greater.
+
+=item pgend
+
+Outputs a simple message at the very end of each internal DBD::Pg function. This is also
+output if the trace level is set to 4 or greater.
+
+=item pgprefix
+
+Forces each line of trace output to begin with the string B<C<dbdpg: >>. This helps to
+differentiate it from the normal DBI trace output.
+
+=item pglogin
+
+Outputs a message showing the connection string right before a new database connection
+is attempted, a message when the connection was successful, and a message right after
+the database has been disconnected. Also output if trace level is 5 or greater.
+
+=back
+
+=for text See the DBI section on TRACING for more information.
+
+=for html See the <a href="http://search.cpan.org/~timb/DBI/DBI.pm#TRACING">DBI section on TRACING</a> for more information.<br />
+
+=head3 B<func>
+
+DBD::Pg uses the C<func> method to support a variety of functions.
+Note that the name of the function comes I<last>, after the arguments.
+
+=over
+
+=item table_attributes
+
+  $attrs = $dbh->func($table, 'table_attributes');
+
+Use of the table_attributes function is no longer recommended. Instead,
+you can use the more portable C<column_info> and C<primary_key> methods
+to access the same information.
+
+The table_attributes method returns, for the given table argument, a
+reference to an array of hashes, each of which contains the following keys:
+
+  NAME        attribute name
+  TYPE        attribute type
+  SIZE        attribute size (-1 for variable size)
+  NULLABLE    flag nullable
+  DEFAULT     default value
+  CONSTRAINT  constraint
+  PRIMARY_KEY flag is_primary_key
+  REMARKS     attribute description
+
+=item pg_lo_creat
+
+  $lobjId = $dbh->pg_lo_creat($mode);
+
+Creates a new large object and returns the object-id. C<$mode> is a bitmask
+describing read and write access to the new object. This setting is ignored
+since Postgres version 8.1. For backwards compatibility, however, you should
+set a valid mode anyway (see L</pg_lo_open> for a list of valid modes).
+
+Upon failure it returns C<undef>. This function cannot be used if AutoCommit is enabled.
+
+The old way of calling large object functions, C<< $dbh->func(..., 'lo_*') >>, is deprecated.
+
+=item lo_open
+
+  $lobj_fd = $dbh->pg_lo_open($lobjId, $mode);
+
+Opens an existing large object and returns an object-descriptor for use in
+subsequent C<lo_*> calls. C<$mode> is a bitmask describing read and write
+access to the opened object. It may be one of:
+
+  $dbh->{pg_INV_READ}
+  $dbh->{pg_INV_WRITE}
+  $dbh->{pg_INV_READ} | $dbh->{pg_INV_WRITE}
+
+C<pg_INV_WRITE> and C<pg_INV_WRITE | pg_INV_READ> modes are identical; in
+both modes, the large object can be read from or written to.
+Reading from the object will provide the object as written in other committed
+transactions, along with any writes performed by the current transaction.
+Objects opened with C<pg_INV_READ> cannot be written to. Reading from this
+object will provide the stored data at the time of the transaction snapshot
+which was active when C<lo_open> was called.
+
+Returns C<undef> upon failure. Note that 0 is a perfectly correct (and common)
+object descriptor! This function cannot be used if AutoCommit is enabled.
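+
+A minimal usage sketch (AutoCommit must be off; the C<pg_INV_*> mode constants
+are the handle attributes documented above):
+
+  $dbh->{AutoCommit} = 0;
+  my $mode    = $dbh->{pg_INV_READ} | $dbh->{pg_INV_WRITE};
+  my $lobjId  = $dbh->pg_lo_creat($mode);
+  my $lobj_fd = $dbh->pg_lo_open($lobjId, $mode);
+  defined $lobj_fd or die 'pg_lo_open failed';  ## remember: 0 is a valid descriptor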
+
+=item lo_write
+
+  $nbytes = $dbh->pg_lo_write($lobj_fd, $buffer, $len);
+
+Writes C<$len> bytes of C<$buffer> into the large object C<$lobj_fd>. Returns the number
+of bytes written and C<undef> upon failure. This function cannot be used if AutoCommit is enabled.
+
+=item lo_read
+
+  $nbytes = $dbh->pg_lo_read($lobj_fd, $buffer, $len);
+
+Reads C<$len> bytes into C<$buffer> from large object C<$lobj_fd>. Returns the number of
+bytes read and C<undef> upon failure. This function cannot be used if AutoCommit is enabled.
+
+=item lo_lseek
+
+  $loc = $dbh->pg_lo_lseek($lobj_fd, $offset, $whence);
+
+Changes the current read or write location on the large object
+C<$lobj_fd>. Currently C<$whence> can only be 0 (which is L_SET). Returns the current
+location and C<undef> upon failure. This function cannot be used if AutoCommit is enabled.
+
+=item lo_tell
+
+  $loc = $dbh->pg_lo_tell($lobj_fd);
+
+Returns the current read or write location on the large object C<$lobj_fd> and C<undef> upon failure.
+This function cannot be used if AutoCommit is enabled.
+
+=item lo_close
+
+  $lobj_fd = $dbh->pg_lo_close($lobj_fd);
+
+Closes an existing large object. Returns true upon success and false upon failure.
+This function cannot be used if AutoCommit is enabled.
+
+=item lo_unlink
+
+  $ret = $dbh->pg_lo_unlink($lobjId);
+
+Deletes an existing large object. Returns true upon success and false upon failure.
+This function cannot be used if AutoCommit is enabled.
+
+=item lo_import
+
+  $lobjId = $dbh->pg_lo_import($filename);
+
+Imports a Unix file as a large object and returns the object id of the new
+object or C<undef> upon failure.
+
+=item lo_export
+
+  $ret = $dbh->pg_lo_export($lobjId, $filename);
+
+Exports a large object into a Unix file. Returns false upon failure, true otherwise.
+
+=item getfd
+
+  $fd = $dbh->func('getfd');
+
+Deprecated, use L<< $dbh->{pg_socket}|/pg_socket >> instead.
+
+=back
+
+=head3 B<private_attribute_info>
+
+  $hashref = $dbh->private_attribute_info();
+  $hashref = $sth->private_attribute_info();
+
+Returns a hash of all private attributes used by DBD::Pg, for either
+a database or a statement handle. Currently, all the hash values are undef.
+
+=head1 ATTRIBUTES COMMON TO ALL HANDLES
+
+=head3 B<InactiveDestroy> (boolean)
+
+If set to true, then the L</disconnect> method will not be automatically called when
+the database handle goes out of scope. This is required if you are forking, and even
+then you must tread carefully and ensure that either the parent or the child (but not
+both!) handles all database calls from that point forwards, so that messages from the
+Postgres backend are only handled by one of the processes. If you don't set things up
+properly, you will see messages such as "I<server closed the connection unexpectedly>",
+and "I<message type 0x32 arrived from server while idle>". The best solution is to either
+have the child process reconnect to the database with a fresh database handle, or to
+rewrite your application not to use forking. See the section on L</Asynchronous Queries>
+for a way to have your script continue to work while the database is processing a request.
+
+=head3 B<RaiseError> (boolean, inherited)
+
+Forces errors to always raise an exception. Although it defaults to off, it is recommended that this
+be turned on, as the alternative is to check the return value of every method (prepare, execute, fetch, etc.)
+manually, which is easy to forget to do.
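+
+For example, a common pattern (sketch only; the table name is hypothetical) is to
+enable RaiseError at connect time and trap failures with C<eval>:
+
+  $dbh = DBI->connect("dbi:Pg:dbname=$dbname", $username, $password,
+                      {AutoCommit => 0, RaiseError => 1, PrintError => 0});
+  eval {
+    $dbh->do('UPDATE mytable SET a = 1');  ## any failure now throws an exception
+    $dbh->commit;
+  };
+  if ($@) {
+    warn "Transaction aborted: $@";
+    $dbh->rollback;
+  }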
+
+=head3 B<PrintError> (boolean, inherited)
+
+Forces database errors to also generate warnings, which can then be filtered with methods such as
+locally redefining I<$SIG{__WARN__}> or using modules such as C<CGI::Carp>. This attribute is on
+by default.
+
+=head3 B<ShowErrorStatement> (boolean, inherited)
+
+Appends information about the current statement to error messages. If placeholder information
+is available, adds that as well. Defaults to false.
+
+=head3 B<Warn> (boolean, inherited)
+
+Enables warnings. This is on by default, and should only be turned off in a local block,
+for a short time, and only when absolutely needed.
+
+=head3 B<Executed> (boolean, read-only)
+
+Indicates if a handle has been executed. For database handles, this value is true after the L</do> method has been called, or
+when one of the child statement handles has issued an L</execute>. Issuing a L</commit> or L</rollback> always resets the
+attribute to false for database handles. For statement handles, any call to L</execute> or its variants will flip the value to
+true for the lifetime of the statement handle.
+
+=head3 B<TraceLevel> (integer, inherited)
+
+Sets the trace level, similar to the L</trace> method. See the sections on
+L</trace> and L</parse_trace_flag> for more details.
+
+=head3 B<Active> (boolean, read-only)
+
+Indicates if a handle is active or not. For database handles, this indicates if the database has
+been disconnected or not. For statement handles, it indicates if all the data has been fetched yet
+or not. Use of this attribute is not encouraged.
+
+=head3 B<Kids> (integer, read-only)
+
+Returns the number of child handles created for each handle type. For a driver handle, indicates the number
+of database handles created. For a database handle, indicates the number of statement handles created. For
+statement handles, it always returns zero, because statement handles do not create kids.
+
+=head3 B<ActiveKids> (integer, read-only)
+
+Same as C<Kids>, but only returns those that are active.
+
+=head3 B<CachedKids> (hash ref)
+
+Returns a hashref of handles. If called on a database handle, returns all statement handles created by use of the
+C<prepare_cached> method. If called on a driver handle, returns all database handles created by the L</connect_cached>
+method.
+
+=head3 B<ChildHandles> (array ref)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<PrintWarn> (boolean, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<HandleError> (code ref, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<HandleSetErr> (code ref, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<ErrCount> (unsigned integer)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<FetchHashKeyName> (string, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<ChopBlanks> (boolean, inherited)
+
+Supported by DBD::Pg as proposed by DBI. This method is similar to the
+SQL function C<RTRIM>.
+
+=head3 B<Taint> (boolean, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<TaintIn> (boolean, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<TaintOut> (boolean, inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<Profile> (inherited)
+
+Implemented by DBI, no driver-specific impact.
+
+=head3 B<Type> (scalar)
+
+Returns C<dr> for a driver handle, C<db> for a database handle, and C<st> for a statement handle.
+Should be rarely needed.
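+
+For example (illustrative only):
+
+  print $dbh->{Type};  ## prints 'db'
+  print $sth->{Type};  ## prints 'st'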
+
+=head3 B<LongReadLen>
+
+Not used by DBD::Pg.
+
+=head3 B<LongTruncOk>
+
+Not used by DBD::Pg.
+
+=head3 B<CompatMode>
+
+Not used by DBD::Pg.
+
+=head1 DBI DATABASE HANDLE OBJECTS
+
+=head2 Database Handle Methods
+
+=head3 B<selectall_arrayref>
+
+  $ary_ref = $dbh->selectall_arrayref($sql);
+  $ary_ref = $dbh->selectall_arrayref($sql, \%attr);
+  $ary_ref = $dbh->selectall_arrayref($sql, \%attr, @bind_values);
+
+Returns a reference to an array containing the rows returned by preparing and executing the SQL string.
+See the DBI documentation for full details.
+
+=head3 B<selectall_hashref>
+
+  $hash_ref = $dbh->selectall_hashref($sql, $key_field);
+
+Returns a reference to a hash containing the rows returned by preparing and executing the SQL string.
+See the DBI documentation for full details.
+
+=head3 B<selectcol_arrayref>
+
+  $ary_ref = $dbh->selectcol_arrayref($sql, \%attr, @bind_values);
+
+Returns a reference to an array containing the first column
+from each row returned by preparing and executing the SQL string. It is possible to specify exactly
+which columns to return. See the DBI documentation for full details.
+
+=head3 B<prepare>
+
+  $sth = $dbh->prepare($statement, \%attr);
+
+WARNING: DBD::Pg now (as of version 1.40) uses true prepared statements by sending them
+to the backend to be prepared by the Postgres server. Statements
+that were legal before may no longer work. See below for details.
+
+The prepare method prepares a statement for later execution. PostgreSQL supports
+prepared statements, which enables DBD::Pg to send the query only once, and
+simply send the arguments for every subsequent call to L</execute>.
+DBD::Pg can use these server-side prepared statements, or it can
+just send the entire query to the server each time. The best way
+is automatically chosen for each query. This will be sufficient for
+most users: keep reading for a more detailed explanation and some
+optional flags.
+
+Queries that do not begin with the word "SELECT", "INSERT",
+"UPDATE", or "DELETE" are never sent as server-side prepared statements.
+
+Deciding whether or not to use prepared statements depends on many factors,
+but you can force them to be used or not used by using the
+L</pg_server_prepare> attribute when calling L</prepare>. Setting this to "0" means to never use
+prepared statements. Setting L</pg_server_prepare> to "1" means that prepared
+statements should be used whenever possible. This is the default when connected
+to Postgres servers version 8.0 or higher. Servers that are version 7.4 get a special
+default value of "2", because server-side statements were only partially supported
+in that version. In this case, it only uses server-side prepares if all
+parameters are specifically bound.
+
+The L</pg_server_prepare> attribute can also be set at connection time like so:
+
+  $dbh = DBI->connect($DBNAME, $DBUSER, $DBPASS,
+                      { AutoCommit => 0,
+                        RaiseError => 1,
+                        pg_server_prepare => 0,
+                      });
+
+or you may set it after your database handle is created:
+
+  $dbh->{pg_server_prepare} = 1;
+
+To enable it for just one particular statement:
+
+  $sth = $dbh->prepare("SELECT id FROM mytable WHERE val = ?",
+                       { pg_server_prepare => 1 });
+
+You can even toggle between the two as you go:
+
+  $sth->{pg_server_prepare} = 1;
+  $sth->execute(22);
+  $sth->{pg_server_prepare} = 0;
+  $sth->execute(44);
+  $sth->{pg_server_prepare} = 1;
+  $sth->execute(66);
+
+In the above example, the first execute will use the previously prepared statement.
+The second execute will not, but will build the query into a single string and send
+it to the server. The third one will act like the first and only send the arguments.
+Even if you toggle back and forth, a statement is only prepared once.
+
+Using prepared statements is in theory quite a bit faster: not only does the
+PostgreSQL backend have to prepare the query only once, but DBD::Pg no
+longer has to worry about quoting each value before sending it to the server.
+
+However, there are some drawbacks. The server cannot always choose the ideal
+query plan because it will not know the arguments beforehand. But for most
+situations in which you will be executing similar queries many times, the default
+plan will probably work out well. Programs such as PgBouncer which cache connections
+at a low level should not use prepared statements via DBD::Pg, or must take
+extra care in the application to account for the fact that prepared statements
+are not shared across database connections. Further discussion on this subject is beyond
+the scope of this documentation: please consult the pgsql-performance mailing
+list, L<http://archives.postgresql.org/pgsql-performance/>
+
+Only certain commands will be sent to a server-side prepare: currently these
+include C<SELECT>, C<INSERT>, C<UPDATE>, and C<DELETE>. DBD::Pg uses a simple
+naming scheme for the prepared statements themselves: B<dbdpg_XY_Z>, where B<Y> is the current
+PID, B<X> is either 'p' or 'n' (depending on whether the PID is a positive or negative
+number), and B<Z> is a number that starts at 1 and increases each time a new statement
+is prepared. This number is tracked at the database handle level, so multiple
+statement handles will not collide.
+
+You cannot send more than one command at a time in the same prepare command
+(by separating them with semi-colons) when using server-side prepares.
+
+The actual C<PREPARE> is usually not performed until the first execute is called, due
+to the fact that information on the data types (provided by L</bind_param>) may
+be provided after the prepare but before the execute.
+
+A server-side prepare may happen before the first L</execute>, but only if the server can
+handle the server-side prepare, and the statement contains no placeholders. It will
+also be prepared if the L</pg_prepare_now> attribute is passed in and set to a true
+value. Similarly, the L</pg_prepare_now> attribute can be set to 0 to ensure that
+the statement is B<not> prepared immediately, although the cases in which you would
+want this are very rare. Finally, you can set the default behavior of all prepare
+statements by setting the L</pg_prepare_now> attribute on the database handle:
+
+  $dbh->{pg_prepare_now} = 1;
+
+The following two examples will be prepared right away:
+
+  $dbh->prepare("SELECT 123"); ## no placeholders
+
+  $dbh->prepare("SELECT 123, ?", {pg_prepare_now => 1});
+
+The following two examples will NOT be prepared right away:
+
+  $dbh->prepare("SELECT 123, ?"); ## has a placeholder
+
+  $dbh->prepare("SELECT 123", {pg_prepare_now => 0});
+
+There are times when you may want to prepare a statement yourself. To do this,
+simply send the C<PREPARE> statement directly to the server (e.g. with
+the L</do> method). Create a statement handle and set the prepared name via
+the L</pg_prepare_name> attribute. The statement handle can be created with a dummy
+statement, as it will not be executed. However, it should have the same
+number of placeholders as your prepared statement.
+Example:
+
+  $dbh->do('PREPARE mystat AS SELECT COUNT(*) FROM pg_class WHERE reltuples < ?');
+  $sth = $dbh->prepare('SELECT ?');
+  $sth->bind_param(1, 1, SQL_INTEGER);
+  $sth->{pg_prepare_name} = 'mystat';
+  $sth->execute(123);
+
+The above will run the equivalent of this query on the backend:
+
+  EXECUTE mystat(123);
+
+which is the equivalent of:
+
+  SELECT COUNT(*) FROM pg_class WHERE reltuples < 123;
+
+You can force DBD::Pg to send your query directly to the server by adding
+the L</pg_direct> attribute to your prepare call. This is not recommended,
+but is added just in case you need it.
+
+=head4 B<Placeholders>
+
+There are three types of placeholders that can be used in DBD::Pg. The first is
+the "question mark" type, in which each placeholder is represented by a single
+question mark character. This is the method recommended by the DBI specs and is the most
+portable. Each question mark is internally replaced by a "dollar sign number" in the order
+in which they appear in the query (important when using L</bind_param>).
+
+The second type of placeholder is "dollar sign numbers". This is the method
+that Postgres uses internally and is overall probably the best method to use
+if you do not need compatibility with other database systems. DBD::Pg, like
+PostgreSQL, allows the same number to be used more than once in the query.
+Numbers must start with "1" and increment by one value (but can appear in any order
+within the query). If the same number appears more than once in a query, it is treated as a
+single parameter and all instances are replaced at once. Examples:
+
+Not legal:
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages > $2';              # Does not start with 1
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages BETWEEN $1 AND $3'; # Missing 2
+
+Legal:
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages > $1';
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages BETWEEN $1 AND $2';
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages BETWEEN $2 AND $1'; # legal but confusing
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages BETWEEN $1 AND $2 AND reltuples > $1';
+
+  $SQL = 'SELECT count(*) FROM pg_class WHERE relpages > $1 AND reltuples > $1';
+
+In the final statement above, DBI thinks there is only one placeholder, so this
+statement will replace both placeholders:
+
+  $sth->bind_param(1, 2045);
+
+While a simple execute with no bind_param calls requires only a single argument as well:
+
+  $sth->execute(2045);
+
+The final placeholder type is "named parameters" in the format ":foo". While this
+syntax is supported by DBD::Pg, its use is discouraged in favor of
+dollar-sign numbers.
+
+The different types of placeholders cannot be mixed within a statement, but you may
+use different ones for each statement handle you have. This is confusing at best, so
+stick to one style within your program.
+
+If your queries use operators that contain question marks (e.g. some of the native
+Postgres geometric operators) or array slices (e.g. C<data[100:300]>), you can tell
+DBD::Pg to ignore any non-dollar sign placeholders by setting the
+L</pg_placeholder_dollaronly> attribute at either the database handle or the statement
+handle level.
+Examples:
+
+  $dbh->{pg_placeholder_dollaronly} = 1;
+  $sth = $dbh->prepare(q{SELECT * FROM mytable WHERE lseg1 ?# lseg2 AND name = $1});
+  $sth->execute('segname');
+
+Alternatively, you can set it at prepare time:
+
+  $sth = $dbh->prepare(q{SELECT * FROM mytable WHERE lseg1 ?-| lseg2 AND name = $1},
+                       {pg_placeholder_dollaronly => 1});
+  $sth->execute('segname');
+
+=head3 B<prepare_cached>
+
+  $sth = $dbh->prepare_cached($statement, \%attr);
+
+Implemented by DBI, no driver-specific impact. This method is most useful
+when using a server that supports server-side prepares, and you have asked
+the prepare to happen immediately via the L</pg_prepare_now> attribute.
+
+=head3 B<do>
+
+  $rv = $dbh->do($statement);
+  $rv = $dbh->do($statement, \%attr);
+  $rv = $dbh->do($statement, \%attr, @bind_values);
+
+Prepare and execute a single statement. Returns the number of rows affected if the
+query was successful, returns undef if an error occurred, and returns -1 if the
+number of rows is unknown or not available. Note that this method will return B<0E0> instead
+of 0 for 'no rows were affected', in order to always return a true value if no error occurred.
+
+If neither C<\%attr> nor C<@bind_values> is given, the query will be sent directly
+to the server without the overhead of internally creating a statement handle and
+running prepare and execute, for a measurable speed increase.
+
+Note that an empty statement (a string with no length) will not be passed to
+the server; if you want a simple test, use "SELECT 123" or the L</ping> method.
+
+=head3 B<last_insert_id>
+
+  $rv = $dbh->last_insert_id(undef, $schema, $table, undef);
+  $rv = $dbh->last_insert_id(undef, $schema, $table, undef, {sequence => $seqname});
+
+Attempts to return the id of the last value to be inserted into a table.
+You can either provide a sequence name (preferred) or provide a table
+name with optional schema, and DBD::Pg will attempt to find the sequence itself.
+The current value of the sequence is returned by a call to the C<CURRVAL()>
+PostgreSQL function. This will fail if the sequence has not yet been used in the
+current database connection.
+
+If you do not know the name of the sequence, you can provide a table name and
+DBD::Pg will attempt to return the correct value. To do this, there must be at
+least one column in the table with a C<NOT NULL> constraint, that has a unique
+constraint, and which uses a sequence as a default value. If more than one column
+meets these conditions, the primary key will be used. This involves some
+looking up of things in the system tables, so DBD::Pg will cache the sequence
+name for subsequent calls. If you need to disable this caching for some reason
+(such as the sequence name changing), you can control it by adding C<< pg_cache => 0 >>
+to the final (hashref) argument for last_insert_id.
+
+Please keep in mind that this method is far from foolproof, so make your
+script use it properly. Specifically, make sure that it is called
+immediately after the insert, and that the insert does not add a value
+to the column that is using the sequence as a default value. However, because
+we are using sequences, you can be sure that the value you got back has not
+been used by any other process.
+
+Some examples:
+
+  $dbh->do('CREATE SEQUENCE lii_seq START 1');
+  $dbh->do(q{CREATE TABLE lii (
+    foobar INTEGER NOT NULL UNIQUE DEFAULT nextval('lii_seq'),
+    baz VARCHAR)});
+  $SQL = 'INSERT INTO lii(baz) VALUES (?)';
+  $sth = $dbh->prepare($SQL);
+  for (qw(uno dos tres cuatro)) {
+    $sth->execute($_);
+    my $newid = $dbh->last_insert_id(undef,undef,undef,undef,{sequence=>'lii_seq'});
+    print "Last insert id was $newid\n";
+  }
+
+If you did not want to worry about the sequence name:
+
+  $dbh->do('CREATE TABLE lii2 (
+    foobar SERIAL UNIQUE,
+    baz VARCHAR)');
+  $SQL = 'INSERT INTO lii2(baz) VALUES (?)';
+  $sth = $dbh->prepare($SQL);
+  for (qw(uno dos tres cuatro)) {
+    $sth->execute($_);
+    my $newid = $dbh->last_insert_id(undef,undef,"lii2",undef);
+    print "Last insert id was $newid\n";
+  }
+
+=head3 B<commit>
+
+  $rv = $dbh->commit;
+
+Issues a COMMIT to the server, indicating that the current transaction is finished and that
+all changes made will be visible to other processes. If AutoCommit is enabled, then
+a warning is given and no COMMIT is issued. Returns true on success, false on error.
+See also the section on L</Transactions>.
+
+=head3 B<rollback>
+
+  $rv = $dbh->rollback;
+
+Issues a ROLLBACK to the server, which discards any changes made in the current transaction. If AutoCommit
+is enabled, then a warning is given and no ROLLBACK is issued. Returns true on success, and
+false on error. See also the section on L</Transactions>.
+
+=head3 B<begin_work>
+
+This method turns on transactions until the next call to L</commit> or L</rollback>, if L</AutoCommit> is
+currently enabled. If it is not enabled, calling begin_work will issue an error. Note that the
+transaction will not actually begin until the first statement after begin_work is called.
+Example:
+
+  $dbh->{AutoCommit} = 1;
+  $dbh->do('INSERT INTO foo VALUES (123)'); ## Changes committed immediately
+  $dbh->begin_work();
+  ## Not in a transaction yet, but AutoCommit is set to 0
+
+  $dbh->do("INSERT INTO foo VALUES (345)");
+  ## DBD::Pg actually issues two statements here:
+  ## BEGIN;
+  ## INSERT INTO foo VALUES (345)
+  ## We are now in a transaction
+
+  $dbh->commit();
+  ## AutoCommit is now set to 1 again
+
+=head3 B<disconnect>
+
+  $rv = $dbh->disconnect;
+
+Disconnects from the Postgres database. Any uncommitted changes will be rolled back upon disconnection. It's
+good policy to always explicitly call commit or rollback at some point before disconnecting, rather than
+relying on the default rollback behavior.
+
+This method may give warnings about "disconnect invalidates X active statement handle(s)". This means that
+you called C<< $sth->execute() >> but did not finish fetching all the rows. To avoid seeing this
+warning, either fetch all the rows or call C<< $sth->finish() >> for each executed statement handle.
+
+If the script exits before disconnect is called (or, more precisely, if the database handle is no longer
+referenced by anything), then the database handle's DESTROY method will call the rollback() and disconnect()
+methods automatically. It is best to explicitly disconnect rather than rely on this behavior.
+
+=head3 B<quote>
+
+  $rv = $dbh->quote($value, $data_type);
+
+This module implements its own C<quote> method. For simple string types, both backslashes
+and single quotes are doubled. You may also quote arrayrefs and receive a string
+suitable for passing into Postgres array columns.
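+
+For example (the exact output is illustrative):
+
+  print $dbh->quote(q{O'Reilly});  ## Prints: 'O''Reilly'
+  my $arr = $dbh->quote([1,2,3]);  ## a string suitable for an array column, e.g. '{1,2,3}'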
+
+If the value contains backslashes, and the server is version 8.1 or higher,
+then the escaped string syntax will be used (which places a capital E before
+the first single quote). This syntax is always used when quoting bytea values
+on servers 8.1 and higher.
+
+The C<data_type> argument is optional and should be one of the type constants
+exported by DBD::Pg (such as PG_BYTEA). In addition to string, bytea, char, bool,
+and other standard types, the following geometric types are supported: point, line,
+lseg, box, path, polygon, and circle (PG_POINT, PG_LINE, PG_LSEG, PG_BOX,
+PG_PATH, PG_POLYGON, and PG_CIRCLE respectively). To quote a Postgres-specific
+data type, you must use a 'hashref' argument like so:
+
+  my $quotedval = $dbh->quote($value, { pg_type => PG_VARCHAR });
+
+B<NOTE:> The undocumented (and invalid) support for the C<SQL_BINARY> data
+type is officially deprecated. Use C<PG_BYTEA> with C<bind_param()> instead:
+
+  $rv = $sth->bind_param($param_num, $bind_value,
+                         { pg_type => PG_BYTEA });
+
+=head3 B<quote_identifier>
+
+  $string = $dbh->quote_identifier( $name );
+  $string = $dbh->quote_identifier( undef, $schema, $table);
+
+Returns a quoted version of the supplied string, which is commonly a schema,
+table, or column name. The three argument form will return the schema and
+the table together, separated by a dot. Examples:
+
+  print $dbh->quote_identifier('grapefruit'); ## Prints: "grapefruit"
+
+  print $dbh->quote_identifier('juicy fruit'); ## Prints: "juicy fruit"
+
+  print $dbh->quote_identifier(undef, 'public', 'pg_proc');
+  ## Prints: "public"."pg_proc"
+
+=head3 B<pg_notifies>
+
+  $ret = $dbh->pg_notifies;
+
+Looks for any asynchronous notifications received and returns either C<undef>
+or a reference to a three-element array consisting of an event name, the PID
+of the backend that sent the NOTIFY command, and the optional payload string.
+Note that this does not first check if the connection to the database is still valid -
+for that, use the C<ping> method. You may need to commit if not in autocommit mode -
+new notices will not be picked up while in the middle of a transaction. An example:
+
+  $dbh->do("LISTEN abc");
+  $dbh->do("LISTEN def");
+
+  ## Hang around until we get the message we want
+  LISTENLOOP: {
+    while (my $notify = $dbh->pg_notifies) {
+      my ($name, $pid, $payload) = @$notify;
+      print qq{I received notice "$name" from PID $pid, payload was "$payload"\n};
+      ## Do something based on the notice received
+    }
+    $dbh->ping() or die qq{Ping failed!};
+    $dbh->commit();
+    sleep(5);
+    redo;
+  }
+
+Payloads will always be an empty string unless you are connecting to a Postgres
+server version 8.5 or higher.
+
+=head3 B<ping>
+
+  $rv = $dbh->ping;
+
+The C<ping> method is used to check the validity of a database handle. The value returned is
+either 0, indicating that the connection is no longer valid, or a positive integer, indicating
+one of the following:
+
+  Value Meaning
+  --------------------------------------------------
+    1   Database is idle (not in a transaction)
+    2   Database is active, there is a command in progress (usually seen after a COPY command)
+    3   Database is idle within a transaction
+    4   Database is idle, within a failed transaction
+
+Additional information on why a handle is not valid can be obtained by using the
+L</pg_ping> method.
+
+=head3 B<pg_ping>
+
+  $rv = $dbh->pg_ping;
+
+This is a DBD::Pg-specific extension to the L</ping> method.
This will check the
+validity of a database handle in exactly the same way as C<ping>, but instead of
+returning a 0 for an invalid connection, it will return a negative number. So in
+addition to returning the positive numbers documented for C<ping>, it may also
+return the following:
+
+  Value Meaning
+  --------------------------------------------------
+   -1   There is no connection to the database at all (e.g. after C<disconnect>)
+   -2   An unknown transaction status was returned (e.g. after forking)
+   -3   The handle exists, but no data was returned from a test query.
+
+In practice, you should only ever see -1 and -2.
+
+=head3 B<get_info>
+
+  $value = $dbh->get_info($info_type);
+
+Supports a very large set (> 250) of the information types, including the minimum
+recommended by DBI.
+
+=head3 B<table_info>
+
+  $sth = $dbh->table_info(undef, $schema, $table, $type);
+
+Returns all tables and views visible to the current user.
+The schema and table arguments will do a C<LIKE> search if a percent sign (C<%>) or an
+underscore (C<_>) is detected in the argument. The C<$type> argument accepts a value of either
+"TABLE" or "VIEW" (using both is the default action). Note that a statement handle is returned,
+and not a direct list of tables. See the examples below for ways to handle this.
+
+The following fields are returned:
+
+B<TABLE_CAT>: Always NULL, as Postgres does not have the concept of catalogs.
+
+B<TABLE_SCHEM>: The name of the schema that the table or view is in.
+
+B<TABLE_NAME>: The name of the table or view.
+
+B<TABLE_TYPE>: The type of object returned. Will be one of "TABLE", "VIEW",
+or "SYSTEM TABLE".
+
+The TABLE_SCHEM and TABLE_NAME will be quoted via C<quote_ident()>.
+
+Two additional fields specific to DBD::Pg are returned:
+
+B<pg_schema>: the unquoted name of the schema
+
+B<pg_table>: the unquoted name of the table
+
+If your database supports tablespaces (version 8.0 or greater), two additional
+DBD::Pg specific fields are returned:
+
+B<pg_tablespace_name>: the name of the tablespace the table is in
+
+B<pg_tablespace_location>: the location of the tablespace the table is in
+
+Tables that have not been assigned to a particular tablespace (or views)
+will return NULL (C<undef>) for both of the above fields.
+
+Rows are returned alphabetically, with all tables first, and then all views.
+
+Examples of use:
+
+  ## Display all tables and views in the public schema:
+  $sth = $dbh->table_info('', 'public', undef, undef);
+  for my $rel (@{$sth->fetchall_arrayref({})}) {
+    print "$rel->{TABLE_TYPE} name is $rel->{TABLE_NAME}\n";
+  }
+
+  ## Display the schema of all tables named 'foo':
+  $sth = $dbh->table_info('', undef, 'foo', 'TABLE');
+  for my $rel (@{$sth->fetchall_arrayref({})}) {
+    print "Table name is $rel->{TABLE_SCHEM}.$rel->{TABLE_NAME}\n";
+  }
+
+=head3 B<column_info>
+
+  $sth = $dbh->column_info( undef, $schema, $table, $column );
+
+Supported by this driver as proposed by DBI with the following exceptions.
+These fields are currently always returned with NULL (C<undef>) values:
+
+  TABLE_CAT
+  BUFFER_LENGTH
+  DECIMAL_DIGITS
+  NUM_PREC_RADIX
+  SQL_DATA_TYPE
+  SQL_DATETIME_SUB
+  CHAR_OCTET_LENGTH
+
+Also, six additional non-standard fields are returned:
+
+B<pg_type>: data type with additional info, e.g.
"character varying(20)" + +B<pg_constraint>: holds column constraint definition + +B<pg_schema>: the unquoted name of the schema + +B<pg_table>: the unquoted name of the table + +B<pg_column>: the unquoted name of the column + +B<pg_enum_values>: an array reference of allowed values for an enum column + +Note that the TABLE_SCHEM, TABLE_NAME, and COLUMN_NAME fields all return +output wrapped in quote_ident(). If you need the unquoted version, use +the pg_ fields above. + +=head3 B<primary_key_info> + + $sth = $dbh->primary_key_info( undef, $schema, $table, \%attr ); + +Supported by this driver as proposed by DBI. There are no search patterns allowed, but leaving the +$schema argument blank will cause the first table found in the schema +search path to be used. An additional field, "DATA_TYPE", is returned and +shows the data type for each of the arguments in the "COLUMN_NAME" field. + +This method will also return tablespace information for servers that support +tablespaces. See the L</table_info> entry for more information. + +The five additional custom fields returned are: + +B<pg_tablespace_name>: name of the tablespace, if any + +B<pg_tablespace_location>: location of the tablespace + +B<pg_schema>: the unquoted name of the schema + +B<pg_table>: the unquoted name of the table + +B<pg_column>: the unquoted name of the column + +In addition to the standard format of returning one row for each column +found for the primary key, you can pass the C<pg_onerow> attribute to force +a single row to be used. If the primary key has multiple columns, the +"KEY_SEQ", "COLUMN_NAME", and "DATA_TYPE" fields will return a comma-delimited +string. If the C<pg_onerow> attribute is set to "2", the fields will be +returned as an arrayref, which can be useful when multiple columns are +involved: + + $sth = $dbh->primary_key_info('', '', 'dbd_pg_test', {pg_onerow => 2}); + if (defined $sth) { + my $pk = $sth->fetchall_arrayref()->[0]; + print "Table $pk->[2] has a primary key on these columns:\n"; + for (my $x=0; defined $pk->[3][$x]; $x++) { + print "Column: $pk->[3][$x] (data type: $pk->[6][$x])\n"; + } + } + +=head3 B<primary_key> + + @key_column_names = $dbh->primary_key(undef, $schema, $table); + +Simple interface to the L</primary_key_info> method. Returns a list of the column names that +comprise the primary key of the specified table. The list is in primary key column sequence +order. If there is no primary key then an empty list is returned. + +=head3 B<foreign_key_info> + + $sth = $dbh->foreign_key_info( $pk_catalog, $pk_schema, $pk_table, + $fk_catalog, $fk_schema, $fk_table ); + +Supported by this driver as proposed by DBI, using the SQL/CLI variant. +There are no search patterns allowed, but leaving the C<$schema> argument +blank will cause the first table found in the schema search path to be +used. Two additional fields, "UK_DATA_TYPE" and "FK_DATA_TYPE", are returned +to show the data type for the unique and foreign key columns. Foreign +keys that have no named constraint (where the referenced column only has +an unique index) will return C<undef> for the "UK_NAME" field. + +=head3 B<statistics_info> + + $sth = $dbh->statistics_info( undef, $schema, $table, $unique_only, $quick ); + +Returns a statement handle that can be fetched from to give statistics information +on a specific table and its indexes. The C<$table> argument is mandatory. The +C<$schema> argument is optional but recommended. The C<$unique_only> argument, if true, +causes only information about unique indexes to be returned. 
The C<$quick> argument is
+not used by DBD::Pg. For information on the format of the rows returned, please see the DBI
+documentation.
+
+=for html <a href="http://search.cpan.org/~timb/DBI/DBI.pm#statistics_info">DBI section on statistics_info</a>
+
+=head3 B<tables>
+
+  @names = $dbh->tables( undef, $schema, $table, $type, \%attr );
+
+Supported by this driver as proposed by DBI. This method returns all tables
+and/or views which are visible to the current user: see L</table_info>
+for more information about the arguments. The name of the schema appears
+before the table or view name. This can be turned off by adding in the
+C<pg_noprefix> attribute:
+
+  my @tables = $dbh->tables( '', '', 'dbd_pg_test', '', {pg_noprefix => 1} );
+
+=head3 B<type_info_all>
+
+  $type_info_all = $dbh->type_info_all;
+
+Supported by this driver as proposed by DBI. Information is only provided for
+SQL datatypes and for frequently used datatypes. The mapping between the
+PostgreSQL typename and the SQL92 datatype (if possible) has been done
+according to the following table:
+
+  +---------------+------------------------------------+
+  | typname       | SQL92                              |
+  |---------------+------------------------------------|
+  | bool          | BOOL                               |
+  | text          | /                                  |
+  | bpchar        | CHAR(n)                            |
+  | varchar       | VARCHAR(n)                         |
+  | int2          | SMALLINT                           |
+  | int4          | INT                                |
+  | int8          | /                                  |
+  | money         | /                                  |
+  | float4        | FLOAT(p)   p<7=float4, p<16=float8 |
+  | float8        | REAL                               |
+  | abstime       | /                                  |
+  | reltime       | /                                  |
+  | tinterval     | /                                  |
+  | date          | /                                  |
+  | time          | /                                  |
+  | datetime      | /                                  |
+  | timespan      | TINTERVAL                          |
+  | timestamp     | TIMESTAMP                          |
+  +---------------+------------------------------------+
+
+=head3 B<type_info>
+
+  @type_info = $dbh->type_info($data_type);
+
+Returns a list of hash references holding information about one or more variants of $data_type.
+See the DBI documentation for more details.
+
+=head3 B<pg_server_trace>
+
+  $dbh->pg_server_trace($filehandle);
+
+Writes debugging information from the PostgreSQL backend to a file. This is
+not related to the DBI L</trace> method and you should not use this method unless
+you know what you are doing. If you do enable this, be aware that the file
+will grow very large, very quickly. To stop logging to the file, use the
+L</pg_server_untrace> method. The first argument must be a file handle, not
+a filename. Example:
+
+  my $pid = $dbh->{pg_pid};
+  my $file = "pgbackend.$pid.debug.log";
+  open(my $fh, ">$file") or die qq{Could not open "$file": $!\n};
+  $dbh->pg_server_trace($fh);
+  ## Run code you want to trace here
+  $dbh->pg_server_untrace;
+  close($fh);
+
+=head3 B<pg_server_untrace>
+
+  $dbh->pg_server_untrace;
+
+Stops server logging to a previously opened file.
+
+=head3 B<selectrow_array>
+
+  @row_ary = $dbh->selectrow_array($sql);
+  @row_ary = $dbh->selectrow_array($sql, \%attr);
+  @row_ary = $dbh->selectrow_array($sql, \%attr, @bind_values);
+
+Returns an array of row information after preparing and executing the provided SQL string. The rows are returned
+by calling L</fetchrow_array>. The string can also be a statement handle generated by a previous prepare. Note that
+only the first row of data is returned. If called in a scalar context, only the first column of the first row is
+returned. Because this is not portable, it is not recommended that you use this method in that way.
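+
+For example, a minimal sketch (the table and column names here are hypothetical):
+
+  my ($id, $name) = $dbh->selectrow_array(
+      'SELECT id, name FROM mytable WHERE id = ?', undef, 123);
+  print "Row 123 is named $name\n" if defined $id;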
+
+=head3 B<selectrow_arrayref>
+
+  $ary_ref = $dbh->selectrow_arrayref($statement);
+  $ary_ref = $dbh->selectrow_arrayref($statement, \%attr);
+  $ary_ref = $dbh->selectrow_arrayref($statement, \%attr, @bind_values);
+
+Exactly the same as L</selectrow_array>, except that it returns a reference to an array, by internal use of
+the L</fetchrow_arrayref> method.
+
+=head3 B<selectrow_hashref>
+
+  $hash_ref = $dbh->selectrow_hashref($sql);
+  $hash_ref = $dbh->selectrow_hashref($sql, \%attr);
+  $hash_ref = $dbh->selectrow_hashref($sql, \%attr, @bind_values);
+
+Exactly the same as L</selectrow_array>, except that it returns a reference to a hash, by internal use of
+the L</fetchrow_hashref> method.
+
+=head3 B<clone>
+
+  $other_dbh = $dbh->clone();
+
+Creates a copy of the database handle by connecting with the same parameters as the original
+handle, then trying to merge the attributes. See the DBI documentation for complete usage.
+
+=head2 Database Handle Attributes
+
+=head3 B<AutoCommit> (boolean)
+
+Supported by DBD::Pg as proposed by DBI. According to the classification of
+DBI, PostgreSQL is a database in which a transaction must be explicitly
+started. Without starting a transaction, every change to the database becomes
+immediately permanent. The default of AutoCommit is on, but this may change
+in the future, so it is highly recommended that you explicitly set it when
+calling L</connect>. For details see the notes about L</Transactions>
+elsewhere in this document.
+
+=head3 B<pg_bool_tf> (boolean)
+
+DBD::Pg specific attribute. If true, boolean values will be returned
+as the characters 't' and 'f' instead of '1' and '0'.
+
+=head3 B<ReadOnly> (boolean)
+
+  $dbh->{ReadOnly} = 1;
+
+Specifies if the current database connection should be in read-only mode or not.
+In this mode, commands that change the database are not allowed and will throw
+an error. Note: this method will B<not> work if L</AutoCommit> is true. The
+read-only effect is accomplished by sending a S<SET TRANSACTION READ ONLY> after
+every begin. For more details, please see:
+
+http://www.postgresql.org/docs/current/interactive/sql-set-transaction.html
+
+Please note that this method is not foolproof: there are still ways to update the
+database. Consider this a safety net to catch applications that should not be
+issuing commands such as INSERT, UPDATE, or DELETE.
+
+This method requires DBI version 1.55 or better.
+
+=head3 B<pg_server_prepare> (integer)
+
+DBD::Pg specific attribute. Indicates if DBD::Pg should attempt to use server-side
+prepared statements. The default value, 1, indicates that prepared statements should
+be used whenever possible. See the section on the L</prepare> method for more information.
+
+=head3 B<pg_placeholder_dollaronly> (boolean)
+
+DBD::Pg specific attribute. Defaults to false. When true, question marks inside of statements
+are not treated as L<placeholders|/Placeholders>. Useful for statements that contain unquoted question
+marks, such as geometric operators.
+
+=head3 B<pg_enable_utf8> (boolean)
+
+DBD::Pg specific attribute. If true, then the C<utf8> flag will be turned on
+for returned character data (if the data is valid UTF-8). For details about
+the C<utf8> flag, see the C<Encode> module. This attribute is only relevant under
+perl 5.8 and later.
+
+=head3 B<pg_errorlevel> (integer)
+
+DBD::Pg specific attribute. Sets the amount of information returned by the server's
+error messages. Valid entries are 0, 1, and 2.
Any other number will be forced to the
+default value of 1.
+
+A value of 0 ("TERSE") will show severity, primary text, and position only
+and will usually fit on a single line. A value of 1 ("DEFAULT") will also
+show any detail, hint, or context fields. A value of 2 ("VERBOSE") will
+show all available information.
+
+=head3 B<pg_lib_version> (integer, read-only)
+
+DBD::Pg specific attribute. Indicates which version of PostgreSQL
+DBD::Pg was compiled against. In other words, which libraries were used.
+Returns a number with major, minor, and revision together; version 8.1.4
+would be returned as C<80104>.
+
+=head3 B<pg_server_version> (integer, read-only)
+
+DBD::Pg specific attribute. Indicates which version of PostgreSQL
+the current database handle is connected to. Returns a number with major,
+minor, and revision together; version 8.0.1 would be C<80001>.
+
+=head3 B<Name> (string, read-only)
+
+Returns the name of the current database. This is the same as the DSN, without the
+"dbi:Pg:" part. Before version 2.0.0, this only returned the bare database name
+(e.g. 'foo'). From version 2.0.0 onwards, it returns the more correct
+output (e.g. 'dbname=foo').
+
+=head3 B<Username> (string, read-only)
+
+Returns the name of the user connected to the database.
+
+=head3 B<pg_db> (string, read-only)
+
+DBD::Pg specific attribute. Returns the name of the current database.
+
+=head3 B<pg_user> (string, read-only)
+
+DBD::Pg specific attribute. Returns the name of the user that
+connected to the server.
+
+=head3 B<pg_host> (string, read-only)
+
+DBD::Pg specific attribute. Returns the host of the current
+server connection. Locally connected hosts will return an empty
+string.
+
+=head3 B<pg_port> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the port of the connection to
+the server.
+
+=head3 B<pg_socket> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the file descriptor number of
+the connection socket to the server.
+
+=head3 B<pg_pass> (string, read-only)
+
+DBD::Pg specific attribute. Returns the password used to connect
+to the server.
+
+=head3 B<pg_options> (string, read-only)
+
+DBD::Pg specific attribute. Returns the command-line options passed
+to the server. May be an empty string.
+
+=head3 B<pg_default_port> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the default port used if none is
+specifically given.
+
+=head3 B<pg_pid> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the process id (PID) of the
+backend server process handling the connection.
+
+=head3 B<pg_prepare_now> (boolean)
+
+DBD::Pg specific attribute. Default is off. If true, then the L</prepare> method will
+immediately prepare commands, rather than waiting until the first execute.
+
+=head3 B<pg_expand_array> (boolean)
+
+DBD::Pg specific attribute. Defaults to true. If false, arrays returned from the server will
+not be changed into a Perl arrayref, but remain as a string.
+
+=head3 B<pg_async_status> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the current status of an L<asynchronous|/Asynchronous Queries>
+command: 0 indicates no asynchronous command is in progress, 1 indicates that
+an asynchronous command has started, and -1 indicates that an asynchronous command
+has been cancelled.
+
+=head3 B<pg_standard_conforming_strings> (boolean, read-only)
+
+DBD::Pg specific attribute. Returns true if the server is currently using
+standard conforming strings. Only available if the target
+server is version 8.2 or better.
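+
+As a brief illustration, the read-only attributes above can simply be read from
+a connected database handle (a sketch, assuming a connected C<$dbh>):
+
+  printf "Connected to %s as %s (server %d, compiled against %d)\n",
+      $dbh->{pg_db}, $dbh->{pg_user},
+      $dbh->{pg_server_version}, $dbh->{pg_lib_version};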
+ +=head3 B<pg_INV_READ> (integer, read-only) + +Constant to be used for the mode in L</lo_creat> and L</lo_open>. + +=head3 B<pg_INV_WRITE> (integer, read-only) + +Constant to be used for the mode in L</lo_creat> and L</lo_open>. + +=head3 B<Driver> (handle, read-only) + +Holds the handle of the parent driver. The only recommended use for this is to find the name +of the driver using: + + $dbh->{Driver}->{Name} + +=head3 B<pg_protocol> (integer, read-only) + +DBD::Pg specific attribute. Returns the version of the PostgreSQL server. +If DBD::Pg is unable to figure out the version, it will return a "0". Otherwise, +a "3" is returned. + +=head3 B<RowCacheSize> + +Not used by DBD::Pg + +=head1 DBI STATEMENT HANDLE OBJECTS + +=head2 Statement Handle Methods + +=head3 B<bind_param> + + $rv = $sth->bind_param($param_num, $bind_value); + $rv = $sth->bind_param($param_num, $bind_value, $bind_type); + $rv = $sth->bind_param($param_num, $bind_value, \%attr); + +Allows the user to bind a value and/or a data type to a placeholder. This is +especially important when using server-side prepares. See the +L</prepare> method for more information. + +The value of C<$param_num> is a number if using the '?' or '$1' style +placeholders. If using ":foo" style placeholders, the complete name +(e.g. ":foo") must be given. For numeric values, you can either use a +number or use a literal '$1'. See the examples below. + +The C<$bind_value> argument is fairly self-explanatory. A value of C<undef> will +bind a C<NULL> to the placeholder. Using C<undef> is useful when you want +to change just the type and will be overwriting the value later. +(Any value is actually usable, but C<undef> is easy and efficient). + +The C<\%attr> hash is used to indicate the data type of the placeholder. +The default value is "varchar". If you need something else, you must +use one of the values provided by DBI or by DBD::Pg. To use a SQL value, +modify your "use DBI" statement at the top of your script as follows: + + use DBI qw(:sql_types); + +This will import some constants into your script. You can plug those +directly into the L</bind_param> call. Some common ones that you will +encounter are: + + SQL_INTEGER + +To use PostgreSQL data types, import the list of values like this: + + use DBD::Pg qw(:pg_types); + +You can then set the data types by setting the value of the C<pg_type> +key in the hash passed to L</bind_param>. 
+
+The current list of Postgres data types exported is:
+
+  PG_ABSTIME PG_ABSTIMEARRAY PG_ACLITEM PG_ACLITEMARRAY PG_ANY PG_ANYARRAY
+  PG_ANYELEMENT PG_ANYENUM PG_ANYNONARRAY PG_BIT PG_BITARRAY PG_BOOL
+  PG_BOOLARRAY PG_BOX PG_BOXARRAY PG_BPCHAR PG_BPCHARARRAY PG_BYTEA
+  PG_BYTEAARRAY PG_CHAR PG_CHARARRAY PG_CID PG_CIDARRAY PG_CIDR
+  PG_CIDRARRAY PG_CIRCLE PG_CIRCLEARRAY PG_CSTRING PG_CSTRINGARRAY PG_DATE
+  PG_DATEARRAY PG_FLOAT4 PG_FLOAT4ARRAY PG_FLOAT8 PG_FLOAT8ARRAY PG_GTSVECTOR
+  PG_GTSVECTORARRAY PG_INET PG_INETARRAY PG_INT2 PG_INT2ARRAY PG_INT2VECTOR
+  PG_INT2VECTORARRAY PG_INT4 PG_INT4ARRAY PG_INT8 PG_INT8ARRAY PG_INTERNAL
+  PG_INTERVAL PG_INTERVALARRAY PG_LANGUAGE_HANDLER PG_LINE PG_LINEARRAY PG_LSEG
+  PG_LSEGARRAY PG_MACADDR PG_MACADDRARRAY PG_MONEY PG_MONEYARRAY PG_NAME
+  PG_NAMEARRAY PG_NUMERIC PG_NUMERICARRAY PG_OID PG_OIDARRAY PG_OIDVECTOR
+  PG_OIDVECTORARRAY PG_OPAQUE PG_PATH PG_PATHARRAY PG_PG_ATTRIBUTE PG_PG_CLASS
+  PG_PG_PROC PG_PG_TYPE PG_POINT PG_POINTARRAY PG_POLYGON PG_POLYGONARRAY
+  PG_RECORD PG_RECORDARRAY PG_REFCURSOR PG_REFCURSORARRAY PG_REGCLASS PG_REGCLASSARRAY
+  PG_REGCONFIG PG_REGCONFIGARRAY PG_REGDICTIONARY PG_REGDICTIONARYARRAY PG_REGOPER PG_REGOPERARRAY
+  PG_REGOPERATOR PG_REGOPERATORARRAY PG_REGPROC PG_REGPROCARRAY PG_REGPROCEDURE PG_REGPROCEDUREARRAY
+  PG_REGTYPE PG_REGTYPEARRAY PG_RELTIME PG_RELTIMEARRAY PG_SMGR PG_TEXT
+  PG_TEXTARRAY PG_TID PG_TIDARRAY PG_TIME PG_TIMEARRAY PG_TIMESTAMP
+  PG_TIMESTAMPARRAY PG_TIMESTAMPTZ PG_TIMESTAMPTZARRAY PG_TIMETZ PG_TIMETZARRAY PG_TINTERVAL
+  PG_TINTERVALARRAY PG_TRIGGER PG_TSQUERY PG_TSQUERYARRAY PG_TSVECTOR PG_TSVECTORARRAY
+  PG_TXID_SNAPSHOT PG_TXID_SNAPSHOTARRAY PG_UNKNOWN PG_UUID PG_UUIDARRAY PG_VARBIT
+  PG_VARBITARRAY PG_VARCHAR PG_VARCHARARRAY PG_VOID PG_XID PG_XIDARRAY
+  PG_XML PG_XMLARRAY
+
+Data types are "sticky," in that once a data type is set for a certain placeholder,
+it will remain for that placeholder, unless it is explicitly set to something
+else afterwards. If the statement has already been prepared, and you switch the
+data type to something else, DBD::Pg will re-prepare the statement for you before
+doing the next execute.
+
+Examples:
+
+  use DBI qw(:sql_types);
+  use DBD::Pg qw(:pg_types);
+
+  $SQL = "SELECT id FROM ptable WHERE size > ? AND title = ?";
+  $sth = $dbh->prepare($SQL);
+
+  ## Both arguments below are bound to placeholders as "varchar"
+  $sth->execute(123, "Merk");
+
+  ## Reset the datatype for the first placeholder to an integer
+  $sth->bind_param(1, undef, SQL_INTEGER);
+
+  ## The "undef" bound above is not used, since we supply params to execute
+  $sth->execute(123, "Merk");
+
+  ## Set the first placeholder's value and data type
+  $sth->bind_param(1, 234, { pg_type => PG_TIMESTAMP });
+
+  ## Set the second placeholder's value and data type.
+  ## We don't send a third argument, so the default "varchar" is used
+  $sth->bind_param('$2', "Zool");
+
+  ## We realize that the wrong data type was set above, so we change it.
+  ## (SQL_INTEGER is a plain DBI type, so it is passed directly, not via pg_type)
+  $sth->bind_param('$1', 234, SQL_INTEGER);
+
+  ## We also got the wrong value, so we change that as well.
+  ## Because the data type is sticky, we don't need to change it
+  $sth->bind_param(1, 567);
+
+  ## This executes the statement with 567 (integer) and "Zool" (varchar)
+  $sth->execute();
+
+=head3 B<bind_param_inout>
+
+  $rv = $sth->bind_param_inout($param_num, \$scalar, 0);
+
+Experimental support for this feature is provided. The first argument to
+bind_param_inout should be a placeholder number.
The second argument
+should be a reference to a scalar variable in your script. The third argument
+is not used and should simply be set to 0. Note that what this really does is
+assign a returned column to the variable, in the order in which the column
+appears. For example:
+
+  my $foo = 123;
+  $sth = $dbh->prepare("SELECT 1+?::int");
+  $sth->bind_param_inout(1, \$foo, 0);
+  $foo = 222;
+  $sth->execute(444);
+  $sth->fetch;
+
+The above will cause $foo to have a new value of "223" after the final fetch.
+Note that the variables bound in this manner are very sticky, and will trump any
+values passed in to execute. This is because the binding is done as late as possible,
+at the execute() stage, allowing the value to be changed between the time it was bound
+and the time the query is executed. Thus, the above execute is the same as:
+
+  $sth->execute();
+
+=head3 B<bind_param_array>
+
+  $rv = $sth->bind_param_array($param_num, $array_ref_or_value)
+  $rv = $sth->bind_param_array($param_num, $array_ref_or_value, $bind_type)
+  $rv = $sth->bind_param_array($param_num, $array_ref_or_value, \%attr)
+
+Binds an array of values to a placeholder, so that each is used in turn by a call
+to the L</execute_array> method.
+
+=head3 B<execute>
+
+  $rv = $sth->execute(@bind_values);
+
+Executes a previously prepared statement. In addition to C<UPDATE>, C<DELETE>,
+and C<INSERT> statements, for which it always returns the number of affected rows,
+the C<execute> method can also be used for C<SELECT ... INTO table> statements.
+
+The "prepare/bind/execute" process has changed significantly for PostgreSQL
+servers 7.4 and later: please see the C<prepare()> and C<bind_param()> entries for
+much more information.
+
+Setting one of the bind_values to "undef" is the equivalent of setting the value
+to NULL in the database. Setting the bind_value to $DBDPG_DEFAULT is equivalent
+to sending the literal string 'DEFAULT' to the backend. Note that using this
+option will force server-side prepares off until such time as PostgreSQL
+supports using DEFAULT in prepared statements.
+
+DBD::Pg also supports passing in arrays to execute: simply pass in an arrayref,
+and DBD::Pg will flatten it into a string suitable for input on the backend.
+
+If you are using Postgres version 8.2 or greater, you can also use any of the
+fetch methods to retrieve the values of a C<RETURNING> clause after you execute
+an C<UPDATE>, C<DELETE>, or C<INSERT>. For example:
+
+  $dbh->do(q{CREATE TABLE abc (id SERIAL, country TEXT)});
+  $SQL = q{INSERT INTO abc (country) VALUES (?) RETURNING id};
+  $sth = $dbh->prepare($SQL);
+  $sth->execute('France');
+  $countryid = $sth->fetch()->[0];
+  $sth->execute('New Zealand');
+  $countryid = $sth->fetch()->[0];
+
+=head3 B<execute_array>
+
+  $tuples = $sth->execute_array() or die $sth->errstr;
+  $tuples = $sth->execute_array(\%attr) or die $sth->errstr;
+  $tuples = $sth->execute_array(\%attr, @bind_values) or die $sth->errstr;
+
+  ($tuples, $rows) = $sth->execute_array(\%attr) or die $sth->errstr;
+  ($tuples, $rows) = $sth->execute_array(\%attr, @bind_values) or die $sth->errstr;
+
+Executes a prepared statement once for each parameter tuple, taken either from the
+passed-in arrayrefs or from values previously bound via the L</bind_param_array>
+method. See the DBI documentation for more details.
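+
+A short sketch (the table here is hypothetical), inserting three rows in one call
+using the standard DBI C<ArrayTupleStatus> attribute:
+
+  my $sth = $dbh->prepare('INSERT INTO mytable (id, name) VALUES (?, ?)');
+  my @tuple_status;
+  my $tuples = $sth->execute_array(
+      { ArrayTupleStatus => \@tuple_status },
+      [ 1, 2, 3 ],
+      [ 'uno', 'dos', 'tres' ],
+  );
+  print "Inserted $tuples rows\n";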
+
+=head3 B<execute_for_fetch>
+
+  $tuples = $sth->execute_for_fetch($fetch_tuple_sub);
+  $tuples = $sth->execute_for_fetch($fetch_tuple_sub, \@tuple_status);
+
+  ($tuples, $rows) = $sth->execute_for_fetch($fetch_tuple_sub);
+  ($tuples, $rows) = $sth->execute_for_fetch($fetch_tuple_sub, \@tuple_status);
+
+Used internally by the L</execute_array> method, and rarely used directly. See the
+DBI documentation for more details.
+
+=head3 B<fetchrow_arrayref>
+
+  $ary_ref = $sth->fetchrow_arrayref;
+
+Fetches the next row of data from the statement handle, and returns a reference to an array
+holding the column values. Any columns that are NULL are returned as undef within the array.
+
+If there are no more rows or if an error occurs, then this method returns undef. You should
+check C<< $sth->err >> afterwards (or use the L</RaiseError> attribute) to discover if the undef returned
+was due to an error.
+
+Note that the same array reference is returned for each fetch, so don't store the reference and
+then use it after a later fetch. Also, the elements of the array are also reused for each row,
+so take care if you want to take a reference to an element. See also L</bind_columns>.
+
+=head3 B<fetchrow_array>
+
+  @ary = $sth->fetchrow_array;
+
+Similar to the L</fetchrow_arrayref> method, but returns a list of column information rather than
+a reference to a list. Do not use this in a scalar context.
+
+=head3 B<fetchrow_hashref>
+
+  $hash_ref = $sth->fetchrow_hashref;
+  $hash_ref = $sth->fetchrow_hashref($name);
+
+Fetches the next row of data and returns a hashref containing the name of the columns as the keys
+and the data itself as the values. Any NULL value is returned as an undef value.
+
+If there are no more rows or if an error occurs, then this method returns undef. You should
+check C<< $sth->err >> afterwards (or use the L</RaiseError> attribute) to discover if the undef returned
+was due to an error.
+
+The optional C<$name> argument should be either C<NAME>, C<NAME_lc> or C<NAME_uc>, and indicates
+what sort of transformation to make to the keys in the hash.
+
+=head3 B<fetchall_arrayref>
+
+  $tbl_ary_ref = $sth->fetchall_arrayref();
+  $tbl_ary_ref = $sth->fetchall_arrayref( $slice );
+  $tbl_ary_ref = $sth->fetchall_arrayref( $slice, $max_rows );
+
+Returns a reference to an array of arrays that contains all the remaining rows to be fetched from the
+statement handle. If there are no more rows, an empty arrayref will be returned. If an error occurs,
+the data read in so far will be returned. Because of this, you should always check C<< $sth->err >> after
+calling this method, unless L</RaiseError> has been enabled.
+
+If C<$slice> is an array reference, fetchall_arrayref uses the L</fetchrow_arrayref> method to fetch each
+row as an array ref. If the C<$slice> array is not empty then it is used as a slice to select individual
+columns by perl array index number (starting at 0, unlike column and parameter numbers which start at 1).
+
+With no parameters, or if $slice is undefined, fetchall_arrayref acts as if passed an empty array ref.
+
+If C<$slice> is a hash reference, fetchall_arrayref uses L</fetchrow_hashref> to fetch each row as a hash reference.
+
+See the DBI documentation for a complete discussion.
+
+=head3 B<fetchall_hashref>
+
+  $hash_ref = $sth->fetchall_hashref( $key_field );
+
+Returns a hashref containing all rows to be fetched from the statement handle. See the DBI documentation for
+a full discussion.
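+
+For example (a sketch; the table and its "id" column are hypothetical):
+
+  my $sth = $dbh->prepare('SELECT id, name FROM mytable');
+  $sth->execute();
+  my $rows = $sth->fetchall_hashref('id');
+  print "Row 42 is named $rows->{42}{name}\n" if exists $rows->{42};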
+
+=head3 B<finish>
+
+  $rv = $sth->finish;
+
+Indicates to DBI that you are finished with the statement handle and are not going to use it again. Only needed
+when you have not fetched all the possible rows.
+
+=head3 B<rows>
+
+  $rv = $sth->rows;
+
+Returns the number of rows returned by the last query. In contrast to many other DBD modules,
+the number of rows is available immediately after calling C<< $sth->execute >>. Note that
+the L</execute> method itself returns the number of rows, which means that this
+method is rarely needed.
+
+=head3 B<bind_col>
+
+  $rv = $sth->bind_col($column_number, \$var_to_bind);
+  $rv = $sth->bind_col($column_number, \$var_to_bind, \%attr );
+  $rv = $sth->bind_col($column_number, \$var_to_bind, $bind_type );
+
+Binds a Perl variable and/or some attributes to an output column of a SELECT statement.
+Column numbers count up from 1. You do not need to bind output columns in order to fetch data.
+
+See the DBI documentation for a discussion of the optional parameters C<\%attr> and C<$bind_type>.
+
+=head3 B<bind_columns>
+
+  $rv = $sth->bind_columns(@list_of_refs_to_vars_to_bind);
+
+Calls the L</bind_col> method for each column in the SELECT statement, using the supplied list.
+
+=head3 B<dump_results>
+
+  $rows = $sth->dump_results($maxlen, $lsep, $fsep, $fh);
+
+Fetches all the rows from the statement handle, calls C<DBI::neat_list> for each row, and
+prints the results to C<$fh> (which defaults to F<STDOUT>). Rows are separated by C<$lsep> (which defaults
+to a newline). Columns are separated by C<$fsep> (which defaults to a comma). The C<$maxlen> controls
+how wide the output can be, and defaults to 35.
+
+This method is designed as a handy utility for prototyping and testing queries. Since it uses
+"neat_list" to format and edit the string for reading by humans, it is not recommended
+for data transfer applications.
+
+=head3 B<blob_read>
+
+  $blob = $sth->blob_read($id, $offset, $len);
+
+Supported by DBD::Pg. This method is implemented by DBI but not
+currently documented by DBI, so this method might change.
+
+This method seems to be heavily influenced by the current implementation of
+blobs in Oracle. Nevertheless we try to be as compatible as possible. Whereas
+Oracle suffers from the limitation that blobs are related to tables and every
+table can have only one blob (datatype LONG), PostgreSQL handles its blobs
+independently of any table by using so-called object identifiers. This explains
+why the C<blob_read> method is blessed into the STATEMENT package and not part of
+the DATABASE package. Here, the field parameter is used to hold this
+object identifier. The offset and len parameters may be set to zero, in which
+case the whole blob is fetched at once.
+
+See also the PostgreSQL-specific functions concerning blobs, which are
+available via the C<func> interface.
+
+For further information and examples about blobs, please read the chapter
+about Large Objects in the PostgreSQL Programmer's Guide at
+L<http://www.postgresql.org/docs/current/static/largeobjects.html>.
+
+=head2 Statement Handle Attributes
+
+=head3 B<NUM_OF_FIELDS> (integer, read-only)
+
+Returns the number of columns returned by the current statement. A number will only be returned for
+SELECT statements, for SHOW statements (which always return C<1>), and for INSERT,
+UPDATE, and DELETE statements which contain a RETURNING clause.
+This method returns undef if called before C<execute()>.
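+
+A quick sketch:
+
+  my $sth = $dbh->prepare('SELECT 1 AS one, 2 AS two');
+  $sth->execute();
+  print $sth->{NUM_OF_FIELDS};   ## prints 2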
+
+=head3 B<NUM_OF_PARAMS> (integer, read-only)
+
+Returns the number of placeholders in the current statement.
+
+=head3 B<NAME> (arrayref, read-only)
+
+Returns an arrayref of column names for the current statement. This
+method will only work for SELECT statements, for SHOW statements, and for
+INSERT, UPDATE, and DELETE statements which contain a RETURNING clause.
+This method returns undef if called before C<execute()>.
+
+=head3 B<NAME_lc> (arrayref, read-only)
+
+The same as the C<NAME> attribute, except that all column names are forced to lower case.
+
+=head3 B<NAME_uc> (arrayref, read-only)
+
+The same as the C<NAME> attribute, except that all column names are forced to upper case.
+
+=head3 B<NAME_hash> (hashref, read-only)
+
+Similar to the C<NAME> attribute, but returns a hashref of column names instead of an arrayref. The names of the columns
+are the keys of the hash, and the values represent the order in which the columns are returned, starting at 0.
+This method returns undef if called before C<execute()>.
+
+=head3 B<NAME_lc_hash> (hashref, read-only)
+
+The same as the C<NAME_hash> attribute, except that all column names are forced to lower case.
+
+=head3 B<NAME_uc_hash> (hashref, read-only)
+
+The same as the C<NAME_hash> attribute, except that all column names are forced to upper case.
+
+=head3 B<TYPE> (arrayref, read-only)
+
+Returns an arrayref indicating the data type for each column in the statement.
+This method returns undef if called before C<execute()>.
+
+=head3 B<PRECISION> (arrayref, read-only)
+
+Returns an arrayref of integer values for each column returned by the statement.
+The number indicates the precision for C<NUMERIC> columns, the size in number of
+characters for C<CHAR> and C<VARCHAR> columns, and for all other types of columns
+it returns the number of I<bytes>.
+This method returns undef if called before C<execute()>.
+
+=head3 B<SCALE> (arrayref, read-only)
+
+Returns an arrayref of integer values for each column returned by the statement. The number
+indicates the scale of that column. The only type that will return a value is C<NUMERIC>.
+This method returns undef if called before C<execute()>.
+
+=head3 B<NULLABLE> (arrayref, read-only)
+
+Returns an arrayref of integer values for each column returned by the statement. The number
+indicates if the column is nullable or not. 0 = not nullable, 1 = nullable, 2 = unknown.
+This method returns undef if called before C<execute()>.
+
+=head3 B<Database> (dbh, read-only)
+
+Returns the database handle this statement handle was created from.
+
+=head3 B<ParamValues> (hash ref, read-only)
+
+Returns a reference to a hash containing the values currently bound to placeholders. If the "named parameters"
+type of placeholders are being used (such as ":foo"), then the keys of the hash will be the names of the
+placeholders (without the colon). If the "dollar sign numbers" type of placeholders are being used, the keys of the hash will
+be the numbers, without the dollar signs. If the "question mark" type is used, integer numbers will be returned,
+starting at one and increasing for every placeholder.
+
+If this method is called before L</execute>, the literal values passed in are returned. If called after
+L</execute>, then the quoted versions of the values are returned.
+
+=head3 B<ParamTypes> (hash ref, read-only)
+
+Returns a reference to a hash containing the type names currently bound to placeholders. The keys
+are the same as returned by the ParamValues method.
The values are hashrefs containing a single key/value
+pair, in which the key is 'TYPE' if the type has a generic SQL equivalent, or 'pg_type' if the type can
+only be expressed by a Postgres type. The value is the internal number corresponding to the type originally
+passed in. (Placeholders that have not yet been bound will return undef as the value). This allows the output of
+ParamTypes to be passed back to the L</bind_param> method.
+
+=head3 B<Statement> (string, read-only)
+
+Returns the statement string passed to the most recent "prepare" method called on this database handle, even if that method
+failed. This is especially useful where "RaiseError" is enabled and the exception handler checks $@ and sees that a C<prepare>
+method call failed.
+
+=head3 B<pg_current_row> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the number of the tuple (row) that was
+most recently fetched. Returns zero before and after fetching is performed.
+
+=head3 B<pg_numbound> (integer, read-only)
+
+DBD::Pg specific attribute. Returns the number of placeholders
+that are currently bound (via bind_param).
+
+=head3 B<pg_bound> (hashref, read-only)
+
+DBD::Pg specific attribute. Returns a hash of all named placeholders. The
+key is the name of the placeholder, and the value is a 0 or a 1, indicating if
+the placeholder has been bound yet (e.g. via bind_param).
+
+=head3 B<pg_size> (arrayref, read-only)
+
+DBD::Pg specific attribute. It returns a reference to an array of integer
+values for each column. The integer shows the size of the column in
+bytes. Variable length columns are indicated by -1.
+
+=head3 B<pg_type> (arrayref, read-only)
+
+DBD::Pg specific attribute. It returns a reference to an array of strings
+for each column. The string shows the name of the data_type.
+
+=head3 B<pg_segments> (arrayref, read-only)
+
+DBD::Pg specific attribute. Returns an arrayref of the query split on the
+placeholders.
+
+=head3 B<pg_oid_status> (integer, read-only)
+
+DBD::Pg specific attribute. It returns the OID of the last INSERT command.
+
+=head3 B<pg_cmd_status> (string, read-only)
+
+DBD::Pg specific attribute. It returns the type of the last
+command. Possible types are: "INSERT", "DELETE", "UPDATE", "SELECT".
+
+=head3 B<pg_direct> (boolean)
+
+DBD::Pg specific attribute. Default is false. If true, the query is passed
+directly to the backend without parsing for placeholders.
+
+=head3 B<pg_prepare_now> (boolean)
+
+DBD::Pg specific attribute. Default is off. If true, the query will be immediately
+prepared, rather than waiting for the L</execute> call.
+
+=head3 B<pg_prepare_name> (string)
+
+DBD::Pg specific attribute. Specifies the name of the prepared statement to use for this
+statement handle. Not normally needed, see the section on the L</prepare> method for
+more information.
+
+=head3 B<pg_server_prepare> (integer)
+
+DBD::Pg specific attribute. Indicates if DBD::Pg should attempt to use server-side
+prepared statements for this statement handle. The default value, 1, indicates that prepared
+statements should be used whenever possible. See the section on the L</prepare> method for
+more information.
+
+=head3 B<pg_placeholder_dollaronly> (boolean)
+
+DBD::Pg specific attribute. Defaults to off. When true, question marks inside of the query
+being prepared are not treated as placeholders. Useful for statements that contain unquoted question
+marks, such as geometric operators.
+
+=head3 B<pg_async> (integer)
+
+DBD::Pg specific attribute.
Indicates the current behavior for asynchronous queries. See the section
+on L</Asynchronous Constants> for more information.
+
+=head3 B<RowsInCache>
+
+Not used by DBD::Pg
+
+=head3 B<RowCache>
+
+Not used by DBD::Pg
+
+=head3 B<CursorName>
+
+Not used by DBD::Pg. See the note about L</Cursors> elsewhere in this document.
+
+=head1 FURTHER INFORMATION
+
+=head2 Transactions
+
+Transaction behavior is controlled via the L</AutoCommit> attribute. For a
+complete definition of C<AutoCommit> please refer to the DBI documentation.
+
+According to the DBI specification the default for C<AutoCommit> is a true
+value. In this mode, any change to the database becomes valid immediately. Any
+C<BEGIN>, C<COMMIT> or C<ROLLBACK> statements will be rejected. DBD::Pg
+implements C<AutoCommit> by issuing a C<BEGIN> statement immediately before
+executing a statement, and a C<COMMIT> afterwards. Note that preparing a
+statement is not always enough to trigger the first C<BEGIN>, as the actual
+C<PREPARE> is usually postponed until the first call to L</execute>.
+
+=head2 Savepoints
+
+PostgreSQL version 8.0 introduced the concept of savepoints, which allows
+transactions to be rolled back to a certain point without affecting the
+rest of the transaction. DBD::Pg encourages using the following methods to
+control savepoints:
+
+=head3 C<pg_savepoint>
+
+Creates a savepoint. This will fail unless you are inside of a transaction. The
+only argument is the name of the savepoint. Note that PostgreSQL DOES allow
+multiple savepoints with the same name to exist.
+
+  $dbh->pg_savepoint("mysavepoint");
+
+=head3 C<pg_rollback_to>
+
+Rolls the database back to a named savepoint, discarding any work performed after
+that point. If more than one savepoint with that name exists, rolls back to the
+most recently created one.
+
+  $dbh->pg_rollback_to("mysavepoint");
+
+=head3 C<pg_release>
+
+Releases (or removes) a named savepoint. If more than one savepoint with that name
+exists, it will only destroy the most recently created one. Note that all savepoints
+created after the one being released are also destroyed.
+
+  $dbh->pg_release("mysavepoint");
+
+=head2 Asynchronous Queries
+
+It is possible to send a query to the backend and have your script do other work while the query is
+running on the backend. Queries sent by both the L</do> method and the L</execute> method can be
+sent asynchronously. (NOTE: This will only work if DBD::Pg has been compiled against Postgres libraries
+of version 8.0 or greater.) The basic usage is as follows:
+
+  use DBD::Pg ':async';
+
+  print "Async do() example:\n";
+  $dbh->do("SELECT long_running_query()", {pg_async => PG_ASYNC});
+  do_something_else();
+  {
+    if ($dbh->pg_ready()) {
+      $res = $dbh->pg_result();
+      print "Result of do(): $res\n";
+      last; ## We have our answer, so exit the block
+    }
+    print "Query is still running...\n";
+    if (cancel_request_received) {
+      $dbh->pg_cancel();
+    }
+    sleep 1;
+    redo;
+  }
+
+  print "Async prepare/execute example:\n";
+  $sth = $dbh->prepare("SELECT long_running_query(1)", {pg_async => PG_ASYNC});
+  $sth->execute();
+
+  ## Changed our mind, cancel and run again:
+  $sth = $dbh->prepare("SELECT 678", {pg_async => PG_ASYNC + PG_OLDQUERY_CANCEL});
+  $sth->execute();
+
+  do_something_else();
+
+  if (!$sth->pg_ready) {
+    do_another_thing();
+  }
+
+  ## We wait until it is done, and get the result:
+  $res = $dbh->pg_result();
+
+=head3 Asynchronous Constants
+
+There are currently three asynchronous constants exported by DBD::Pg.
You can import all of them by putting +either of these at the top of your script: + + use DBD::Pg; + + use DBD::Pg ':async'; + +You may also use the numbers instead of the constants, but using the constants is recommended as it +makes your script more readable. + +=over 4 + +=item PG_ASYNC + +This is a constant for the number 1. It is passed to either the L</do> or the L</prepare> method as a value +to the pg_async key and indicates that the query should be sent asynchronously. + +=item PG_OLDQUERY_CANCEL + +This is a constant for the number 2. When passed to either the L</do> or the L</prepare> method, it causes any +currently running asynchronous query to be cancelled and rolled back. It has no effect if no asynchronous +query is currently running. + +=item PG_OLDQUERY_WAIT + +This is a constant for the number 4. When passed to either the L</do> or the L</prepare> method, it waits for any +currently running asynchronous query to complete. It has no effect if there is no asynchronous query currently running. + +=back + +=head3 Asynchronous Methods + +=over 4 + +=item B<pg_cancel> + +This database-level method attempts to cancel any currently running asynchronous query. It returns true if +the cancel succeeded, and false otherwise. Note that a query that has finished before this method is executed +will also return false. B<WARNING>: a successful cancellation will leave the database in an unusable state, +so DBD::Pg will automatically clear out the error message and issue a ROLLBACK. + + $result = $dbh->pg_cancel(); + +=item B<pg_ready> + +This method can be called as a database handle method or (for convenience) as a statement handle method. Both simply +see if a previously issued asynchronous query has completed yet. It returns true if the statement has finished, in which +case you should then call the L</pg_result> method. Calls to C<pg_ready()> should only be used when you have other +things to do while the query is running. If you simply want to wait until the query is done, do not call pg_ready() +over and over, but simply call the pg_result() method. + + my $time = 0; + while (!$dbh->pg_ready) { + print "Query is still running. Seconds: $time\n"; + $time++; + sleep 1; + } + $result = $dbh->pg_result; + +=item B<pg_result> + +This database handle method returns the results of a previously issued asynchronous query. If the query is still +running, this method will wait until it has finished. The result returned is the number of rows: the same thing +that would have been returned by the asynchronous L</do> or L</execute> if it had been called without an asynchronous flag. + + $result = $dbh->pg_result; + +=back + +=head3 Asynchronous Examples + +Here are some working examples of asynchronous queries. Note that we'll use the B<pg_sleep> function to emulate a +long-running query. + + use strict; + use warnings; + use Time::HiRes 'sleep'; + use DBD::Pg ':async'; + + my $dbh = DBI->connect('dbi:Pg:dbname=postgres', 'postgres', '', {AutoCommit=>0,RaiseError=>1}); + + ## Kick off a long running query on the first database: + my $sth = $dbh->prepare("SELECT pg_sleep(?)", {pg_async => PG_ASYNC}); + $sth->execute(5); + + ## While that is running, do some other things + print "Your query is processing. Thanks for waiting\n"; + check_on_the_kids(); ## Expensive sub, takes at least three seconds. + + while (!$dbh->pg_ready) { + check_on_the_kids(); + ## If the above function returns quickly for some reason, we add a small sleep + sleep 0.1; + } + + print "The query has finished. 
Gathering results\n";
+  my $result = $sth->pg_result;
+  print "Result: $result\n";
+  my $info = $sth->fetchall_arrayref();
+
+Without asynchronous queries, the above script would take about 8 seconds to run: five seconds waiting
+for the execute to finish, then three for the check_on_the_kids() function to return. With asynchronous
+queries, the script takes about 6 seconds to run, and gets in two iterations of check_on_the_kids in
+the process.
+
+Here's an example showing the ability to cancel a long-running query. Imagine two slave databases in
+different geographic locations over a slow network. You need information as quickly as possible, so
+you query both at once. When you get an answer, you tell the other one to stop working on your query,
+as you don't need it anymore.
+
+  use strict;
+  use warnings;
+  use Time::HiRes 'sleep';
+  use DBD::Pg ':async';
+
+  my $dbhslave1 = DBI->connect('dbi:Pg:dbname=postgres;host=slave1', 'postgres', '', {AutoCommit=>0,RaiseError=>1});
+  my $dbhslave2 = DBI->connect('dbi:Pg:dbname=postgres;host=slave2', 'postgres', '', {AutoCommit=>0,RaiseError=>1});
+
+  my $SQL = "SELECT count(*) FROM largetable WHERE flavor='blueberry'";
+
+  my $sth1 = $dbhslave1->prepare($SQL, {pg_async => PG_ASYNC});
+  my $sth2 = $dbhslave2->prepare($SQL, {pg_async => PG_ASYNC});
+
+  $sth1->execute();
+  $sth2->execute();
+
+  my $winner;
+  while (!defined $winner) {
+    if ($sth1->pg_ready) {
+      $winner = 1;
+    }
+    elsif ($sth2->pg_ready) {
+      $winner = 2;
+    }
+    Time::HiRes::sleep 0.05;
+  }
+
+  my $count;
+  if ($winner == 1) {
+    $sth2->pg_cancel();
+    $sth1->pg_result();
+    $count = $sth1->fetchall_arrayref()->[0][0];
+  }
+  else {
+    $sth1->pg_cancel();
+    $sth2->pg_result();
+    $count = $sth2->fetchall_arrayref()->[0][0];
+  }
+
+=head2 Array support
+
+DBD::Pg allows arrays (as arrayrefs) to be passed in to both
+the L</quote> and the L</execute> methods. In both cases, the array is
+flattened into a string representing a Postgres array.
+
+When fetching rows from a table that contains a column with an
+array type, the result will be passed back to your script as an arrayref.
+
+To turn off the automatic parsing of returned arrays into arrayrefs,
+you can set the attribute L<pg_expand_array|/pg_expand_array_(boolean)>, which is true by default.
+
+  $dbh->{pg_expand_array} = 0;
+
+=head2 COPY support
+
+DBD::Pg allows for quick (bulk) reading and storing of data by using
+the B<COPY> command. The basic process is to use C<< $dbh->do >> to issue a
+COPY command, and then to either add rows using L</pg_putcopydata>, or to
+read them by using L</pg_getcopydata>.
+
+The first step is to put the server into "COPY" mode. This is done by
+sending a complete COPY command to the server, by using the L</do> method.
+For example:
+
+  $dbh->do("COPY foobar FROM STDIN");
+
+This would tell the server to enter a COPY IN mode (yes, that's confusing, but
+the I<mode> is COPY IN because of the I<command> COPY FROM). It is now ready to
+receive information via the L</pg_putcopydata> method. The complete syntax of the
+COPY command is more complex and not documented here: the canonical
+PostgreSQL documentation for COPY can be found at:
+
+http://www.postgresql.org/docs/current/static/sql-copy.html
+
+Once a COPY command has been issued, no other SQL commands are allowed
+until L</pg_putcopyend> has been issued (for COPY FROM), or the final
+L</pg_getcopydata> has been called (for COPY TO).
+
+Note: All other COPY methods (pg_putline, pg_getline, etc.)
are now
+heavily deprecated in favor of the pg_getcopydata, pg_putcopydata, and
+pg_putcopyend methods.
+
+=head3 B<pg_getcopydata>
+
+Used to retrieve data from a table after the server has been put into a
+COPY OUT mode by calling "COPY tablename TO STDOUT". Data is always returned
+one data row at a time. The first argument to pg_getcopydata
+is the variable into which the data will be stored (this variable should not
+be undefined, or it may throw a warning, although it may be a reference). The
+pg_getcopydata method returns a number greater than 1 indicating the new size of
+the variable, or a -1 when the COPY has finished. Once a -1 has been returned, no
+other action is necessary, as COPY mode will have already terminated. Example:
+
+  $dbh->do("COPY mytable TO STDOUT");
+  my @data;
+  my $x=0;
+  1 while $dbh->pg_getcopydata($data[$x++]) >= 0;
+
+There is also a variation of this method called B<pg_getcopydata_async>, which,
+as the name suggests, returns immediately. The only difference from the original
+method is that this version may return a 0, indicating that the row is not
+ready to be delivered yet. When this happens, the variable has not been changed,
+and you will need to call the method again until you get a non-zero result.
+(Data is still always returned one data row at a time.)
+
+=head3 B<pg_putcopydata>
+
+Used to put data into a table after the server has been put into COPY IN mode
+by calling "COPY tablename FROM STDIN". The only argument is the data you want
+inserted. Issue a pg_putcopyend() when you have added all your rows.
+
+The default delimiter is a tab character, but this can be changed in
+the COPY statement. Returns a 1 on successful input. Examples:
+
+  ## Simple example:
+  $dbh->do("COPY mytable FROM STDIN");
+  $dbh->pg_putcopydata("123\tPepperoni\t3\n");
+  $dbh->pg_putcopydata("314\tMushroom\t8\n");
+  $dbh->pg_putcopydata("6\tAnchovies\t100\n");
+  $dbh->pg_putcopyend();
+
+  ## This example uses explicit columns and a custom delimiter
+  $dbh->do("COPY mytable(flavor, slices) FROM STDIN WITH DELIMITER '~'");
+  $dbh->pg_putcopydata("Pepperoni~123\n");
+  $dbh->pg_putcopydata("Mushroom~314\n");
+  $dbh->pg_putcopydata("Anchovies~6\n");
+  $dbh->pg_putcopyend();
+
+=head3 B<pg_putcopyend>
+
+When you are finished with pg_putcopydata, call pg_putcopyend to let the server know
+that you are done, and it will return to a normal, non-COPY state. Returns a 1 on
+success. This method will fail if called when not in COPY IN mode.
+
+=head2 Large Objects
+
+DBD::Pg supports all large object functions provided by libpq via the
+C<func> method. Please note that access to a large object, even read-only
+large objects, must be put into a transaction.
+
+=head2 Cursors
+
+Although PostgreSQL supports cursors, they have not been used in the current
+implementation. When DBD::Pg was created, cursors in PostgreSQL could only be
+used inside a transaction block. Because only one transaction block at a time
+is allowed, this would have implied a restriction against using any nested
+C<SELECT> statements. Therefore the L</execute> method fetches all data at
+once into data structures located in the front-end application. This fact
+must be considered when selecting large amounts of data!
+
+You can use cursors in your application, but you'll need to do a little
+work. First you must declare your cursor. Now you can issue queries against
+the cursor, then select against your queries.
This typically results in a +double loop, like this: + + # WITH HOLD is not needed if AutoCommit is off + $dbh->do("DECLARE csr CURSOR WITH HOLD FOR $sql"); + while (1) { + my $sth = $dbh->prepare("fetch 1000 from csr"); + $sth->execute; + last if 0 == $sth->rows; + + while (my $row = $sth->fetchrow_hashref) { + # Do something with the data. + } + } + $dbh->do("CLOSE csr"); + +=head2 Datatype bool + +The current implementation of PostgreSQL returns 't' for true and 'f' for +false. From the Perl point of view, this is a rather unfortunate +choice. DBD::Pg therefore translates the result for the C<BOOL> data type in a +Perlish manner: 'f' becomes the number C<0> and 't' becomes the number C<1>. This way +the application does not have to check the database-specific returned values for +the data-type C<BOOL> because Perl treats C<0> as false and C<1> as true. You may +set the L<pg_bool_tf|/pg_bool_tf_(boolean)> attribute to a true value to change the values back to 't' and +'f' if you wish. + +Boolean values can be passed to PostgreSQL as TRUE, 't', 'true', 'y', 'yes' or +'1' for true and FALSE, 'f', 'false', 'n', 'no' or '0' for false. + +=head2 Schema support + +The PostgreSQL schema concept may differ from those of other databases. In a nutshell, +a schema is a named collection of objects within a single database. Please refer to the +PostgreSQL documentation for more details: + +L<http://www.postgresql.org/docs/current/static/ddl-schemas.html> + +DBD::Pg does not provide explicit support for PostgreSQL schemas. +However, schema functionality may be used without any restrictions by +explicitly addressing schema objects, e.g. + + my $res = $dbh->selectall_arrayref("SELECT * FROM my_schema.my_table"); + +or by manipulating the schema search path with C<SET search_path>, e.g. + + $dbh->do("SET search_path TO my_schema, public"); + +=head1 SEE ALSO + +=for text The B<DBI> module. + +=for html <a href="http://search.cpan.org/~timb/DBI/DBI.pm">The DBI module</a> + +=head1 BUGS + +To report a bug, or view the current list of bugs, please visit +http://rt.cpan.org/Public/Dist/Display.html?Name=DBD-Pg + +=head1 AUTHORS + +DBI by Tim Bunce L<http://www.tim.bunce.name> + +The original DBD-Pg was by Edmund Mergl (E.Mergl@bawue.de) and Jeffrey W. Baker +(jwbaker@acm.org). Major developers include David Wheeler <david@justatheory.com>, Jason +Stewart <jason@openinformatics.com>, Bruce Momjian <pgman@candle.pha.pa.us>, and +Greg Sabino Mullane <greg@turnstep.com>, with help from many others: see the F<Changes> +file for a complete list. + +Parts of this package were originally copied from DBI and DBD-Oracle. + +B<Mailing List> + +The current maintainers may be reached through the 'dbd-pg' mailing list: +<dbd-pg@perl.org> + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 1994-2010, Greg Sabino Mullane + +This module (DBD::Pg) is free software; you can redistribute it and/or modify it +under the same terms as Perl 5.10.0. For more details, see the full text of the +licenses in the directory LICENSES. + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/Proxy.pm b/Master/tlpkg/tlperl/lib/DBD/Proxy.pm new file mode 100755 index 00000000000..f0e8a364476 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/Proxy.pm @@ -0,0 +1,997 @@ +# -*- perl -*- +# +# +# DBD::Proxy - DBI Proxy driver +# +# +# Copyright (c) 1997,1998 Jochen Wiedmann +# +# The DBD::Proxy module is free software; you can redistribute it and/or +# modify it under the same terms as Perl itself. 
In particular permission +# is granted to Tim Bunce for distributing this as a part of the DBI. +# +# +# Author: Jochen Wiedmann +# Am Eisteich 9 +# 72555 Metzingen +# Germany +# +# Email: joe@ispsoft.de +# Phone: +49 7123 14881 +# + +use strict; +use Carp; + +require DBI; +DBI->require_version(1.0201); + +use RPC::PlClient 0.2000; # XXX change to 0.2017 once it's released + +{ package DBD::Proxy::RPC::PlClient; + @DBD::Proxy::RPC::PlClient::ISA = qw(RPC::PlClient); + sub Call { + my $self = shift; + if ($self->{debug}) { + my ($rpcmeth, $obj, $method, @args) = @_; + local $^W; # silence undefs + Carp::carp("Server $rpcmeth $method(@args)"); + } + return $self->SUPER::Call(@_); + } +} + + +package DBD::Proxy; + +use vars qw($VERSION $drh %ATTR); + +$VERSION = "0.2004"; + +$drh = undef; # holds driver handle once initialised + +%ATTR = ( # common to db & st, see also %ATTR in DBD::Proxy::db & ::st + 'Warn' => 'local', + 'Active' => 'local', + 'Kids' => 'local', + 'CachedKids' => 'local', + 'PrintError' => 'local', + 'RaiseError' => 'local', + 'HandleError' => 'local', + 'TraceLevel' => 'cached', + 'CompatMode' => 'local', +); + +sub driver ($$) { + if (!$drh) { + my($class, $attr) = @_; + + $class .= "::dr"; + + $drh = DBI::_new_drh($class, { + 'Name' => 'Proxy', + 'Version' => $VERSION, + 'Attribution' => 'DBD::Proxy by Jochen Wiedmann', + }); + $drh->STORE(CompatMode => 1); # disable DBI dispatcher attribute cache (for FETCH) + } + $drh; +} + +sub CLONE { + undef $drh; +} + +sub proxy_set_err { + my ($h,$errmsg) = @_; + my ($err, $state) = ($errmsg =~ s/ \[err=(.*?),state=(.*?)\]//) + ? ($1, $2) : (1, ' ' x 5); + return $h->set_err($err, $errmsg, $state); +} + +package DBD::Proxy::dr; # ====== DRIVER ====== + +$DBD::Proxy::dr::imp_data_size = 0; + +sub connect ($$;$$) { + my($drh, $dsn, $user, $auth, $attr)= @_; + my($dsnOrig) = $dsn; + + my %attr = %$attr; + my ($var, $val); + while (length($dsn)) { + if ($dsn =~ /^dsn=(.*)/) { + $attr{'dsn'} = $1; + last; + } + if ($dsn =~ /^(.*?);(.*)/) { + $var = $1; + $dsn = $2; + } else { + $var = $dsn; + $dsn = ''; + } + if ($var =~ /^(.*?)=(.*)/) { + $var = $1; + $val = $2; + $attr{$var} = $val; + } + } + + my $err = ''; + if (!defined($attr{'hostname'})) { $err .= " Missing hostname."; } + if (!defined($attr{'port'})) { $err .= " Missing port."; } + if (!defined($attr{'dsn'})) { $err .= " Missing remote dsn."; } + + # Create a cipher object, if requested + my $cipherRef = undef; + if ($attr{'cipher'}) { + $cipherRef = eval { $attr{'cipher'}->new(pack('H*', + $attr{'key'})) }; + if ($@) { $err .= " Cannot create cipher object: $@."; } + } + my $userCipherRef = undef; + if ($attr{'userkey'}) { + my $cipher = $attr{'usercipher'} || $attr{'cipher'}; + $userCipherRef = eval { $cipher->new(pack('H*', $attr{'userkey'})) }; + if ($@) { $err .= " Cannot create usercipher object: $@."; } + } + + return DBD::Proxy::proxy_set_err($drh, $err) if $err; # Returns undef + + my %client_opts = ( + 'peeraddr' => $attr{'hostname'}, + 'peerport' => $attr{'port'}, + 'socket_proto' => 'tcp', + 'application' => $attr{dsn}, + 'user' => $user || '', + 'password' => $auth || '', + 'version' => $DBD::Proxy::VERSION, + 'cipher' => $cipherRef, + 'debug' => $attr{debug} || 0, + 'timeout' => $attr{timeout} || undef, + 'logfile' => $attr{logfile} || undef + ); + # Options starting with 'proxy_rpc_' are forwarded to the RPC layer after + # stripping the prefix. 
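+    # For example, a DSN entry like "proxy_rpc_maxmessage=65536" would be
+    # passed to RPC::PlClient as (maxmessage => 65536); which options the
+    # RPC layer actually understands depends on your PlRPC version.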
+ while (my($var,$val) = each %attr) { + if ($var =~ s/^proxy_rpc_//) { + $client_opts{$var} = $val; + } + } + # Create an RPC::PlClient object. + my($client, $msg) = eval { DBD::Proxy::RPC::PlClient->new(%client_opts) }; + + return DBD::Proxy::proxy_set_err($drh, "Cannot log in to DBI::ProxyServer: $@") + if $@; # Returns undef + return DBD::Proxy::proxy_set_err($drh, "Constructor didn't return a handle: $msg") + unless ($msg =~ /^((?:\w+|\:\:)+)=(\w+)/); # Returns undef + + $msg = RPC::PlClient::Object->new($1, $client, $msg); + + my $max_proto_ver; + my ($server_ver_str) = eval { $client->Call('Version') }; + if ( $@ ) { + # Server denies call, assume legacy protocol. + $max_proto_ver = 1; + } else { + # Parse proxy server version. + my ($server_ver_num) = $server_ver_str =~ /^DBI::ProxyServer\s+([\d\.]+)/; + $max_proto_ver = $server_ver_num >= 0.3 ? 2 : 1; + } + my $req_proto_ver; + if ( exists $attr{proxy_lazy_prepare} ) { + $req_proto_ver = ($attr{proxy_lazy_prepare} == 0) ? 2 : 1; + return DBD::Proxy::proxy_set_err($drh, + "DBI::ProxyServer does not support synchronous statement preparation.") + if $max_proto_ver < $req_proto_ver; + } + + # Switch to user specific encryption mode, if desired + if ($userCipherRef) { + $client->{'cipher'} = $userCipherRef; + } + + # create a 'blank' dbh + my $this = DBI::_new_dbh($drh, { + 'Name' => $dsnOrig, + 'proxy_dbh' => $msg, + 'proxy_client' => $client, + 'RowCacheSize' => $attr{'RowCacheSize'} || 20, + 'proxy_proto_ver' => $req_proto_ver || 1 + }); + + foreach $var (keys %attr) { + if ($var =~ /proxy_/) { + $this->{$var} = $attr{$var}; + } + } + $this->SUPER::STORE('Active' => 1); + + $this; +} + + +sub DESTROY { undef } + + +package DBD::Proxy::db; # ====== DATABASE ====== + +$DBD::Proxy::db::imp_data_size = 0; + +# XXX probably many more methods need to be added here +# in order to trigger our AUTOLOAD to redirect them to the server. +# (Unless the sub is declared it's bypassed by perl method lookup.) +# See notes in ToDo about method metadata +# The question is whether to add all the methods in %DBI::DBI_methods +# to the corresponding classes (::db, ::st etc) +# Also need to consider methods that, if proxied, would change the server state +# in a way that might not be visible on the client, ie begin_work -> AutoCommit. + +sub commit; +sub connected; +sub rollback; +sub ping; + + +use vars qw(%ATTR $AUTOLOAD); + +# inherited: STORE / FETCH against this class. +# local: STORE / FETCH against parent class. +# cached: STORE to remote and local objects, FETCH from local. +# remote: STORE / FETCH against remote object only (default). +# +# Note: Attribute names starting with 'proxy_' always treated as 'inherited'. +# +%ATTR = ( # see also %ATTR in DBD::Proxy::st + %DBD::Proxy::ATTR, + RowCacheSize => 'inherited', + #AutoCommit => 'cached', + 'FetchHashKeyName' => 'cached', + Statement => 'local', + Driver => 'local', + dbi_connect_closure => 'local', + Username => 'local', +); + +sub AUTOLOAD { + my $method = $AUTOLOAD; + $method =~ s/(.*::(.*)):://; + my $class = $1; + my $type = $2; + #warn "AUTOLOAD of $method (class=$class, type=$type)"; + my %expand = ( + 'method' => $method, + 'class' => $class, + 'type' => $type, + 'call' => "$method(\@_)", + # XXX was trying to be smart but was tripping up over the DBI's own + # smartness. Disabled, but left here in case there are issues. + # 'call' => (UNIVERSAL::can("DBI::_::$type", $method)) ? 
"$method(\@_)" : "func(\@_, '$method')", + ); + + my $method_code = q{ + package ~class~; + sub ~method~ { + my $h = shift; + local $@; + my @result = wantarray + ? eval { $h->{'proxy_~type~h'}->~call~ } + : eval { scalar $h->{'proxy_~type~h'}->~call~ }; + return DBD::Proxy::proxy_set_err($h, $@) if $@; + return wantarray ? @result : $result[0]; + } + }; + $method_code =~ s/\~(\w+)\~/$expand{$1}/eg; + local $SIG{__DIE__} = 'DEFAULT'; + my $err = do { local $@; eval $method_code.2; $@ }; + die $err if $err; + goto &$AUTOLOAD; +} + +sub DESTROY { + my $dbh = shift; + local $@ if $@; # protect $@ + $dbh->disconnect if $dbh->SUPER::FETCH('Active'); +} + +sub disconnect ($) { + my ($dbh) = @_; + + # Sadly the Proxy too-often disagrees with the backend database + # on the subject of 'Active'. In the short term, I'd like the + # Proxy to ease up and let me decide when it's proper to go over + # the wire. This ultimately applies to finish() as well. + #return unless $dbh->SUPER::FETCH('Active'); + + # Drop database connection at remote end + my $rdbh = $dbh->{'proxy_dbh'}; + if ( $rdbh ) { + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + eval { $rdbh->disconnect() } ; + DBD::Proxy::proxy_set_err($dbh, $@) if $@; + } + + # Close TCP connect to remote + # XXX possibly best left till DESTROY? Add a config attribute to choose? + #$dbh->{proxy_client}->Disconnect(); # Disconnect method requires newer PlRPC module + $dbh->{proxy_client}->{socket} = undef; # hack + + $dbh->SUPER::STORE('Active' => 0); + 1; +} + + +sub STORE ($$$) { + my($dbh, $attr, $val) = @_; + my $type = $ATTR{$attr} || 'remote'; + + if ($attr eq 'TraceLevel') { + warn("TraceLevel $val"); + my $pc = $dbh->{proxy_client} || die; + $pc->{logfile} ||= 1; # XXX hack + $pc->{debug} = ($val && $val >= 4); + $pc->Debug("$pc debug enabled") if $pc->{debug}; + } + + if ($attr =~ /^proxy_/ || $type eq 'inherited') { + $dbh->{$attr} = $val; + return 1; + } + + if ($type eq 'remote' || $type eq 'cached') { + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $dbh->{'proxy_dbh'}->STORE($attr => $val) }; + return DBD::Proxy::proxy_set_err($dbh, $@) if $@; # returns undef + $dbh->SUPER::STORE($attr => $val) if $type eq 'cached'; + return $result; + } + return $dbh->SUPER::STORE($attr => $val); +} + +sub FETCH ($$) { + my($dbh, $attr) = @_; + # we only get here for cached attribute values if the handle is in CompatMode + # otherwise the DBI dispatcher handles the FETCH itself from the attribute cache. 
+ my $type = $ATTR{$attr} || 'remote'; + + if ($attr =~ /^proxy_/ || $type eq 'inherited' || $type eq 'cached') { + return $dbh->{$attr}; + } + + return $dbh->SUPER::FETCH($attr) unless $type eq 'remote'; + + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $dbh->{'proxy_dbh'}->FETCH($attr) }; + return DBD::Proxy::proxy_set_err($dbh, $@) if $@; + return $result; +} + +sub prepare ($$;$) { + my($dbh, $stmt, $attr) = @_; + my $sth = DBI::_new_sth($dbh, { + 'Statement' => $stmt, + 'proxy_attr' => $attr, + 'proxy_cache_only' => 0, + 'proxy_params' => [], + } + ); + my $proto_ver = $dbh->{'proxy_proto_ver'}; + if ( $proto_ver > 1 ) { + $sth->{'proxy_attr_cache'} = {cache_filled => 0}; + my $rdbh = $dbh->{'proxy_dbh'}; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $rsth = eval { $rdbh->prepare($sth->{'Statement'}, $sth->{'proxy_attr'}, undef, $proto_ver) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth") + unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/); + + my $client = $dbh->{'proxy_client'}; + $rsth = RPC::PlClient::Object->new($1, $client, $rsth); + + $sth->{'proxy_sth'} = $rsth; + # If statement is a positioned update we do not want any readahead. + $sth->{'RowCacheSize'} = 1 if $stmt =~ /\bfor\s+update\b/i; + # Since resources are used by prepared remote handle, mark us active. + $sth->SUPER::STORE(Active => 1); + } + $sth; +} + +sub quote { + my $dbh = shift; + my $proxy_quote = $dbh->{proxy_quote} || 'remote'; + + return $dbh->SUPER::quote(@_) + if $proxy_quote eq 'local' && @_ == 1; + + # For the common case of only a single argument + # (no $data_type) we could learn and cache the behaviour. + # Or we could probe the driver with a few test cases. + # Or we could add a way to ask the DBI::ProxyServer + # if $dbh->can('quote') == \&DBI::_::db::quote. + # Tim + # + # Sounds all *very* smart to me. I'd rather suggest to + # implement some of the typical quote possibilities + # and let the user set + # $dbh->{'proxy_quote'} = 'backslash_escaped'; + # for example. 
+ # Jochen + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $dbh->{'proxy_dbh'}->quote(@_) }; + return DBD::Proxy::proxy_set_err($dbh, $@) if $@; + return $result; +} + +sub table_info { + my $dbh = shift; + my $rdbh = $dbh->{'proxy_dbh'}; + #warn "table_info(@_)"; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my($numFields, $names, $types, @rows) = eval { $rdbh->table_info(@_) }; + return DBD::Proxy::proxy_set_err($dbh, $@) if $@; + my ($sth, $inner) = DBI::_new_sth($dbh, { + 'Statement' => "SHOW TABLES", + 'proxy_params' => [], + 'proxy_data' => \@rows, + 'proxy_attr_cache' => { + 'NUM_OF_PARAMS' => 0, + 'NUM_OF_FIELDS' => $numFields, + 'NAME' => $names, + 'TYPE' => $types, + 'cache_filled' => 1 + }, + 'proxy_cache_only' => 1, + }); + $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields); + $inner->{NAME} = $names; + $inner->{TYPE} = $types; + $sth->SUPER::STORE('Active' => 1); # already execute()'d + $sth->{'proxy_rows'} = @rows; + return $sth; +} + +sub tables { + my $dbh = shift; + #warn "tables(@_)"; + return $dbh->SUPER::tables(@_); +} + + +sub type_info_all { + my $dbh = shift; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $dbh->{'proxy_dbh'}->type_info_all(@_) }; + return DBD::Proxy::proxy_set_err($dbh, $@) if $@; + return $result; +} + + +package DBD::Proxy::st; # ====== STATEMENT ====== + +$DBD::Proxy::st::imp_data_size = 0; + +use vars qw(%ATTR); + +# inherited: STORE to current object. FETCH from current if exists, else call up +# to the (proxy) database object. +# local: STORE / FETCH against parent class. +# cache_only: STORE noop (read-only). FETCH from private_* if exists, else call +# remote and cache the result. +# remote: STORE / FETCH against remote object only (default). +# +# Note: Attribute names starting with 'proxy_' always treated as 'inherited'. +# +%ATTR = ( # see also %ATTR in DBD::Proxy::db + %DBD::Proxy::ATTR, + 'Database' => 'local', + 'RowsInCache' => 'local', + 'RowCacheSize' => 'inherited', + 'NULLABLE' => 'cache_only', + 'NAME' => 'cache_only', + 'TYPE' => 'cache_only', + 'PRECISION' => 'cache_only', + 'SCALE' => 'cache_only', + 'NUM_OF_FIELDS' => 'cache_only', + 'NUM_OF_PARAMS' => 'cache_only' +); + +*AUTOLOAD = \&DBD::Proxy::db::AUTOLOAD; + +sub execute ($@) { + my $sth = shift; + my $params = @_ ? \@_ : $sth->{'proxy_params'}; + + # new execute, so delete any cached rows from previous execute + undef $sth->{'proxy_data'}; + undef $sth->{'proxy_rows'}; + + my $rsth = $sth->{proxy_sth}; + my $dbh = $sth->FETCH('Database'); + my $proto_ver = $dbh->{proxy_proto_ver}; + + my ($numRows, @outData); + + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + if ( $proto_ver > 1 ) { + ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + + # Attributes passed back only on the first execute() of a statement. + unless ($sth->{proxy_attr_cache}->{cache_filled}) { + my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4); + $sth->{'proxy_attr_cache'} = { + 'NUM_OF_FIELDS' => $numFields, + 'NUM_OF_PARAMS' => $numParams, + 'NAME' => $names, + 'cache_filled' => 1 + }; + $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields); + $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams); + } + + } + else { + if ($rsth) { + ($numRows, @outData) = eval { $rsth->execute($params, $proto_ver) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + + } + else { + my $rdbh = $dbh->{'proxy_dbh'}; + + # Legacy prepare is actually prepare + first execute on the server. 
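+        # (the single call returns the remote handle plus the field/param
+        # metadata, so no separate execute round-trip is needed)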
+ ($rsth, @outData) = + eval { $rdbh->prepare($sth->{'Statement'}, + $sth->{'proxy_attr'}, $params, $proto_ver) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + return DBD::Proxy::proxy_set_err($sth, "Constructor didn't return a handle: $rsth") + unless ($rsth =~ /^((?:\w+|\:\:)+)=(\w+)/); + + my $client = $dbh->{'proxy_client'}; + $rsth = RPC::PlClient::Object->new($1, $client, $rsth); + + my ($numFields, $numParams, $names, $types) = splice(@outData, 0, 4); + $sth->{'proxy_sth'} = $rsth; + $sth->{'proxy_attr_cache'} = { + 'NUM_OF_FIELDS' => $numFields, + 'NUM_OF_PARAMS' => $numParams, + 'NAME' => $names + }; + $sth->SUPER::STORE('NUM_OF_FIELDS' => $numFields); + $sth->SUPER::STORE('NUM_OF_PARAMS' => $numParams); + $numRows = shift @outData; + } + } + # Always condition active flag. + $sth->SUPER::STORE('Active' => 1) if $sth->FETCH('NUM_OF_FIELDS'); # is SELECT + $sth->{'proxy_rows'} = $numRows; + # Any remaining items are output params. + if (@outData) { + foreach my $p (@$params) { + if (ref($p->[0])) { + my $ref = shift @outData; + ${$p->[0]} = $$ref; + } + } + } + + $sth->{'proxy_rows'} || '0E0'; +} + +sub fetch ($) { + my $sth = shift; + + my $data = $sth->{'proxy_data'}; + + $sth->{'proxy_rows'} = 0 unless defined $sth->{'proxy_rows'}; + + if(!$data || !@$data) { + return undef unless $sth->SUPER::FETCH('Active'); + + my $rsth = $sth->{'proxy_sth'}; + if (!$rsth) { + die "Attempt to fetch row without execute"; + } + my $num_rows = $sth->FETCH('RowCacheSize') || 20; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my @rows = eval { $rsth->fetch($num_rows) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + unless (@rows == $num_rows) { + undef $sth->{'proxy_data'}; + # server side has already called finish + $sth->SUPER::STORE(Active => 0); + } + return undef unless @rows; + $sth->{'proxy_data'} = $data = [@rows]; + } + my $row = shift @$data; + + $sth->SUPER::STORE(Active => 0) if ( $sth->{proxy_cache_only} and !@$data ); + $sth->{'proxy_rows'}++; + return $sth->_set_fbav($row); +} +*fetchrow_arrayref = \&fetch; + +sub rows ($) { + my $rows = shift->{'proxy_rows'}; + return (defined $rows) ? $rows : -1; +} + +sub finish ($) { + my($sth) = @_; + return 1 unless $sth->SUPER::FETCH('Active'); + my $rsth = $sth->{'proxy_sth'}; + $sth->SUPER::STORE('Active' => 0); + return 0 unless $rsth; # Something's out of sync + my $no_finish = exists($sth->{'proxy_no_finish'}) + ? 
$sth->{'proxy_no_finish'} + : $sth->FETCH('Database')->{'proxy_no_finish'}; + unless ($no_finish) { + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $rsth->finish() }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + return $result; + } + 1; +} + +sub STORE ($$$) { + my($sth, $attr, $val) = @_; + my $type = $ATTR{$attr} || 'remote'; + + if ($attr =~ /^proxy_/ || $type eq 'inherited') { + $sth->{$attr} = $val; + return 1; + } + + if ($type eq 'cache_only') { + return 0; + } + + if ($type eq 'remote' || $type eq 'cached') { + my $rsth = $sth->{'proxy_sth'} or return undef; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $rsth->STORE($attr => $val) }; + return DBD::Proxy::proxy_set_err($sth, $@) if ($@); + return $result if $type eq 'remote'; # else fall through to cache locally + } + return $sth->SUPER::STORE($attr => $val); +} + +sub FETCH ($$) { + my($sth, $attr) = @_; + + if ($attr =~ /^proxy_/) { + return $sth->{$attr}; + } + + my $type = $ATTR{$attr} || 'remote'; + if ($type eq 'inherited') { + if (exists($sth->{$attr})) { + return $sth->{$attr}; + } + return $sth->FETCH('Database')->{$attr}; + } + + if ($type eq 'cache_only' && + exists($sth->{'proxy_attr_cache'}->{$attr})) { + return $sth->{'proxy_attr_cache'}->{$attr}; + } + + if ($type ne 'local') { + my $rsth = $sth->{'proxy_sth'} or return undef; + local $SIG{__DIE__} = 'DEFAULT'; + local $@; + my $result = eval { $rsth->FETCH($attr) }; + return DBD::Proxy::proxy_set_err($sth, $@) if $@; + return $result; + } + elsif ($attr eq 'RowsInCache') { + my $data = $sth->{'proxy_data'}; + $data ? @$data : 0; + } + else { + $sth->SUPER::FETCH($attr); + } +} + +sub bind_param ($$$@) { + my $sth = shift; my $param = shift; + $sth->{'proxy_params'}->[$param-1] = [@_]; +} +*bind_param_inout = \&bind_param; + +sub DESTROY { + my $sth = shift; + $sth->finish if $sth->SUPER::FETCH('Active'); +} + + +1; + + +__END__ + +=head1 NAME + +DBD::Proxy - A proxy driver for the DBI + +=head1 SYNOPSIS + + use DBI; + + $dbh = DBI->connect("dbi:Proxy:hostname=$host;port=$port;dsn=$db", + $user, $passwd); + + # See the DBI module documentation for full details + +=head1 DESCRIPTION + +DBD::Proxy is a Perl module for connecting to a database via a remote +DBI driver. See L<DBD::Gofer> for an alternative with different trade-offs. + +This is of course not needed for DBI drivers which already +support connecting to a remote database, but there are engines which +don't offer network connectivity. + +Another application is offering database access through a firewall, as +the driver offers query based restrictions. For example you can +restrict queries to exactly those that are used in a given CGI +application. + +Speaking of CGI, another application is (or rather, will be) to reduce +the database connect/disconnect overhead from CGI scripts by using +proxying the connect_cached method. The proxy server will hold the +database connections open in a cache. The CGI script then trades the +database connect/disconnect overhead for the DBD::Proxy +connect/disconnect overhead which is typically much less. +I<Note that the connect_cached method is new and still experimental.> + + +=head1 CONNECTING TO THE DATABASE + +Before connecting to a remote database, you must ensure, that a Proxy +server is running on the remote machine. There's no default port, so +you have to ask your system administrator for the port number. See +L<DBI::ProxyServer> for details. 
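+
+For example, a proxy server might be started with the C<dbiproxy> script
+that comes with the DBI distribution (a minimal sketch; see L<dbiproxy>
+for the full set of options, which can vary between versions):
+
+  dbiproxy --localport=3334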
+
+Say, your Proxy server is running on machine "alpha", port 3334, and
+you'd like to connect to an ODBC database called "mydb" as user "joe"
+with password "hello". When using DBD::ODBC directly, you'd do a
+
+  $dbh = DBI->connect("DBI:ODBC:mydb", "joe", "hello");
+
+With DBD::Proxy this becomes
+
+  $dsn = "DBI:Proxy:hostname=alpha;port=3334;dsn=DBI:ODBC:mydb";
+  $dbh = DBI->connect($dsn, "joe", "hello");
+
+You see, this is much the same. The DBD::Proxy module will create a
+connection to the Proxy server on "alpha" which in turn will connect
+to the ODBC database.
+
+Refer to the L<DBI> documentation on the C<connect> method for a way
+to automatically use DBD::Proxy without having to change your code.
+
+DBD::Proxy's DSN string has the format
+
+  $dsn = "DBI:Proxy:key1=val1; ... ;keyN=valN;dsn=valDSN";
+
+In other words, it is a collection of key/value pairs. The following
+keys are recognized:
+
+=over 4
+
+=item hostname
+
+=item port
+
+Hostname and port of the Proxy server; these keys must be present,
+no defaults. Example:
+
+  hostname=alpha;port=3334
+
+=item dsn
+
+The value of this attribute will be used as a dsn name by the Proxy
+server. Thus it must have the format C<DBI:driver:...>, in particular
+it will contain colons. The I<dsn> value may contain semicolons, hence
+this key B<must> be the last and its value will be the complete
+remaining part of the dsn. Example:
+
+  dsn=DBI:ODBC:mydb
+
+=item cipher
+
+=item key
+
+=item usercipher
+
+=item userkey
+
+By using these fields you can enable encryption. If you set,
+for example,
+
+  cipher=$class;key=$key
+
+(note the semicolon) then DBD::Proxy will create a new cipher object
+by executing
+
+  $cipherRef = $class->new(pack("H*", $key));
+
+and pass this object to the RPC::PlClient module when creating a
+client. See L<RPC::PlClient>. Example:
+
+  cipher=IDEA;key=97cd2375efa329aceef2098babdc9721
+
+The usercipher/userkey attributes allow you to use two-phase encryption:
+The cipher/key encryption will be used in the login and authorisation
+phase. Once the client is authorised, it will switch to usercipher/userkey
+encryption. Thus the cipher/key pair is a B<host>-based secret, typically
+less secure than the usercipher/userkey secret and readable by anyone.
+The usercipher/userkey secret is B<your> private secret.
+
+Of course encryption requires an appropriately configured server. See
+L<DBI::ProxyServer/CONFIGURATION FILE>.
+
+=item debug
+
+Turn on debugging mode.
+
+=item stderr
+
+This attribute will set the corresponding attribute of the RPC::PlClient
+object, so that logging will not use syslog(), but will be redirected to
+stderr. This is the default under Windows.
+
+  stderr=1
+
+=item logfile
+
+Similar to the stderr attribute, but output will be redirected to the
+given file.
+
+  logfile=/dev/null
+
+=item RowCacheSize
+
+The DBD::Proxy driver supports this attribute (which is DBI standard,
+as of DBI 1.02). It's used to reduce network round-trips by fetching
+multiple rows in one go. The current default value is 20, but this may
+change.
+
+=item proxy_no_finish
+
+This attribute can be used to reduce network traffic: By default, if the
+application calls $sth->finish() then the proxy tells the server
+to finish the remote statement handle. Of course this slows things down
+quite a lot, but it is perfectly good for reducing memory usage with
+persistent connections.
+
+However, if you set the I<proxy_no_finish> attribute to a TRUE value,
+either in the database handle or in the statement handle, then finish()
+calls will be suppressed. This is what you want, for example, in small
+and fast CGI applications.
+
+=item proxy_quote
+
+This attribute can be used to reduce network traffic: By default calls
+to $dbh->quote() are passed to the remote driver. Of course this slows
+things down quite a lot, but it is the safest default behaviour.
+
+However, if you set the I<proxy_quote> attribute to the value 'C<local>'
+either in the database handle or in the statement handle, and the call
+to quote has only one parameter, then the local default DBI quote
+method will be used (which will be faster but may be wrong).
+
+=back
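+
+For example, a short-lived CGI script might trade strict resource cleanup
+for fewer network round-trips by setting both of the attributes described
+above (a sketch; whether 'local' quoting is correct depends on the remote
+driver):
+
+  $dbh->{proxy_no_finish} = 1;    # suppress remote finish() calls
+  $dbh->{proxy_quote} = 'local';  # quote with the local default method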
+
+=head1 KNOWN ISSUES
+
+=head2 Unproxied method calls
+
+If a method isn't being proxied, try declaring a stub sub in the appropriate
+package (DBD::Proxy::db for a dbh method, and DBD::Proxy::st for an sth method).
+For example:
+
+  sub DBD::Proxy::db::selectall_arrayref;
+
+That will enable selectall_arrayref to be proxied.
+
+Currently many methods aren't explicitly proxied and so you get the DBI's
+default methods executed on the client.
+
+Some of those methods, like selectall_arrayref, may then call other methods
+that are proxied (selectall_arrayref calls fetchall_arrayref which calls fetch
+which is proxied). So things may appear to work but operate more slowly than
+they could.
+
+This may all change in a later version.
+
+=head2 Complex handle attributes
+
+Sometimes handles have complex attributes like hash refs or
+array refs rather than simple strings or integers. For example, with
+DBD::CSV, you would like to write something like
+
+  $dbh->{"csv_tables"}->{"passwd"} =
+        { "sep_char" => ":", "eol" => "\n" };
+
+The above example would advise the CSV driver to assume the file
+"passwd" to be in the format of the /etc/passwd file: Colons as
+separators and a line feed without carriage return as line
+terminator.
+
+Surprisingly this example doesn't work with the proxy driver. To understand
+the reasons, you should consider the following: The Perl compiler is
+executing the above example in two steps:
+
+=over
+
+=item 1
+
+The first step is fetching the value of the key "csv_tables" in the
+handle $dbh. The value returned is complex, a hash ref.
+
+=item 2
+
+The second step is storing some value (the right hand side of the
+assignment) as the key "passwd" in the hash ref from step 1.
+
+=back
+
+This becomes a little bit clearer, if we rewrite the above code:
+
+  $tables = $dbh->{"csv_tables"};
+  $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+
+While the examples work fine without the proxy, they fail due to a
+subtle difference in step 1: By DBI magic, the hash ref
+$dbh->{'csv_tables'} is returned from the server to the client.
+The client creates a local copy. This local copy is the result of
+step 1. In other words, step 2 modifies a local copy of the hash ref,
+but not the server's hash ref.
+
+The workaround is storing the modified local copy back to the server:
+
+  $tables = $dbh->{"csv_tables"};
+  $tables->{"passwd"} = { "sep_char" => ":", "eol" => "\n" };
+  $dbh->{"csv_tables"} = $tables;
+
+
+=head1 AUTHOR AND COPYRIGHT
+
+This module is Copyright (c) 1997, 1998
+
+    Jochen Wiedmann
+    Am Eisteich 9
+    72555 Metzingen
+    Germany
+
+    Email: joe@ispsoft.de
+    Phone: +49 7123 14887
+
+The DBD::Proxy module is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
In particular permission +is granted to Tim Bunce for distributing this as a part of the DBI. + + +=head1 SEE ALSO + +L<DBI>, L<RPC::PlClient>, L<Storable> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/SQLite.pm b/Master/tlpkg/tlperl/lib/DBD/SQLite.pm new file mode 100755 index 00000000000..cec461c38d7 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/SQLite.pm @@ -0,0 +1,1575 @@ +package DBD::SQLite; + +use 5.006; +use strict; +use DBI 1.57 (); +use DynaLoader (); + +use vars qw($VERSION @ISA); +use vars qw{$err $errstr $drh $sqlite_version}; +use vars qw{%COLLATION}; + +BEGIN { + $VERSION = '1.29'; + @ISA = 'DynaLoader'; + + # Initialize errors + $err = undef; + $errstr = undef; + + # Driver singleton + $drh = undef; + + # sqlite_version cache + $sqlite_version = undef; +} + +__PACKAGE__->bootstrap($VERSION); + +tie %COLLATION, 'DBD::SQLite::_WriteOnceHash'; +$COLLATION{perl} = sub { $_[0] cmp $_[1] }; +$COLLATION{perllocale} = sub { use locale; $_[0] cmp $_[1] }; + +my $methods_are_installed; + +sub driver { + return $drh if $drh; + + if (!$methods_are_installed && $DBI::VERSION >= 1.608) { + DBI->setup_driver('DBD::SQLite'); + + DBD::SQLite::db->install_method('sqlite_last_insert_rowid'); + DBD::SQLite::db->install_method('sqlite_busy_timeout'); + DBD::SQLite::db->install_method('sqlite_create_function'); + DBD::SQLite::db->install_method('sqlite_create_aggregate'); + DBD::SQLite::db->install_method('sqlite_create_collation'); + DBD::SQLite::db->install_method('sqlite_collation_needed'); + DBD::SQLite::db->install_method('sqlite_progress_handler'); + DBD::SQLite::db->install_method('sqlite_commit_hook'); + DBD::SQLite::db->install_method('sqlite_rollback_hook'); + DBD::SQLite::db->install_method('sqlite_update_hook'); + DBD::SQLite::db->install_method('sqlite_set_authorizer'); + DBD::SQLite::db->install_method('sqlite_backup_from_file'); + DBD::SQLite::db->install_method('sqlite_backup_to_file'); + DBD::SQLite::db->install_method('sqlite_enable_load_extension'); + $methods_are_installed++; + } + + $drh = DBI::_new_drh( "$_[0]::dr", { + Name => 'SQLite', + Version => $VERSION, + Attribution => 'DBD::SQLite by Matt Sergeant et al', + } ); + return $drh; +} + +sub CLONE { + undef $drh; +} + +package DBD::SQLite::dr; + +sub connect { + my ($drh, $dbname, $user, $auth, $attr) = @_; + + # Default PrintWarn to the value of $^W + unless ( defined $attr->{PrintWarn} ) { + $attr->{PrintWarn} = $^W ? 1 : 0; + } + + my $dbh = DBI::_new_dbh( $drh, { + Name => $dbname, + } ); + + my $real = $dbname; + if ( $dbname =~ /=/ ) { + foreach my $attrib ( split(/;/, $dbname) ) { + my ($key, $value) = split(/=/, $attrib, 2); + if ( $key eq 'dbname' ) { + $real = $value; + } else { + $attr->{$key} = $value; + } + } + } + + # To avoid unicode and long file name problems on Windows, + # convert to the shortname if the file (or parent directory) exists. + if ( $^O =~ /MSWin32/ and $real ne ':memory:' and $real ne '') { + require Win32; + require File::Basename; + my ($file, $dir, $suffix) = File::Basename::fileparse($real); + my $short = Win32::GetShortPathName($real); + if ( $short && -f $short ) { + # Existing files will work directly. + $real = $short; + } elsif ( -d $dir ) { + # We are creating a new file. + # Does the directory it's in at least exist? + $real = join '', grep { defined } Win32::GetShortPathName($dir), $file, $suffix; + } else { + # SQLite can't do mkpath anyway. + # So let it go through as it and fail. 
+ } + } + + # Hand off to the actual login function + DBD::SQLite::db::_login($dbh, $real, $user, $auth, $attr) or return undef; + + # Register the on-demand collation installer + $DBI::VERSION >= 1.608 + ? $dbh->sqlite_collation_needed(\&install_collation) + : $dbh->func(\&install_collation, "collation_needed"); + + # Register the REGEXP function + $DBI::VERSION >= 1.608 + ? $dbh->sqlite_create_function("REGEXP", 2, \®exp) + : $dbh->func("REGEXP", 2, \®exp, "create_function"); + + # HACK: Since PrintWarn = 0 doesn't seem to actually prevent warnings + # in DBD::SQLite we set Warn to false if PrintWarn is false. + unless ( $attr->{PrintWarn} ) { + $attr->{Warn} = 0; + } + + return $dbh; +} + + +sub install_collation { + my ($dbh, $collation_name) = @_; + my $collation = $DBD::SQLite::COLLATION{$collation_name} + or die "can't install, unknown collation : $collation_name"; + $DBI::VERSION >= 1.608 + ? $dbh->sqlite_create_collation($collation_name => $collation) + : $dbh->func($collation_name => $collation, "create_collation"); +} + +# default implementation for sqlite 'REGEXP' infix operator. +# Note : args are reversed, i.e. "a REGEXP b" calls REGEXP(b, a) +# (see http://www.sqlite.org/vtab.html#xfindfunction) +sub regexp { + use locale; + return scalar($_[1] =~ $_[0]); +} + + +package DBD::SQLite::db; + +sub prepare { + my $dbh = shift; + my $sql = shift; + $sql = '' unless defined $sql; + + my $sth = DBI::_new_sth( $dbh, { + Statement => $sql, + } ); + + DBD::SQLite::st::_prepare($sth, $sql, @_) or return undef; + + return $sth; +} + +sub _get_version { + return ( DBD::SQLite::db::FETCH($_[0], 'sqlite_version') ); +} + +my %info = ( + 17 => 'SQLite', # SQL_DBMS_NAME + 18 => \&_get_version, # SQL_DBMS_VER + 29 => '"', # SQL_IDENTIFIER_QUOTE_CHAR +); + +sub get_info { + my($dbh, $info_type) = @_; + my $v = $info{int($info_type)}; + $v = $v->($dbh) if ref $v eq 'CODE'; + return $v; +} + +sub _attached_database_list { + my $dbh = shift; + my @attached; + + my $sth_databases = $dbh->prepare( 'PRAGMA database_list' ); + $sth_databases->execute; + while ( my $db_info = $sth_databases->fetchrow_hashref ) { + push @attached, $db_info->{name} if $db_info->{seq} >= 2; + } + return @attached; +} + +# SQL/CLI (ISO/IEC JTC 1/SC 32 N 0595), 6.63 Tables +# Based on DBD::Oracle's +# See also http://www.ch-werner.de/sqliteodbc/html/sqlite3odbc_8c.html#a213 +sub table_info { + my ($dbh, $cat_val, $sch_val, $tbl_val, $typ_val, $attr) = @_; + + my @where = (); + my $sql; + if ( defined($cat_val) && $cat_val eq '%' + && defined($sch_val) && $sch_val eq '' + && defined($tbl_val) && $tbl_val eq '') { # Rule 19a + $sql = <<'END_SQL'; +SELECT NULL TABLE_CAT + , NULL TABLE_SCHEM + , NULL TABLE_NAME + , NULL TABLE_TYPE + , NULL REMARKS +END_SQL + } + elsif ( defined($cat_val) && $cat_val eq '' + && defined($sch_val) && $sch_val eq '%' + && defined($tbl_val) && $tbl_val eq '') { # Rule 19b + $sql = <<'END_SQL'; +SELECT NULL TABLE_CAT + , t.tn TABLE_SCHEM + , NULL TABLE_NAME + , NULL TABLE_TYPE + , NULL REMARKS +FROM ( + SELECT 'main' tn + UNION SELECT 'temp' tn +END_SQL + for my $db_name (_attached_database_list($dbh)) { + $sql .= " UNION SELECT '$db_name' tn\n"; + } + $sql .= ") t\n"; + } + elsif ( defined($cat_val) && $cat_val eq '' + && defined($sch_val) && $sch_val eq '' + && defined($tbl_val) && $tbl_val eq '' + && defined($typ_val) && $typ_val eq '%') { # Rule 19c + $sql = <<'END_SQL'; +SELECT NULL TABLE_CAT + , NULL TABLE_SCHEM + , NULL TABLE_NAME + , t.tt TABLE_TYPE + , NULL REMARKS +FROM ( + SELECT 'TABLE' 
tt UNION + SELECT 'VIEW' tt UNION + SELECT 'LOCAL TEMPORARY' tt +) t +ORDER BY TABLE_TYPE +END_SQL + } + else { + $sql = <<'END_SQL'; +SELECT * +FROM +( +SELECT NULL TABLE_CAT + , TABLE_SCHEM + , tbl_name TABLE_NAME + , TABLE_TYPE + , NULL REMARKS + , sql sqlite_sql +FROM ( + SELECT 'main' TABLE_SCHEM, tbl_name, upper(type) TABLE_TYPE, sql + FROM sqlite_master +UNION ALL + SELECT 'temp' TABLE_SCHEM, tbl_name, 'LOCAL TEMPORARY' TABLE_TYPE, sql + FROM sqlite_temp_master +END_SQL + + for my $db_name (_attached_database_list($dbh)) { + $sql .= <<"END_SQL"; +UNION ALL + SELECT '$db_name' TABLE_SCHEM, tbl_name, upper(type) TABLE_TYPE, sql + FROM "$db_name".sqlite_master +END_SQL + } + + $sql .= <<'END_SQL'; +UNION ALL + SELECT 'main' TABLE_SCHEM, 'sqlite_master' tbl_name, 'SYSTEM TABLE' TABLE_TYPE, NULL sql +UNION ALL + SELECT 'temp' TABLE_SCHEM, 'sqlite_temp_master' tbl_name, 'SYSTEM TABLE' TABLE_TYPE, NULL sql +) +) +END_SQL + $attr = {} unless ref $attr eq 'HASH'; + my $escape = defined $attr->{Escape} ? " ESCAPE '$attr->{Escape}'" : ''; + if ( defined $sch_val ) { + push @where, "TABLE_SCHEM LIKE '$sch_val'$escape"; + } + if ( defined $tbl_val ) { + push @where, "TABLE_NAME LIKE '$tbl_val'$escape"; + } + if ( defined $typ_val ) { + my $table_type_list; + $typ_val =~ s/^\s+//; + $typ_val =~ s/\s+$//; + my @ttype_list = split (/\s*,\s*/, $typ_val); + foreach my $table_type (@ttype_list) { + if ($table_type !~ /^'.*'$/) { + $table_type = "'" . $table_type . "'"; + } + } + $table_type_list = join(', ', @ttype_list); + push @where, "TABLE_TYPE IN (\U$table_type_list)" if $table_type_list; + } + $sql .= ' WHERE ' . join("\n AND ", @where ) . "\n" if @where; + $sql .= " ORDER BY TABLE_TYPE, TABLE_SCHEM, TABLE_NAME\n"; + } + my $sth = $dbh->prepare($sql) or return undef; + $sth->execute or return undef; + $sth; +} + +sub primary_key_info { + my ($dbh, $catalog, $schema, $table) = @_; + + # Escape the schema and table name + $schema =~ s/([\\_%])/\\$1/g if defined $schema; + my $escaped = $table; + $escaped =~ s/([\\_%])/\\$1/g; + my $sth_tables = $dbh->table_info($catalog, $schema, $escaped, undef, {Escape => '\\'}); + + # This is a hack but much simpler than using pragma index_list etc + # also the pragma doesn't list 'INTEGER PRIMARY KEY' autoinc PKs! 
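+    # e.g. the regex below parses stored schema text such as:
+    #   CREATE TABLE foo (id INTEGER PRIMARY KEY, name TEXT)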
+ my @pk_info; + while ( my $row = $sth_tables->fetchrow_hashref ) { + my $sql = $row->{sqlite_sql} or next; + next unless $sql =~ /(.*?)\s*PRIMARY\s+KEY\s*(?:\(\s*(.*?)\s*\))?/si; + my @pk = split /\s*,\s*/, $2 || ''; + unless ( @pk ) { + my $prefix = $1; + $prefix =~ s/.*create\s+table\s+.*?\(\s*//si; + $prefix = (split /\s*,\s*/, $prefix)[-1]; + @pk = (split /\s+/, $prefix)[0]; # take first word as name + } + my $key_seq = 0; + foreach my $pk_field (@pk) { + push @pk_info, { + TABLE_SCHEM => $row->{TABLE_SCHEM}, + TABLE_NAME => $row->{TABLE_NAME}, + COLUMN_NAME => $pk_field, + KEY_SEQ => ++$key_seq, + PK_NAME => 'PRIMARY KEY', + }; + } + } + + my $sponge = DBI->connect("DBI:Sponge:", '','') + or return $dbh->DBI::set_err($DBI::err, "DBI::Sponge: $DBI::errstr"); + my @names = qw(TABLE_CAT TABLE_SCHEM TABLE_NAME COLUMN_NAME KEY_SEQ PK_NAME); + my $sth = $sponge->prepare( "primary_key_info $table", { + rows => [ map { [ @{$_}{@names} ] } @pk_info ], + NUM_OF_FIELDS => scalar @names, + NAME => \@names, + }) or return $dbh->DBI::set_err( + $sponge->err(), + $sponge->errstr() + ); + return $sth; +} + +sub type_info_all { + return; # XXX code just copied from DBD::Oracle, not yet thought about +# return [ +# { +# TYPE_NAME => 0, +# DATA_TYPE => 1, +# COLUMN_SIZE => 2, +# LITERAL_PREFIX => 3, +# LITERAL_SUFFIX => 4, +# CREATE_PARAMS => 5, +# NULLABLE => 6, +# CASE_SENSITIVE => 7, +# SEARCHABLE => 8, +# UNSIGNED_ATTRIBUTE => 9, +# FIXED_PREC_SCALE => 10, +# AUTO_UNIQUE_VALUE => 11, +# LOCAL_TYPE_NAME => 12, +# MINIMUM_SCALE => 13, +# MAXIMUM_SCALE => 14, +# SQL_DATA_TYPE => 15, +# SQL_DATETIME_SUB => 16, +# NUM_PREC_RADIX => 17, +# }, +# [ 'CHAR', 1, 255, '\'', '\'', 'max length', 1, 1, 3, +# undef, '0', '0', undef, undef, undef, 1, undef, undef +# ], +# [ 'NUMBER', 3, 38, undef, undef, 'precision,scale', 1, '0', 3, +# '0', '0', '0', undef, '0', 38, 3, undef, 10 +# ], +# [ 'DOUBLE', 8, 15, undef, undef, undef, 1, '0', 3, +# '0', '0', '0', undef, undef, undef, 8, undef, 10 +# ], +# [ 'DATE', 9, 19, '\'', '\'', undef, 1, '0', 3, +# undef, '0', '0', undef, '0', '0', 11, undef, undef +# ], +# [ 'VARCHAR', 12, 1024*1024, '\'', '\'', 'max length', 1, 1, 3, +# undef, '0', '0', undef, undef, undef, 12, undef, undef +# ] +# ]; +} + +my @COLUMN_INFO = qw( + TABLE_CAT + TABLE_SCHEM + TABLE_NAME + COLUMN_NAME + DATA_TYPE + TYPE_NAME + COLUMN_SIZE + BUFFER_LENGTH + DECIMAL_DIGITS + NUM_PREC_RADIX + NULLABLE + REMARKS + COLUMN_DEF + SQL_DATA_TYPE + SQL_DATETIME_SUB + CHAR_OCTET_LENGTH + ORDINAL_POSITION + IS_NULLABLE +); + +sub column_info { + my ($dbh, $cat_val, $sch_val, $tbl_val, $col_val) = @_; + + if ( defined $col_val and $col_val eq '%' ) { + $col_val = undef; + } + + # Get a list of all tables ordered by TABLE_SCHEM, TABLE_NAME + my $sql = <<'END_SQL'; +SELECT TABLE_SCHEM, tbl_name TABLE_NAME +FROM ( + SELECT 'main' TABLE_SCHEM, tbl_name + FROM sqlite_master + WHERE type IN ('table','view') +UNION ALL + SELECT 'temp' TABLE_SCHEM, tbl_name + FROM sqlite_temp_master + WHERE type IN ('table','view') +END_SQL + + for my $db_name (_attached_database_list($dbh)) { + $sql .= <<"END_SQL"; +UNION ALL + SELECT '$db_name' TABLE_SCHEM, tbl_name + FROM "$db_name".sqlite_master + WHERE type IN ('table','view') +END_SQL + } + + $sql .= <<'END_SQL'; +UNION ALL + SELECT 'main' TABLE_SCHEM, 'sqlite_master' tbl_name +UNION ALL + SELECT 'temp' TABLE_SCHEM, 'sqlite_temp_master' tbl_name +) +END_SQL + + my @where; + if ( defined $sch_val ) { + push @where, "TABLE_SCHEM LIKE '$sch_val'"; + } + if ( defined $tbl_val ) { + 
push @where, "TABLE_NAME LIKE '$tbl_val'"; + } + $sql .= ' WHERE ' . join("\n AND ", @where ) . "\n" if @where; + $sql .= " ORDER BY TABLE_SCHEM, TABLE_NAME\n"; + my $sth_tables = $dbh->prepare($sql) or return undef; + $sth_tables->execute or return undef; + + # Taken from Fey::Loader::SQLite + my @cols; + while ( my ($schema, $table) = $sth_tables->fetchrow_array ) { + my $sth_columns = $dbh->prepare(qq{PRAGMA "$schema".table_info("$table")}); + $sth_columns->execute; + + for ( my $position = 1; my $col_info = $sth_columns->fetchrow_hashref; $position++ ) { + if ( defined $col_val ) { + # This must do a LIKE comparison + my $sth = $dbh->prepare("SELECT '$col_info->{name}' LIKE '$col_val'") or return undef; + $sth->execute or return undef; + # Skip columns that don't match $col_val + next unless ($sth->fetchrow_array)[0]; + } + + my %col = ( + TABLE_SCHEM => $schema, + TABLE_NAME => $table, + COLUMN_NAME => $col_info->{name}, + ORDINAL_POSITION => $position, + ); + + my $type = $col_info->{type}; + if ( $type =~ s/(\w+) ?\((\d+)(?:,(\d+))?\)/$1/ ) { + $col{COLUMN_SIZE} = $2; + $col{DECIMAL_DIGITS} = $3; + } + + $col{TYPE_NAME} = $type; + + if ( defined $col_info->{dflt_value} ) { + $col{COLUMN_DEF} = $col_info->{dflt_value} + } + + if ( $col_info->{notnull} ) { + $col{NULLABLE} = 0; + $col{IS_NULLABLE} = 'NO'; + } else { + $col{NULLABLE} = 1; + $col{IS_NULLABLE} = 'YES'; + } + ++ push @cols, \%col; + } + $sth_columns->finish; + } + $sth_tables->finish; + + my $sponge = DBI->connect("DBI:Sponge:", '','') + or return $dbh->DBI::set_err($DBI::err, "DBI::Sponge: $DBI::errstr"); + $sponge->prepare( "column_info", { + rows => [ map { [ @{$_}{@COLUMN_INFO} ] } @cols ], + NUM_OF_FIELDS => scalar @COLUMN_INFO, + NAME => [ @COLUMN_INFO ], + } ) or return $dbh->DBI::set_err( + $sponge->err, + $sponge->errstr, + ); +} + +#====================================================================== +# An internal tied hash package used for %DBD::SQLite::COLLATION, to +# prevent people from unintentionally overriding globally registered collations. + +package DBD::SQLite::_WriteOnceHash; + +require Tie::Hash; + +our @ISA = qw(Tie::StdHash); + +sub TIEHASH { + bless {}, $_[0]; +} + +sub STORE { + ! exists $_[0]->{$_[1]} or die "entry $_[1] already registered"; + $_[0]->{$_[1]} = $_[2]; +} + +sub DELETE { + die "deletion of entry $_[1] is forbidden"; +} + +1; + +__END__ + +=pod + +=head1 NAME + +DBD::SQLite - Self-contained RDBMS in a DBI Driver + +=head1 SYNOPSIS + + use DBI; + my $dbh = DBI->connect("dbi:SQLite:dbname=$dbfile","",""); + +=head1 DESCRIPTION + +SQLite is a public domain file-based relational database engine that +you can find at L<http://www.sqlite.org/>. + +B<DBD::SQLite> is a Perl DBI driver for SQLite, that includes +the entire thing in the distribution. +So in order to get a fast transaction capable RDBMS working for your +perl project you simply have to install this module, and B<nothing> +else. + +SQLite supports the following features: + +=over 4 + +=item Implements a large subset of SQL92 + +See L<http://www.sqlite.org/lang.html> for details. + +=item A complete DB in a single disk file + +Everything for your database is stored in a single disk file, making it +easier to move things around than with L<DBD::CSV>. + +=item Atomic commit and rollback + +Yes, B<DBD::SQLite> is small and light, but it supports full transactions! + +=item Extensible + +User-defined aggregate or regular functions can be registered with the +SQL parser. 
+
+=back
+
+There's lots more to it, so please refer to the docs on the SQLite web
+page, listed above, for SQL details. Also refer to L<DBI> for details
+on how to use DBI itself. The API works like every DBI module does.
+However, currently many statement attributes are not implemented or
+are limited by the typeless nature of the SQLite database.
+
+=head1 NOTABLE DIFFERENCES FROM OTHER DRIVERS
+
+=head2 Database Name Is A File Name
+
+SQLite creates a file per database. You should pass the C<path> of
+the database file (with or without a parent directory) in the DBI
+connection string (as a database C<name>):
+
+  my $dbh = DBI->connect("dbi:SQLite:dbname=$dbfile","","");
+
+The file is opened in read/write mode, and will be created if
+it does not exist yet.
+
+Although the database is stored in a single file, the directory
+containing the database file must be writable by SQLite because the
+library will create several temporary files there.
+
+If the filename C<$dbfile> is ":memory:", then a private, temporary
+in-memory database is created for the connection. This in-memory
+database will vanish when the database connection is closed.
+It is handy for your library tests.
+
+Note that future versions of SQLite might make use of additional
+special filenames that begin with the ":" character. It is recommended
+that when a database filename actually does begin with a ":" character
+you should prefix the filename with a pathname such as "./" to avoid
+ambiguity.
+
+If the filename C<$dbfile> is an empty string, then a private,
+temporary on-disk database will be created. This private database will
+be automatically deleted as soon as the database connection is closed.
+
+=head2 Accessing A Database With Other Tools
+
+To access the database from the command line, try using C<dbish>,
+which comes with the L<DBI::Shell> module. Just type
+
+  dbish dbi:SQLite:foo.db
+
+on the command line to access the file F<foo.db>.
+
+Alternatively you can install SQLite from the link above without
+conflicting with B<DBD::SQLite> and use the supplied C<sqlite3>
+command line tool.
+
+=head2 Blobs
+
+As of version 1.11, blobs should "just work" in SQLite as text columns.
+However this will cause the data to be treated as a string, so SQL
+statements such as length(x) will return the length of the column as a NUL
+terminated string, rather than the size of the blob in bytes. In order to
+store natively as a BLOB use the following code:
+
+  use DBI qw(:sql_types);
+  my $dbh = DBI->connect("dbi:SQLite:dbfile","","");
+
+  my $blob = `cat foo.jpg`;
+  my $sth = $dbh->prepare("INSERT INTO mytable VALUES (1, ?)");
+  $sth->bind_param(1, $blob, SQL_BLOB);
+  $sth->execute();
+
+And then retrieval just works:
+
+  $sth = $dbh->prepare("SELECT * FROM mytable WHERE id = 1");
+  $sth->execute();
+  my $row = $sth->fetch;
+  my $blobo = $row->[1];
+
+  # now $blobo == $blob
+
+=head2 Functions And Bind Parameters
+
+As of this writing, an SQL statement that compares a return value of a
+function with a numeric bind value like this doesn't work as you might
+expect:
+
+  my $sth = $dbh->prepare(q{
+    SELECT bar FROM foo GROUP BY bar HAVING count(*) > ?;
+  });
+  $sth->execute(5);
+
+This is because DBD::SQLite assumes that all the bind values are text
+(and should be quoted) by default. Thus the above statement becomes
+like this while executing:
+
+  SELECT bar FROM foo GROUP BY bar HAVING count(*) > "5";
+
+There are two workarounds for this.
+
+=over 4
+
+=item Use bind_param() explicitly
+
+As shown above in the C<BLOB> section, you can always use
+C<bind_param()> to tell the type of a bind value.
+
+  use DBI qw(:sql_types); # Don't forget this
+
+  my $sth = $dbh->prepare(q{
+    SELECT bar FROM foo GROUP BY bar HAVING count(*) > ?;
+  });
+  $sth->bind_param(1, 5, SQL_INTEGER);
+  $sth->execute();
+
+=item Add zero to make it a number
+
+This is somewhat weird, but works anyway.
+
+  my $sth = $dbh->prepare(q{
+    SELECT bar FROM foo GROUP BY bar HAVING count(*) > (? + 0);
+  });
+  $sth->execute(5);
+
+=back
+
+=head2 Foreign Keys
+
+B<BE PREPARED! WOLVES APPROACH!!>
+
+SQLite has supported foreign key constraints since 3.6.19
+(released on Oct 14, 2009; bundled with DBD::SQLite 1.26_05).
+To be exact, SQLite has long been able to parse a schema with foreign
+keys, but the constraints have not been enforced. Now you can issue
+a pragma to actually enable this feature and enforce the constraints.
+
+To do this, issue the following pragma (see below), preferably as
+soon as you connect to a database and you're not in a transaction:
+
+  $dbh->do("PRAGMA foreign_keys = ON");
+
+And you can explicitly disable the feature whenever you like by
+turning the pragma off:
+
+  $dbh->do("PRAGMA foreign_keys = OFF");
+
+As of this writing, this feature is disabled by default by the
+sqlite team, and by us, to preserve backward compatibility, as
+this feature may break your applications, and actually broke
+some for us. If you have used a schema with foreign key constraints
+but haven't cared about them much and supposed they're always ignored
+for SQLite, be prepared, and B<please do extensive testing to ensure
+that your applications will continue to work when the foreign keys
+support is enabled by default>. It is very likely that the sqlite
+team will turn it on by default in the future, and we plan to do so
+NO LATER THAN they do.
+
+See L<http://www.sqlite.org/foreignkeys.html> for details.
+
+=head2 Pragma
+
+SQLite has a set of "Pragma"s to modify its operation or to query
+for its internal data. These are specific to SQLite and are not
+likely to work with other DBD libraries, but you may find some of
+these are quite useful. DBD::SQLite actually sets some (like
+C<show_datatypes>) for you when you connect to a database.
+See L<http://www.sqlite.org/pragma.html> for details.
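+
+For example, a pragma can be both set and queried back through the ordinary
+DBI methods (a minimal sketch; which pragmas exist depends on your SQLite
+version, see the link above):
+
+  $dbh->do("PRAGMA cache_size = 4000");                     # set a pragma
+  my ($size) = $dbh->selectrow_array("PRAGMA cache_size");  # query it back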
+
+=head2 Transactions
+
+DBI/DBD::SQLite's transactions may be a bit confusing. They behave
+differently according to the status of the C<AutoCommit> flag:
+
+=over 4
+
+=item When the AutoCommit flag is on
+
+You're supposed to always use the auto-commit mode, except when you
+explicitly begin a transaction; when the transaction has ended, you're
+supposed to go back to the auto-commit mode. To begin a
+transaction, call the C<begin_work> method, or issue a C<BEGIN>
+statement. To end it, call the C<commit>/C<rollback> methods, or issue
+the corresponding statements.
+
+  $dbh->{AutoCommit} = 1;
+
+  $dbh->begin_work; # or $dbh->do('BEGIN TRANSACTION');
+
+  # $dbh->{AutoCommit} is turned off temporarily during a transaction;
+
+  $dbh->commit; # or $dbh->do('COMMIT');
+
+  # $dbh->{AutoCommit} is turned on again;
+
+=item When the AutoCommit flag is off
+
+You're supposed to always use the transactional mode, until you
+explicitly turn on the AutoCommit flag. You can explicitly issue
+a C<BEGIN> statement (only when an actual transaction has not
+begun yet) but you're not allowed to call the C<begin_work> method
+(if you don't issue a C<BEGIN>, it will be issued internally).
+You can commit or roll it back freely. Another transaction will
+automatically begin if you execute another statement.
+
+  $dbh->{AutoCommit} = 0;
+
+  # $dbh->do('BEGIN TRANSACTION') is not necessary, but possible
+
+  ...
+
+  $dbh->commit; # or $dbh->do('COMMIT');
+
+  # $dbh->{AutoCommit} stays intact;
+
+  $dbh->{AutoCommit} = 1;  # ends the transactional mode
+
+=back
+
+This C<AutoCommit> mode is independent from the autocommit mode
+of the internal SQLite library, which always begins with a C<BEGIN>
+statement, and ends with a C<COMMIT> or a C<ROLLBACK>.
+
+=head2 Performance
+
+SQLite is fast, very fast. Matt processed his 72MB log file with it,
+inserting the data (400,000+ rows) by using transactions and only
+committing every 1000 rows (otherwise the insertion is quite slow),
+and then performing queries on the data.
+
+Queries like count(*) and avg(bytes) took fractions of a second to
+return, but what surprised him most of all was:
+
+  SELECT url, count(*) as count
+  FROM access_log
+  GROUP BY url
+  ORDER BY count desc
+  LIMIT 20
+
+To discover the top 20 hit URLs on the site (L<http://axkit.org>),
+and it returned within 2 seconds. He was seriously considering
+switching his log analysis code to use this little speed demon!
+
+Oh yeah, and that was with no indexes on the table, on a 400MHz PIII.
+
+For best performance be sure to tune your hdparm settings if you
+are using Linux. Also you might want to set:
+
+  PRAGMA default_synchronous = OFF
+
+which will prevent sqlite from doing fsync's when writing (which
+slows down non-transactional writes significantly) at the expense
+of some peace of mind. Also try playing with the cache_size pragma.
+
+The memory usage of SQLite can also be tuned using the cache_size
+pragma.
+
+  $dbh->do("PRAGMA cache_size = 800000");
+
+The above will allocate 800M for DB cache; the default is 2M.
+Your sweet spot probably lies somewhere in between.
+
+=head1 DRIVER PRIVATE ATTRIBUTES
+
+=head2 Database Handle Attributes
+
+=over 4
+
+=item sqlite_version
+
+Returns the version of the SQLite library which B<DBD::SQLite> is using,
+e.g., "2.8.0". Can only be read.
+
+=item sqlite_unicode
+
+If set to a true value, B<DBD::SQLite> will turn the UTF-8 flag on for all
+text strings coming out of the database (this feature is currently disabled
+for perl < 5.8.5). For more details on the UTF-8 flag see
+L<perlunicode>. The default is for the UTF-8 flag to be turned off.
+
+Also note that due to some bizarreness in SQLite's type system (see
+L<http://www.sqlite.org/datatype3.html>), if you want to retain
+blob-style behavior for B<some> columns under C<< $dbh->{sqlite_unicode} = 1
+>> (say, to store images in the database), you have to state so
+explicitly using the 3-argument form of L<DBI/bind_param> when doing
+updates:
+
+  use DBI qw(:sql_types);
+  $dbh->{sqlite_unicode} = 1;
+  my $sth = $dbh->prepare("INSERT INTO mytable (blobcolumn) VALUES (?)");
+
+  # Binary_data will be stored as is.
+  $sth->bind_param(1, $binary_data, SQL_BLOB);
+
+Defining the column type as C<BLOB> in the DDL is B<not> sufficient.
+
+This attribute was originally named C<unicode>, and was renamed to
+C<sqlite_unicode> for consistency since version 1.26_06. The old
+C<unicode> attribute is still accessible but will be deprecated in the
+near future.
+
+=back
+
+=head1 METHODS
+
+=head2 table_info
+
+  $sth = $dbh->table_info(undef, $schema, $table, $type, \%attr);
+
+Returns all tables and schemas (databases) as specified in L<DBI/table_info>.
+The schema and table arguments will do a C<LIKE> search. You can specify an
+ESCAPE character by including an 'Escape' attribute in \%attr. The C<$type>
+argument accepts a comma separated list of the following types 'TABLE',
+'VIEW', 'LOCAL TEMPORARY' and 'SYSTEM TABLE' (by default all are returned).
+Note that a statement handle is returned, and not a direct list of tables.
+
+The following fields are returned:
+
+B<TABLE_CAT>: Always NULL, as SQLite does not have the concept of catalogs.
+
+B<TABLE_SCHEM>: The name of the schema (database) that the table or view is
+in. The default schema is 'main', temporary tables are in 'temp' and other
+databases will be in the name given when the database was attached.
+
+B<TABLE_NAME>: The name of the table or view.
+
+B<TABLE_TYPE>: The type of object returned. Will be one of 'TABLE', 'VIEW',
+'LOCAL TEMPORARY' or 'SYSTEM TABLE'.
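+
+For example, here is a minimal sketch that lists every table the handle can
+see, using the fields described above:
+
+  my $sth = $dbh->table_info(undef, undef, '%', undef);
+  while (my $t = $sth->fetchrow_hashref) {
+    printf "%s.%s (%s)\n",
+      $t->{TABLE_SCHEM}, $t->{TABLE_NAME}, $t->{TABLE_TYPE};
+  }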
+
+The following fields are returned:
+
+B<TABLE_CAT>: Always NULL, as SQLite does not have the concept of catalogs.
+
+B<TABLE_SCHEM>: The name of the schema (database) that the table or view is
+in. The default schema is 'main', temporary tables are in 'temp' and other
+databases will be in the name given when the database was attached.
+
+B<TABLE_NAME>: The name of the table or view.
+
+B<TABLE_TYPE>: The type of object returned. Will be one of 'TABLE', 'VIEW',
+'LOCAL TEMPORARY' or 'SYSTEM TABLE'.
+
+=head1 DRIVER PRIVATE METHODS
+
+The following methods can be called via the func() method with a little
+tweak, but the use of the func() method is now discouraged by the L<DBI>
+author for various reasons (see DBI's document
+L<http://search.cpan.org/dist/DBI/lib/DBI/DBD.pm#Using_install_method()_to_expose_driver-private_methods>
+for details). So, if you're using L<DBI> >= 1.608, use these C<sqlite_>
+methods. If you need to use an older L<DBI>, you can call these like this:
+
+  $dbh->func( ..., "(method name without sqlite_ prefix)" );
+
+=head2 $dbh->sqlite_last_insert_rowid()
+
+This method returns the last inserted rowid. If you specify an INTEGER PRIMARY
+KEY as the first column in your table, that is the column that is returned.
+Otherwise, it is the hidden ROWID column. See the sqlite docs for details.
+
+Generally you should not be using this method. Use the L<DBI> last_insert_id
+method instead. The usage of this is:
+
+  $h->last_insert_id($catalog, $schema, $table_name, $field_name [, \%attr ])
+
+Running C<$h-E<gt>last_insert_id("","","","")> is the equivalent of running
+C<$dbh-E<gt>sqlite_last_insert_rowid()> directly.
+
+=head2 $dbh->sqlite_busy_timeout()
+
+Retrieve the current busy timeout.
+
+=head2 $dbh->sqlite_busy_timeout( $ms )
+
+Set the current busy timeout. The timeout is in milliseconds.
+
+=head2 $dbh->sqlite_create_function( $name, $argc, $code_ref )
+
+This method will register a new function which will be usable in an SQL
+query. The method's parameters are:
+
+=over
+
+=item $name
+
+The name of the function. This is the name of the function as it will
+be used from SQL.
+
+=item $argc
+
+The number of arguments taken by the function. If this number is -1,
+the function can take any number of arguments.
+
+=item $code_ref
+
+This should be a reference to the function's implementation.
+
+=back
+
+For example, here is how to define a now() function which returns the
+current number of seconds since the epoch:
+
+  $dbh->sqlite_create_function( 'now', 0, sub { return time } );
+
+After this, it can be used from SQL as:
+
+  INSERT INTO mytable VALUES ( now() );
+
+=head3 REGEXP function
+
+SQLite includes syntactic support for an infix operator 'REGEXP', but
+without any implementation. The C<DBD::SQLite> driver
+automatically registers an implementation that performs standard
+perl regular expression matching, using the current locale. So for
+example you can search for words starting with an 'A' with a query like
+
+  SELECT * from table WHERE column REGEXP '\bA\w+'
+
+If you want case-insensitive searching, use perl regex flags, like this:
+
+  SELECT * from table WHERE column REGEXP '(?i:\bA\w+)'
+
+The default REGEXP implementation can be overridden through the
+C<create_function> API described above.
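+
+For instance, here is a minimal sketch of such an override that makes
+REGEXP case-insensitive by default (SQLite evaluates C<X REGEXP Y> as a
+call to C<regexp(Y, X)>, i.e. the pattern comes first):
+
+  $dbh->sqlite_create_function( 'regexp', 2, sub {
+    my ( $pattern, $string ) = @_;
+    return ( defined $string && $string =~ /$pattern/i ) ? 1 : 0;
+  } );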
+
+Note that regexp matching will B<not> use SQLite indices, but will iterate
+over all rows, so it could be quite costly in terms of performance.
+
+=head2 $dbh->sqlite_create_collation( $name, $code_ref )
+
+This method manually registers a new function which will be usable in an SQL
+query as a COLLATE option for sorting. Such functions can also be registered
+automatically on demand: see section L</"COLLATION FUNCTIONS"> below.
+
+The method's parameters are:
+
+=over
+
+=item $name
+
+The name of the function exposed to SQL.
+
+=item $code_ref
+
+Reference to the function's implementation.
+The driver will check that this is a proper sorting function.
+
+=back
+
+=head2 $dbh->sqlite_collation_needed( $code_ref )
+
+This method manually registers a callback function that will
+be invoked whenever an undefined collation sequence is required
+from an SQL statement. The callback is invoked as
+
+  $code_ref->($dbh, $collation_name)
+
+and should register the desired collation using
+L</"sqlite_create_collation">.
+
+An initial callback is already registered by C<DBD::SQLite>,
+so for most common cases it will be simpler to just
+add your collation sequences in the C<%DBD::SQLite::COLLATION>
+hash (see section L</"COLLATION FUNCTIONS"> below).
+
+=head2 $dbh->sqlite_create_aggregate( $name, $argc, $pkg )
+
+This method will register a new aggregate function which can then be used
+from SQL. The method's parameters are:
+
+=over
+
+=item $name
+
+The name of the aggregate function; this is the name under which the
+function will be available from SQL.
+
+=item $argc
+
+This is an integer which tells the SQL parser how many arguments the
+function takes. If that number is -1, the function can take any number
+of arguments.
+
+=item $pkg
+
+This is the package which implements the aggregator interface.
+
+=back
+
+The aggregator interface consists of defining three methods:
+
+=over
+
+=item new()
+
+This method will be called once to create an object which should
+be used to aggregate the rows in a particular group. The step() and
+finalize() methods will be called upon the reference returned by
+the method.
+
+=item step(@_)
+
+This method will be called once for each row in the aggregate.
+
+=item finalize()
+
+This method will be called once all rows in the aggregate have been
+processed, and it should return the aggregate function's result. When
+there are no rows in the aggregate, finalize() will be called right
+after new().
+
+=back
+
+Here is a simple aggregate function which returns the variance
+(example adapted from pysqlite):
+
+  package variance;
+
+  sub new { bless [], shift; }
+
+  sub step {
+    my ( $self, $value ) = @_;
+
+    push @$self, $value;
+  }
+
+  sub finalize {
+    my $self = $_[0];
+
+    my $n = @$self;
+
+    # Variance is NULL unless there is more than one row
+    return undef unless $n > 1;
+
+    my $mu = 0;
+    foreach my $v ( @$self ) {
+      $mu += $v;
+    }
+    $mu /= $n;
+
+    my $sigma = 0;
+    foreach my $v ( @$self ) {
+      $sigma += ($v - $mu)**2;
+    }
+    $sigma = $sigma / ($n - 1);
+
+    return $sigma;
+  }
+
+  $dbh->sqlite_create_aggregate( "variance", 1, 'variance' );
+
+The aggregate function can then be used as:
+
+  SELECT group_name, variance(score)
+  FROM results
+  GROUP BY group_name;
+
+For more examples, see the L<DBD::SQLite::Cookbook>.
+
+=head2 $dbh->sqlite_progress_handler( $n_opcodes, $code_ref )
+
+This method registers a handler to be invoked periodically during
+long-running calls to SQLite.
+
+An example use for this interface is to keep a GUI updated during a
+large query. The parameters are:
+
+=over
+
+=item $n_opcodes
+
+The progress handler is invoked once for every C<$n_opcodes>
+virtual machine opcodes in SQLite.
+
+=item $code_ref
+
+Reference to the handler subroutine. If the progress handler returns
+non-zero, the SQLite operation is interrupted. This feature can be used to
+implement a "Cancel" button on a GUI dialog box.
+
+Set this argument to C<undef> if you want to unregister a previous
+progress handler.
+
+=back
+
+=head2 $dbh->sqlite_commit_hook( $code_ref )
+
+This method registers a callback function to be invoked whenever a
+transaction is committed. Any callback set by a previous call to
+C<sqlite_commit_hook> is overridden. A reference to the previous
+callback (if any) is returned. Registering an C<undef> disables the
+callback.
+
+When the commit hook callback returns zero, the commit operation is
+allowed to continue normally. If the callback returns non-zero, then
+the commit is converted into a rollback (in that case, any attempt to
+I<explicitly> call C<< $dbh->rollback() >> afterwards would yield an
+error).
+
+=head2 $dbh->sqlite_rollback_hook( $code_ref )
+
+This method registers a callback function to be invoked whenever a
+transaction is rolled back. Any callback set by a previous call to
+C<sqlite_rollback_hook> is overridden. A reference to the previous
+callback (if any) is returned. Registering an C<undef> disables the
+callback.
+
+=head2 $dbh->sqlite_update_hook( $code_ref )
+
+This method registers a callback function to be invoked whenever a row
+is updated, inserted or deleted. Any callback set by a previous call to
+C<sqlite_update_hook> is overridden. A reference to the previous
+callback (if any) is returned. Registering an C<undef> disables the
+callback.
+
+The callback will be called as
+
+  $code_ref->($action_code, $database, $table, $rowid)
+
+where
+
+=over
+
+=item $action_code
+
+is an integer equal to either C<DBD::SQLite::INSERT>,
+C<DBD::SQLite::DELETE> or C<DBD::SQLite::UPDATE>
+(see L</"Action Codes">);
+
+=item $database
+
+is the name of the database containing the affected row;
+
+=item $table
+
+is the name of the table containing the affected row;
+
+=item $rowid
+
+is the unique 64-bit signed integer key of the affected row within that table.
+
+=back
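+
+As an illustration, here is a minimal sketch of an update hook that
+simply logs every change made through the handle (remember to C<use
+DBD::SQLite> explicitly so the action-code constants are available, as
+explained in L</"DRIVER CONSTANTS"> below):
+
+  use DBD::SQLite;
+  $dbh->sqlite_update_hook(sub {
+    my ($action_code, $database, $table, $rowid) = @_;
+    my $action = $action_code == DBD::SQLite::INSERT ? "insert"
+               : $action_code == DBD::SQLite::DELETE ? "delete"
+               :                                       "update";
+    warn "$action on $database.$table (rowid $rowid)\n";
+  });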
+
+=head2 $dbh->sqlite_set_authorizer( $code_ref )
+
+This method registers an authorizer callback to be invoked whenever
+SQL statements are being compiled by the L<DBI/prepare> method. The
+authorizer callback should return C<DBD::SQLite::OK> to allow the
+action, C<DBD::SQLite::IGNORE> to disallow the specific action but
+allow the SQL statement to continue to be compiled, or
+C<DBD::SQLite::DENY> to cause the entire SQL statement to be rejected
+with an error. If the authorizer callback returns any other value,
+then the C<prepare> call that triggered the authorizer will fail with
+an error message.
+
+An authorizer is used when preparing SQL statements from an untrusted
+source, to ensure that the SQL statements do not try to access data
+they are not allowed to see, or that they do not try to execute
+malicious statements that damage the database. For example, an
+application may allow a user to enter arbitrary SQL queries for
+evaluation by a database. But the application does not want the user
+to be able to make arbitrary changes to the database. An authorizer
+could then be put in place while the user-entered SQL is being
+prepared that disallows everything except SELECT statements.
+
+The callback will be called as
+
+  $code_ref->($action_code, $string1, $string2, $database, $trigger_or_view)
+
+where
+
+=over
+
+=item $action_code
+
+is an integer that specifies what action is being authorized
+(see L</"Action Codes">).
+
+=item $string1, $string2
+
+are strings that depend on the action code
+(see L</"Action Codes">).
+
+=item $database
+
+is the name of the database (C<main>, C<temp>, etc.) if applicable.
+
+=item $trigger_or_view
+
+is the name of the innermost trigger or view that is responsible for
+the access attempt, or C<undef> if this access attempt is directly from
+top-level SQL code.
+
+=back
+
+=head2 $dbh->sqlite_backup_from_file( $filename )
+
+This method accesses the SQLite Online Backup API, and will take a backup of
+the named database file, copying it to, and overwriting, your current database
+connection. This can be particularly handy if your current connection is to the
+special :memory: database, and you wish to populate it from an existing DB.
+
+=head2 $dbh->sqlite_backup_to_file( $filename )
+
+This method accesses the SQLite Online Backup API, and will take a backup of
+the currently connected database, and write it out to the named file.
+
+=head2 $dbh->sqlite_enable_load_extension( $bool )
+
+Calling this method with a true value enables loading (external)
+sqlite3 extensions. After the call, you can load extensions like this:
+
+  $dbh->sqlite_enable_load_extension(1);
+  $sth = $dbh->prepare("select load_extension('libsqlitefunctions.so')")
+      or die "Cannot prepare: " . $dbh->errstr();
+
+=head1 DRIVER CONSTANTS
+
+A subset of SQLite C constants are made available to Perl,
+because they may be needed when writing
+hooks or authorizer callbacks. For accessing such constants,
+the C<DBD::SQLite> module must be explicitly C<use>d at compile
+time. For example, an authorizer that forbids any
+DELETE operation would be written as follows:
+
+  use DBD::SQLite;
+  $dbh->sqlite_set_authorizer(sub {
+    my $action_code = shift;
+    return $action_code == DBD::SQLite::DELETE ? DBD::SQLite::DENY
+                                               : DBD::SQLite::OK;
+  });
+
+The list of constants implemented in C<DBD::SQLite> is given
+below; more information can be found
+at L<http://www.sqlite.org/c3ref/constlist.html>.
+
+=head2 Authorizer Return Codes
+
+  OK
+  DENY
+  IGNORE
+
+=head2 Action Codes
+
+The L</"sqlite_set_authorizer"> method registers a callback function
+that is invoked to authorize certain SQL statement actions. The first
+parameter to the callback is an integer code that specifies what
+action is being authorized. The second and third parameters to the
+callback are strings, the meaning of which varies according to the
+action code. Below is the list of action codes, together with their
+associated strings.
+
+  # constant              string1         string2
+  # ========              =======         =======
+  CREATE_INDEX            Index Name      Table Name
+  CREATE_TABLE            Table Name      undef
+  CREATE_TEMP_INDEX       Index Name      Table Name
+  CREATE_TEMP_TABLE       Table Name      undef
+  CREATE_TEMP_TRIGGER     Trigger Name    Table Name
+  CREATE_TEMP_VIEW        View Name       undef
+  CREATE_TRIGGER          Trigger Name    Table Name
+  CREATE_VIEW             View Name       undef
+  DELETE                  Table Name      undef
+  DROP_INDEX              Index Name      Table Name
+  DROP_TABLE              Table Name      undef
+  DROP_TEMP_INDEX         Index Name      Table Name
+  DROP_TEMP_TABLE         Table Name      undef
+  DROP_TEMP_TRIGGER       Trigger Name    Table Name
+  DROP_TEMP_VIEW          View Name       undef
+  DROP_TRIGGER            Trigger Name    Table Name
+  DROP_VIEW               View Name       undef
+  INSERT                  Table Name      undef
+  PRAGMA                  Pragma Name     1st arg or undef
+  READ                    Table Name      Column Name
+  SELECT                  undef           undef
+  TRANSACTION             Operation       undef
+  UPDATE                  Table Name      Column Name
+  ATTACH                  Filename        undef
+  DETACH                  Database Name   undef
+  ALTER_TABLE             Database Name   Table Name
+  REINDEX                 Index Name      undef
+  ANALYZE                 Table Name      undef
+  CREATE_VTABLE           Table Name      Module Name
+  DROP_VTABLE             Table Name      Module Name
+  FUNCTION                undef           Function Name
+  SAVEPOINT               Operation       Savepoint Name
+
+=head1 COLLATION FUNCTIONS
+
+=head2 Definition
+
+SQLite v3 provides the ability for users to supply arbitrary
+comparison functions, known as user-defined "collation sequences" or
+"collating functions", to be used for comparing two text values.
+L<http://www.sqlite.org/datatype3.html#collation>
+explains how collations are used in various SQL expressions.
+
+=head2 Builtin collation sequences
+
+The following collation sequences are builtin within SQLite:
+
+=over
+
+=item B<BINARY>
+
+Compares string data using memcmp(), regardless of text encoding.
+
+=item B<NOCASE>
+
+The same as binary, except the 26 upper case characters of ASCII are
+folded to their lower case equivalents before the comparison is
+performed. Note that only ASCII characters are case folded. SQLite
+does not attempt to do full UTF case folding due to the size of the
+tables required.
+
+=item B<RTRIM>
+
+The same as binary, except that trailing space characters are ignored.
+
+=back
+
+In addition, C<DBD::SQLite> automatically installs the
+following collation sequences:
+
+=over
+
+=item B<perl>
+
+corresponds to the Perl C<cmp> operator
+
+=item B<perllocale>
+
+Perl C<cmp> operator, in a context where C<use locale> is activated.
+
+=back
+
+=head2 Usage
+
+You can write for example
+
+  CREATE TABLE foo(
+    txt1 COLLATE perl,
+    txt2 COLLATE perllocale,
+    txt3 COLLATE nocase
+  )
+
+or
+
+  SELECT * FROM foo ORDER BY name COLLATE perllocale
+
+=head2 Unicode handling
+
+If the attribute C<< $dbh->{sqlite_unicode} >> is set, strings coming from
+the database and passed to the collation function will be properly
+tagged with the utf8 flag; but this only works if the
+C<sqlite_unicode> attribute is set B<before> the first call to
+a perl collation sequence. The recommended way to activate unicode
+is to set the parameter at connection time:
+
+  my $dbh = DBI->connect(
+    "dbi:SQLite:dbname=foo", "", "",
+    {
+      RaiseError     => 1,
+      sqlite_unicode => 1,
+    }
+  );
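+
+As a further illustration, here is a sketch of a simple case-insensitive
+collation registered by hand through L</"sqlite_create_collation"> (the
+collation name C<caseless> is made up for this example):
+
+  $dbh->sqlite_create_collation( 'caseless', sub {
+    lc( $_[0] ) cmp lc( $_[1] );
+  } );
+
+  # then, in SQL:
+  #   SELECT * FROM foo ORDER BY txt1 COLLATE caseless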
+
+=head2 Adding user-defined collations
+
+The native SQLite API for adding user-defined collations is
+exposed through methods L</"sqlite_create_collation"> and
+L</"sqlite_collation_needed">.
+
+To avoid calling these functions every time a C<$dbh> handle is
+created, C<DBD::SQLite> offers a simpler interface through the
+C<%DBD::SQLite::COLLATION> hash: just insert your own
+collation functions in that hash, and whenever an unknown
+collation name is encountered in SQL, the appropriate collation
+function will be loaded on demand from the hash. For example,
+here is a way to sort text values regardless of their accented
+characters:
+
+  use DBD::SQLite;
+  $DBD::SQLite::COLLATION{no_accents} = sub {
+    my ( $a, $b ) = map lc, @_;
+    tr[àâáäåãçðèêéëìîíïñòôóöõøùûúüý]
+      [aaaaaacdeeeeiiiinoooooouuuuy] for $a, $b;
+    $a cmp $b;
+  };
+  my $dbh  = DBI->connect("dbi:SQLite:dbname=dbfile");
+  my $sql  = "SELECT ... FROM ... ORDER BY ... COLLATE no_accents";
+  my $rows = $dbh->selectall_arrayref($sql);
+
+The builtin C<perl> or C<perllocale> collations are predefined
+in that same hash.
+
+The COLLATION hash is a global registry within the current process;
+hence there is a risk of undesired side-effects. Therefore, to
+prevent action at distance, the hash is implemented as a "write-only"
+hash, that will happily accept new entries, but will raise an
+exception if any attempt is made to override or delete an existing
+entry (including the builtin C<perl> and C<perllocale>).
+
+If you really, really need to change or delete an entry, you can
+always grab the tied object underneath C<%DBD::SQLite::COLLATION> ---
+but don't do that unless you really know what you are doing. Also
+observe that changes in the global hash will not modify existing
+collations in existing database handles: it will only affect new
+I<requests> for collations. In other words, if you want to change
+the behaviour of a collation within an existing C<$dbh>, you
+need to call the L</"sqlite_create_collation"> method directly.
+
+=head1 TO DO
+
+The following items remain to be done.
+
+=head2 Warnings Upgrade
+
+We currently use a horridly hacky method to issue and suppress warnings.
+It suffices for now, but just barely.
+
+Migrate all of the warning code to use the recommended L<DBI> warnings.
+
+=head2 Leak Detection
+
+Implement one or more leak detection tests that only run during
+AUTOMATED_TESTING and RELEASE_TESTING and validate that none of the C
+code we work with leaks.
+
+=head2 Stream API for Blobs
+
+Reading/writing into blobs using C<sqlite3_blob_open> /
+C<sqlite3_blob_close>.
+
+=head2 Flags for sqlite3_open_v2
+
+Support the full API of sqlite3_open_v2 (flags for opening the file).
+
+=head1 SUPPORT
+
+Bugs should be reported via the CPAN bug tracker at
+
+L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=DBD-SQLite>
+
+Note that bugs in the bundled sqlite library (i.e. bugs in C<sqlite3.[ch]>)
+should be reported to the sqlite developers at sqlite.org via their bug
+tracker or via their mailing list.
+
+=head1 AUTHORS
+
+Matt Sergeant E<lt>matt@sergeant.orgE<gt>
+
+Francis J. Lacoste E<lt>flacoste@logreport.orgE<gt>
+
+Wolfgang Sourdeau E<lt>wolfgang@logreport.orgE<gt>
+
+Adam Kennedy E<lt>adamk@cpan.orgE<gt>
+
+Max Maischein E<lt>corion@cpan.orgE<gt>
+
+Laurent Dami E<lt>dami@cpan.orgE<gt>
+
+Kenichi Ishigaki E<lt>ishigaki@cpan.orgE<gt>
+
+=head1 COPYRIGHT
+
+The bundled SQLite code in this distribution is Public Domain.
+
+DBD::SQLite is copyright 2002 - 2007 Matt Sergeant.
+
+Some parts copyright 2008 Francis J. Lacoste.
+
+Some parts copyright 2008 Wolfgang Sourdeau.
+
+Some parts copyright 2008 - 2010 Adam Kennedy.
+
+Some parts derived from L<DBD::SQLite::Amalgamation>
+copyright 2008 Audrey Tang.
+
+This program is free software; you can redistribute
+it and/or modify it under the same terms as Perl itself.
+
+The full text of the license can be found in the
+LICENSE file included with this module.
+
+=cut
diff --git a/Master/tlpkg/tlperl/lib/DBD/SQLite/Cookbook.pod b/Master/tlpkg/tlperl/lib/DBD/SQLite/Cookbook.pod
new file mode 100755
index 00000000000..bdf6260b5b0
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/SQLite/Cookbook.pod
@@ -0,0 +1,168 @@
+=head1 NAME
+
+DBD::SQLite::Cookbook - The DBD::SQLite Cookbook
+
+=head1 DESCRIPTION
+
+This is the L<DBD::SQLite> cookbook.
+
+It is intended to provide a place to keep a variety of functions and
+formulas for use in callback APIs in L<DBD::SQLite>.
+
+=head2 Variance
+
+This is a simple aggregate function which returns a variance. It is
+adapted from an example implementation in pysqlite.
+
+  package variance;
+
+  sub new { bless [], shift; }
+
+  sub step {
+    my ( $self, $value ) = @_;
+
+    push @$self, $value;
+  }
+
+  sub finalize {
+    my $self = $_[0];
+
+    my $n = @$self;
+
+    # Variance is NULL unless there is more than one row
+    return undef unless $n > 1;
+
+    my $mu = 0;
+    foreach my $v ( @$self ) {
+      $mu += $v;
+    }
+    $mu /= $n;
+
+    my $sigma = 0;
+    foreach my $v ( @$self ) {
+      $sigma += ($v - $mu)**2;
+    }
+    $sigma = $sigma / ($n - 1);
+
+    return $sigma;
+  }
+
+  # NOTE: If you use an older DBI (< 1.608),
+  # use $dbh->func(..., "create_aggregate") instead.
+  $dbh->sqlite_create_aggregate( "variance", 1, 'variance' );
+
+The function can then be used as:
+
+  SELECT group_name, variance(score)
+  FROM results
+  GROUP BY group_name;
+
+=head2 Variance (Memory Efficient)
+
+A more efficient variance function, optimized for memory usage at the
+expense of precision:
+
+  package variance2;
+
+  # state is kept at package level, not in the object
+  my $sum   = 0;
+  my $count = 0;
+  my %hash;
+
+  sub new { bless [], shift; }
+
+  sub step {
+    my ( $self, $value ) = @_;
+
+    # by truncating and hashing, we can consume many more data points
+    $value = int($value); # change depending on need for precision
+                          # use sprintf for arbitrary fp precision
+    if (defined $hash{$value}) {
+      $hash{$value}++;
+    } else {
+      $hash{$value} = 1;
+    }
+    $sum += $value;
+    $count++;
+  }
+
+  sub finalize {
+    my $self = $_[0];
+
+    # Variance is NULL unless there is more than one row
+    return undef unless $count > 1;
+
+    # calculate avg
+    my $mu = $sum / $count;
+
+    my $sigma = 0;
+    foreach my $h (keys %hash) {
+      $sigma += (($h - $mu)**2) * $hash{$h};
+    }
+    $sigma = $sigma / ($count - 1);
+
+    return $sigma;
+  }
+
+The function can then be used as:
+
+  SELECT group_name, variance2(score)
+  FROM results
+  GROUP BY group_name;
+
+=head2 Variance (Highly Scalable)
+
+A third variance implementation, designed for arbitrarily large data
+sets. It keeps only a running mean and sum of squared differences
+rather than the data itself (Welford's online algorithm):
+
+  package variance3;
+
+  my $mu    = 0;
+  my $count = 0;
+  my $S     = 0;
+
+  sub new { bless [], shift; }
+
+  sub step {
+    my ( $self, $value ) = @_;
+    $count++;
+    my $delta = $value - $mu;
+    $mu = $mu + $delta / $count;
+    $S  = $S  + $delta * ($value - $mu);
+  }
+
+  sub finalize {
+    my $self = $_[0];
+    # Variance is NULL unless there is more than one row
+    return undef unless $count > 1;
+    return $S / ($count - 1);
+  }
+
+The function can then be used as:
+
+  SELECT group_name, variance3(score)
+  FROM results
+  GROUP BY group_name;
+
+=head1 SUPPORT
+
+Bugs should be reported via the CPAN bug tracker at
+
+L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=DBD-SQLite>
+
+=head1 TO DO
+
+* Add more and varied cookbook recipes, until we have enough to
+turn them into a separate CPAN distribution.
+
+* Create a series of test scripts that validate the cookbook recipes.
+
+=head1 AUTHOR
+
+Adam Kennedy E<lt>adamk@cpan.orgE<gt>
+
+=head1 COPYRIGHT
+
+Copyright 2009 Adam Kennedy.
+
+This program is free software; you can redistribute
+it and/or modify it under the same terms as Perl itself.
+
+The full text of the license can be found in the
+LICENSE file included with this module.
diff --git a/Master/tlpkg/tlperl/lib/DBD/Sponge.pm b/Master/tlpkg/tlperl/lib/DBD/Sponge.pm
new file mode 100755
index 00000000000..2413bc08506
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/Sponge.pm
@@ -0,0 +1,305 @@
+{
+    package DBD::Sponge;
+
+    require DBI;
+    require Carp;
+
+    our @EXPORT = qw(); # Do NOT @EXPORT anything.
+    our $VERSION = sprintf("12.%06d", q$Revision: 10002 $ =~ /(\d+)/o);
+
+
+#   $Id: Sponge.pm 10002 2007-09-26 21:03:25Z timbo $
+#
+#   Copyright (c) 1994-2003 Tim Bunce Ireland
+#
+#   You may distribute under the terms of either the GNU General Public
+#   License or the Artistic License, as specified in the Perl README file.
+ + $drh = undef; # holds driver handle once initialised + my $methods_already_installed; + + sub driver{ + return $drh if $drh; + + DBD::Sponge::db->install_method("sponge_test_installed_method") + unless $methods_already_installed++; + + my($class, $attr) = @_; + $class .= "::dr"; + ($drh) = DBI::_new_drh($class, { + 'Name' => 'Sponge', + 'Version' => $VERSION, + 'Attribution' => "DBD::Sponge $VERSION (fake cursor driver) by Tim Bunce", + }); + $drh; + } + + sub CLONE { + undef $drh; + } +} + + +{ package DBD::Sponge::dr; # ====== DRIVER ====== + $imp_data_size = 0; + # we use default (dummy) connect method +} + + +{ package DBD::Sponge::db; # ====== DATABASE ====== + $imp_data_size = 0; + use strict; + + sub prepare { + my($dbh, $statement, $attribs) = @_; + my $rows = delete $attribs->{'rows'} + or return $dbh->set_err($DBI::stderr,"No rows attribute supplied to prepare"); + my ($outer, $sth) = DBI::_new_sth($dbh, { + 'Statement' => $statement, + 'rows' => $rows, + (map { exists $attribs->{$_} ? ($_=>$attribs->{$_}) : () } + qw(execute_hook) + ), + }); + if (my $behave_like = $attribs->{behave_like}) { + $outer->{$_} = $behave_like->{$_} + foreach (qw(RaiseError PrintError HandleError ShowErrorStatement)); + } + + if ($statement =~ /^\s*insert\b/) { # very basic, just for testing execute_array() + $sth->{is_insert} = 1; + my $NUM_OF_PARAMS = $attribs->{NUM_OF_PARAMS} + or return $dbh->set_err($DBI::stderr,"NUM_OF_PARAMS not specified for INSERT statement"); + $sth->STORE('NUM_OF_PARAMS' => $attribs->{NUM_OF_PARAMS} ); + } + else { #assume select + + # we need to set NUM_OF_FIELDS + my $numFields; + if ($attribs->{'NUM_OF_FIELDS'}) { + $numFields = $attribs->{'NUM_OF_FIELDS'}; + } elsif ($attribs->{'NAME'}) { + $numFields = @{$attribs->{NAME}}; + } elsif ($attribs->{'TYPE'}) { + $numFields = @{$attribs->{TYPE}}; + } elsif (my $firstrow = $rows->[0]) { + $numFields = scalar @$firstrow; + } else { + return $dbh->set_err($DBI::stderr, 'Cannot determine NUM_OF_FIELDS'); + } + $sth->STORE('NUM_OF_FIELDS' => $numFields); + $sth->{NAME} = $attribs->{NAME} + || [ map { "col$_" } 1..$numFields ]; + $sth->{TYPE} = $attribs->{TYPE} + || [ (DBI::SQL_VARCHAR()) x $numFields ]; + $sth->{PRECISION} = $attribs->{PRECISION} + || [ map { length($sth->{NAME}->[$_]) } 0..$numFields -1 ]; + $sth->{SCALE} = $attribs->{SCALE} + || [ (0) x $numFields ]; + $sth->{NULLABLE} = $attribs->{NULLABLE} + || [ (2) x $numFields ]; + } + + $outer; + } + + sub type_info_all { + my ($dbh) = @_; + my $ti = [ + { TYPE_NAME => 0, + DATA_TYPE => 1, + PRECISION => 2, + LITERAL_PREFIX => 3, + LITERAL_SUFFIX => 4, + CREATE_PARAMS => 5, + NULLABLE => 6, + CASE_SENSITIVE => 7, + SEARCHABLE => 8, + UNSIGNED_ATTRIBUTE=> 9, + MONEY => 10, + AUTO_INCREMENT => 11, + LOCAL_TYPE_NAME => 12, + MINIMUM_SCALE => 13, + MAXIMUM_SCALE => 14, + }, + [ 'VARCHAR', DBI::SQL_VARCHAR(), undef, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ], + ]; + return $ti; + } + + sub FETCH { + my ($dbh, $attrib) = @_; + # In reality this would interrogate the database engine to + # either return dynamic values that cannot be precomputed + # or fetch and cache attribute values too expensive to prefetch. 
+ return 1 if $attrib eq 'AutoCommit'; + # else pass up to DBI to handle + return $dbh->SUPER::FETCH($attrib); + } + + sub STORE { + my ($dbh, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + if ($attrib eq 'AutoCommit') { + return 1 if $value; # is already set + Carp::croak("Can't disable AutoCommit"); + } + return $dbh->SUPER::STORE($attrib, $value); + } + + sub sponge_test_installed_method { + my ($dbh, @args) = @_; + return $dbh->set_err(42, "not enough parameters") unless @args >= 2; + return \@args; + } +} + + +{ package DBD::Sponge::st; # ====== STATEMENT ====== + $imp_data_size = 0; + use strict; + + sub execute { + my $sth = shift; + + # hack to support ParamValues (when not using bind_param) + $sth->{ParamValues} = (@_) ? { map { $_ => $_[$_-1] } 1..@_ } : undef; + + if (my $hook = $sth->{execute_hook}) { + &$hook($sth, @_) or return; + } + + if ($sth->{is_insert}) { + my $row; + $row = (@_) ? [ @_ ] : die "bind_param not supported yet" ; + my $NUM_OF_PARAMS = $sth->{NUM_OF_PARAMS}; + return $sth->set_err($DBI::stderr, @$row." values bound (@$row) but $NUM_OF_PARAMS expected") + if @$row != $NUM_OF_PARAMS; + { local $^W; $sth->trace_msg("inserting (@$row)\n"); } + push @{ $sth->{rows} }, $row; + } + else { # mark select sth as Active + $sth->STORE(Active => 1); + } + # else do nothing for select as data is already in $sth->{rows} + return 1; + } + + sub fetch { + my ($sth) = @_; + my $row = shift @{$sth->{'rows'}}; + unless ($row) { + $sth->STORE(Active => 0); + return undef; + } + return $sth->_set_fbav($row); + } + *fetchrow_arrayref = \&fetch; + + sub FETCH { + my ($sth, $attrib) = @_; + # would normally validate and only fetch known attributes + # else pass up to DBI to handle + return $sth->SUPER::FETCH($attrib); + } + + sub STORE { + my ($sth, $attrib, $value) = @_; + # would normally validate and only store known attributes + # else pass up to DBI to handle + return $sth->SUPER::STORE($attrib, $value); + } +} + +1; + +__END__ + +=pod + +=head1 NAME + +DBD::Sponge - Create a DBI statement handle from Perl data + +=head1 SYNOPSIS + + my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 }); + my $sth = $sponge->prepare($statement, { + rows => $data, + NAME => $names, + %attr + } + ); + +=head1 DESCRIPTION + +DBD::Sponge is useful for making a Perl data structure accessible through a +standard DBI statement handle. This may be useful to DBD module authors who +need to transform data in this way. + +=head1 METHODS + +=head2 connect() + + my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 }); + +Here's a sample syntax for creating a database handle for the Sponge driver. +No username and password are needed. + +=head2 prepare() + + my $sth = $sponge->prepare($statement, { + rows => $data, + NAME => $names, + %attr + } + ); + +=over 4 + +=item * + +The C<$statement> here is an arbitrary statement or name you want +to provide as identity of your data. If you're using DBI::Profile +it will appear in the profile data. + +Generally it's expected that you are preparing a statement handle +as if a C<select> statement happened. + +=item * + +C<$data> is a reference to the data you are providing, given as an array of arrays. + +=item * + +C<$names> is a reference an array of column names for the C<$data> you are providing. +The number and order should match the number and ordering of the C<$data> columns. 
+ +=item * + +C<%attr> is a hash of other standard DBI attributes that you might pass to a prepare statement. + +Currently only NAME, TYPE, and PRECISION are supported. + +=back + +=head1 BUGS + +Using this module to prepare INSERT-like statements is not currently documented. + +=head1 AUTHOR AND COPYRIGHT + +This module is Copyright (c) 2003 Tim Bunce + +Documentation initially written by Mark Stosberg + +The DBD::Sponge module is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. In particular permission +is granted to Tim Bunce for distributing this as a part of the DBI. + +=head1 SEE ALSO + +L<DBI> + +=cut diff --git a/Master/tlpkg/tlperl/lib/DBD/mysql.pm b/Master/tlpkg/tlperl/lib/DBD/mysql.pm new file mode 100755 index 00000000000..d961d08baf0 --- /dev/null +++ b/Master/tlpkg/tlperl/lib/DBD/mysql.pm @@ -0,0 +1,1939 @@ +# -*- cperl -*- + +package DBD::mysql; +use strict; +use vars qw(@ISA $VERSION $err $errstr $drh); + +use DBI (); +use DynaLoader(); +use Carp (); +@ISA = qw(DynaLoader); + +$VERSION = '4.012'; + +bootstrap DBD::mysql $VERSION; + + +$err = 0; # holds error code for DBI::err +$errstr = ""; # holds error string for DBI::errstr +$drh = undef; # holds driver handle once initialised + +sub driver{ + return $drh if $drh; + my($class, $attr) = @_; + + $class .= "::dr"; + + # not a 'my' since we use it above to prevent multiple drivers + $drh = DBI::_new_drh($class, { 'Name' => 'mysql', + 'Version' => $VERSION, + 'Err' => \$DBD::mysql::err, + 'Errstr' => \$DBD::mysql::errstr, + 'Attribution' => 'DBD::mysql by Patrick Galbraith' + }); + + $drh; +} + +sub CLONE { + undef $drh; +} + +sub _OdbcParse($$$) { + my($class, $dsn, $hash, $args) = @_; + my($var, $val); + if (!defined($dsn)) { + return; + } + while (length($dsn)) { + if ($dsn =~ /([^:;]*)[:;](.*)/) { + $val = $1; + $dsn = $2; + } else { + $val = $dsn; + $dsn = ''; + } + if ($val =~ /([^=]*)=(.*)/) { + $var = $1; + $val = $2; + if ($var eq 'hostname' || $var eq 'host') { + $hash->{'host'} = $val; + } elsif ($var eq 'db' || $var eq 'dbname') { + $hash->{'database'} = $val; + } else { + $hash->{$var} = $val; + } + } else { + foreach $var (@$args) { + if (!defined($hash->{$var})) { + $hash->{$var} = $val; + last; + } + } + } + } +} + +sub _OdbcParseHost ($$) { + my($class, $dsn) = @_; + my($hash) = {}; + $class->_OdbcParse($dsn, $hash, ['host', 'port']); + ($hash->{'host'}, $hash->{'port'}); +} + +sub AUTOLOAD { + my ($meth) = $DBD::mysql::AUTOLOAD; + my ($smeth) = $meth; + $smeth =~ s/(.*)\:\://; + + my $val = constant($smeth, @_ ? $_[0] : 0); + if ($! 
== 0) { eval "sub $meth { $val }"; return $val; } + + Carp::croak "$meth: Not defined"; +} + +1; + + +package DBD::mysql::dr; # ====== DRIVER ====== +use strict; +use DBI qw(:sql_types); +use DBI::Const::GetInfoType; + +sub connect { + my($drh, $dsn, $username, $password, $attrhash) = @_; + my($port); + my($cWarn); + my $connect_ref= { 'Name' => $dsn }; + my $dbi_imp_data; + + # Avoid warnings for undefined values + $username ||= ''; + $password ||= ''; + $attrhash ||= {}; + + # create a 'blank' dbh + my($this, $privateAttrHash) = (undef, $attrhash); + $privateAttrHash = { %$privateAttrHash, + 'Name' => $dsn, + 'user' => $username, + 'password' => $password + }; + + DBD::mysql->_OdbcParse($dsn, $privateAttrHash, + ['database', 'host', 'port']); + + + if ($DBI::VERSION >= 1.49) + { + $dbi_imp_data = delete $attrhash->{dbi_imp_data}; + $connect_ref->{'dbi_imp_data'} = $dbi_imp_data; + } + + if (!defined($this = DBI::_new_dbh($drh, + $connect_ref, + $privateAttrHash))) + { + return undef; + } + + # Call msqlConnect func in mSQL.xs file + # and populate internal handle data. + DBD::mysql::db::_login($this, $dsn, $username, $password) + or $this = undef; + + if ($this && ($ENV{MOD_PERL} || $ENV{GATEWAY_INTERFACE})) { + $this->{mysql_auto_reconnect} = 1; + } + $this; +} + +sub data_sources { + my($self) = shift; + my($attributes) = shift; + my($host, $port, $user, $password) = ('', '', '', ''); + if ($attributes) { + $host = $attributes->{host} || ''; + $port = $attributes->{port} || ''; + $user = $attributes->{user} || ''; + $password = $attributes->{password} || ''; + } + my(@dsn) = $self->func($host, $port, $user, $password, '_ListDBs'); + my($i); + for ($i = 0; $i < @dsn; $i++) { + $dsn[$i] = "DBI:mysql:$dsn[$i]"; + } + @dsn; +} + +sub admin { + my($drh) = shift; + my($command) = shift; + my($dbname) = ($command eq 'createdb' || $command eq 'dropdb') ? + shift : ''; + my($host, $port) = DBD::mysql->_OdbcParseHost(shift(@_) || ''); + my($user) = shift || ''; + my($password) = shift || ''; + + $drh->func(undef, $command, + $dbname || '', + $host || '', + $port || '', + $user, $password, '_admin_internal'); +} + +package DBD::mysql::db; # ====== DATABASE ====== +use strict; +use DBI qw(:sql_types); + +%DBD::mysql::db::db2ANSI = ("INT" => "INTEGER", + "CHAR" => "CHAR", + "REAL" => "REAL", + "IDENT" => "DECIMAL" + ); + +### ANSI datatype mapping to mSQL datatypes +%DBD::mysql::db::ANSI2db = ("CHAR" => "CHAR", + "VARCHAR" => "CHAR", + "LONGVARCHAR" => "CHAR", + "NUMERIC" => "INTEGER", + "DECIMAL" => "INTEGER", + "BIT" => "INTEGER", + "TINYINT" => "INTEGER", + "SMALLINT" => "INTEGER", + "INTEGER" => "INTEGER", + "BIGINT" => "INTEGER", + "REAL" => "REAL", + "FLOAT" => "REAL", + "DOUBLE" => "REAL", + "BINARY" => "CHAR", + "VARBINARY" => "CHAR", + "LONGVARBINARY" => "CHAR", + "DATE" => "CHAR", + "TIME" => "CHAR", + "TIMESTAMP" => "CHAR" + ); + +sub prepare { + my($dbh, $statement, $attribs)= @_; + + # create a 'blank' dbh + my $sth = DBI::_new_sth($dbh, {'Statement' => $statement}); + + # Populate internal handle data. + if (!DBD::mysql::st::_prepare($sth, $statement, $attribs)) { + $sth = undef; + } + + $sth; +} + +sub db2ANSI { + my $self = shift; + my $type = shift; + return $DBD::mysql::db::db2ANSI{"$type"}; +} + +sub ANSI2db { + my $self = shift; + my $type = shift; + return $DBD::mysql::db::ANSI2db{"$type"}; +} + +sub admin { + my($dbh) = shift; + my($command) = shift; + my($dbname) = ($command eq 'createdb' || $command eq 'dropdb') ? 
+ shift : ''; + $dbh->{'Driver'}->func($dbh, $command, $dbname, '', '', '', + '_admin_internal'); +} + +sub _SelectDB ($$) { + die "_SelectDB is removed from this module; use DBI->connect instead."; +} + +sub table_info ($) { + my ($dbh, $catalog, $schema, $table, $type, $attr) = @_; + $dbh->{mysql_server_prepare}||= 0; + my $mysql_server_prepare_save= $dbh->{mysql_server_prepare}; + $dbh->{mysql_server_prepare}= 0; + my @names = qw(TABLE_CAT TABLE_SCHEM TABLE_NAME TABLE_TYPE REMARKS); + my @rows; + + my $sponge = DBI->connect("DBI:Sponge:", '','') + or return $dbh->DBI::set_err($DBI::err, "DBI::Sponge: $DBI::errstr"); + +# Return the list of catalogs + if (defined $catalog && $catalog eq "%" && + (!defined($schema) || $schema eq "") && + (!defined($table) || $table eq "")) + { + @rows = (); # Empty, because MySQL doesn't support catalogs (yet) + } + # Return the list of schemas + elsif (defined $schema && $schema eq "%" && + (!defined($catalog) || $catalog eq "") && + (!defined($table) || $table eq "")) + { + my $sth = $dbh->prepare("SHOW DATABASES") + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return undef); + + $sth->execute() + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return DBI::set_err($dbh, $sth->err(), $sth->errstr())); + + while (my $ref = $sth->fetchrow_arrayref()) + { + push(@rows, [ undef, $ref->[0], undef, undef, undef ]); + } + } + # Return the list of table types + elsif (defined $type && $type eq "%" && + (!defined($catalog) || $catalog eq "") && + (!defined($schema) || $schema eq "") && + (!defined($table) || $table eq "")) + { + @rows = ( + [ undef, undef, undef, "TABLE", undef ], + [ undef, undef, undef, "VIEW", undef ], + ); + } + # Special case: a catalog other than undef, "", or "%" + elsif (defined $catalog && $catalog ne "" && $catalog ne "%") + { + @rows = (); # Nothing, because MySQL doesn't support catalogs yet. + } + # Uh oh, we actually have a meaty table_info call. Work is required! + else + { + my @schemas; + # If no table was specified, we want them all + $table ||= "%"; + + # If something was given for the schema, we need to expand it to + # a list of schemas, since it may be a wildcard. + if (defined $schema && $schema ne "") + { + my $sth = $dbh->prepare("SHOW DATABASES LIKE " . + $dbh->quote($schema)) + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return undef); + $sth->execute() + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return DBI::set_err($dbh, $sth->err(), $sth->errstr())); + + while (my $ref = $sth->fetchrow_arrayref()) + { + push @schemas, $ref->[0]; + } + } + # Otherwise we want the current database + else + { + push @schemas, $dbh->selectrow_array("SELECT DATABASE()"); + } + + # Figure out which table types are desired + my ($want_tables, $want_views); + if (defined $type && $type ne "") + { + $want_tables = ($type =~ m/table/i); + $want_views = ($type =~ m/view/i); + } + else + { + $want_tables = $want_views = 1; + } + + for my $database (@schemas) + { + my $sth = $dbh->prepare("SHOW /*!50002 FULL*/ TABLES FROM " . + $dbh->quote_identifier($database) . + " LIKE " . $dbh->quote($table)) + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return undef); + + $sth->execute() or + ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return DBI::set_err($dbh, $sth->err(), $sth->errstr())); + + while (my $ref = $sth->fetchrow_arrayref()) + { + my $type = (defined $ref->[1] && + $ref->[1] =~ /view/i) ? 
'VIEW' : 'TABLE'; + next if $type eq 'TABLE' && not $want_tables; + next if $type eq 'VIEW' && not $want_views; + push @rows, [ undef, $database, $ref->[0], $type, undef ]; + } + } + } + + my $sth = $sponge->prepare("table_info", + { + rows => \@rows, + NUM_OF_FIELDS => scalar @names, + NAME => \@names, + }) + or ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return $dbh->DBI::set_err($sponge->err(), $sponge->errstr())); + + $dbh->{mysql_server_prepare}= $mysql_server_prepare_save; + return $sth; +} + +sub _ListTables { + my $dbh = shift; + if (!$DBD::mysql::QUIET) { + warn "_ListTables is deprecated, use \$dbh->tables()"; + } + return map { $_ =~ s/.*\.//; $_ } $dbh->tables(); +} + + +sub column_info { + my ($dbh, $catalog, $schema, $table, $column) = @_; + $dbh->{mysql_server_prepare}||= 0; + my $mysql_server_prepare_save= $dbh->{mysql_server_prepare}; + $dbh->{mysql_server_prepare}= 0; + + # ODBC allows a NULL to mean all columns, so we'll accept undef + $column = '%' unless defined $column; + + my $ER_NO_SUCH_TABLE= 1146; + + my $table_id = $dbh->quote_identifier($catalog, $schema, $table); + + my @names = qw( + TABLE_CAT TABLE_SCHEM TABLE_NAME COLUMN_NAME + DATA_TYPE TYPE_NAME COLUMN_SIZE BUFFER_LENGTH DECIMAL_DIGITS + NUM_PREC_RADIX NULLABLE REMARKS COLUMN_DEF + SQL_DATA_TYPE SQL_DATETIME_SUB CHAR_OCTET_LENGTH + ORDINAL_POSITION IS_NULLABLE CHAR_SET_CAT + CHAR_SET_SCHEM CHAR_SET_NAME COLLATION_CAT COLLATION_SCHEM COLLATION_NAME + UDT_CAT UDT_SCHEM UDT_NAME DOMAIN_CAT DOMAIN_SCHEM DOMAIN_NAME + SCOPE_CAT SCOPE_SCHEM SCOPE_NAME MAX_CARDINALITY + DTD_IDENTIFIER IS_SELF_REF + mysql_is_pri_key mysql_type_name mysql_values + mysql_is_auto_increment + ); + my %col_info; + + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + # only ignore ER_NO_SUCH_TABLE in internal_execute if issued from here + my $desc_sth = $dbh->prepare("DESCRIBE $table_id " . $dbh->quote($column)); + my $desc = $dbh->selectall_arrayref($desc_sth, { Columns=>{} }); + + #return $desc_sth if $desc_sth->err(); + if (my $err = $desc_sth->err()) + { + # return the error, unless it is due to the table not + # existing per DBI spec + if ($err != $ER_NO_SUCH_TABLE) + { + $dbh->{mysql_server_prepare}= $mysql_server_prepare_save; + return undef; + } + $dbh->set_err(undef,undef); + $desc = []; + } + + my $ordinal_pos = 0; + for my $row (@$desc) + { + my $type = $row->{type}; + $type =~ m/^(\w+)(?:\((.*?)\))?\s*(.*)/; + my $basetype = lc($1); + my $typemod = $2; + my $attr = $3; + + my $info = $col_info{ $row->{field} }= { + TABLE_CAT => $catalog, + TABLE_SCHEM => $schema, + TABLE_NAME => $table, + COLUMN_NAME => $row->{field}, + NULLABLE => ($row->{null} eq 'YES') ? 1 : 0, + IS_NULLABLE => ($row->{null} eq 'YES') ? "YES" : "NO", + TYPE_NAME => uc($basetype), + COLUMN_DEF => $row->{default}, + ORDINAL_POSITION => ++$ordinal_pos, + mysql_is_pri_key => ($row->{key} eq 'PRI'), + mysql_type_name => $row->{type}, + mysql_is_auto_increment => ($row->{extra} =~ /auto_increment/i ? 1 : 0), + }; + # + # This code won't deal with a pathalogical case where a value + # contains a single quote followed by a comma, and doesn't unescape + # any escaped values. But who would use those in an enum or set? + # + my @type_params= ($typemod && index($typemod,"'")>=0) ? 
+ ("$typemod," =~ /'(.*?)',/g) # assume all are quoted + : split /,/, $typemod||''; # no quotes, plain list + s/''/'/g for @type_params; # undo doubling of quotes + + my @type_attr= split / /, $attr||''; + + $info->{DATA_TYPE}= SQL_VARCHAR(); + if ($basetype =~ /^(char|varchar|\w*text|\w*blob)/) + { + $info->{DATA_TYPE}= SQL_CHAR() if $basetype eq 'char'; + if ($type_params[0]) + { + $info->{COLUMN_SIZE} = $type_params[0]; + } + else + { + $info->{COLUMN_SIZE} = 65535; + $info->{COLUMN_SIZE} = 255 if $basetype =~ /^tiny/; + $info->{COLUMN_SIZE} = 16777215 if $basetype =~ /^medium/; + $info->{COLUMN_SIZE} = 4294967295 if $basetype =~ /^long/; + } + } + elsif ($basetype =~ /^(binary|varbinary)/) + { + $info->{COLUMN_SIZE} = $type_params[0]; + # SQL_BINARY & SQL_VARBINARY are tempting here but don't match the + # semantics for mysql (not hex). SQL_CHAR & SQL_VARCHAR are correct here. + $info->{DATA_TYPE} = ($basetype eq 'binary') ? SQL_CHAR() : SQL_VARCHAR(); + } + elsif ($basetype =~ /^(enum|set)/) + { + if ($basetype eq 'set') + { + $info->{COLUMN_SIZE} = length(join ",", @type_params); + } + else + { + my $max_len = 0; + length($_) > $max_len and $max_len = length($_) for @type_params; + $info->{COLUMN_SIZE} = $max_len; + } + $info->{"mysql_values"} = \@type_params; + } + elsif ($basetype =~ /int/) + { + # big/medium/small/tiny etc + unsigned? + $info->{DATA_TYPE} = SQL_INTEGER(); + $info->{NUM_PREC_RADIX} = 10; + $info->{COLUMN_SIZE} = $type_params[0]; + } + elsif ($basetype =~ /^decimal/) + { + $info->{DATA_TYPE} = SQL_DECIMAL(); + $info->{NUM_PREC_RADIX} = 10; + $info->{COLUMN_SIZE} = $type_params[0]; + $info->{DECIMAL_DIGITS} = $type_params[1]; + } + elsif ($basetype =~ /^(float|double)/) + { + $info->{DATA_TYPE} = ($basetype eq 'float') ? SQL_FLOAT() : SQL_DOUBLE(); + $info->{NUM_PREC_RADIX} = 2; + $info->{COLUMN_SIZE} = ($basetype eq 'float') ? 32 : 64; + } + elsif ($basetype =~ /date|time/) + { + # date/datetime/time/timestamp + if ($basetype eq 'time' or $basetype eq 'date') + { + #$info->{DATA_TYPE} = ($basetype eq 'time') ? SQL_TYPE_TIME() : SQL_TYPE_DATE(); + $info->{DATA_TYPE} = ($basetype eq 'time') ? SQL_TIME() : SQL_DATE(); + $info->{COLUMN_SIZE} = ($basetype eq 'time') ? 8 : 10; + } + else + { + # datetime/timestamp + #$info->{DATA_TYPE} = SQL_TYPE_TIMESTAMP(); + $info->{DATA_TYPE} = SQL_TIMESTAMP(); + $info->{SQL_DATA_TYPE} = SQL_DATETIME(); + $info->{SQL_DATETIME_SUB} = $info->{DATA_TYPE} - ($info->{SQL_DATA_TYPE} * 10); + $info->{COLUMN_SIZE} = ($basetype eq 'datetime') ? 
19 : $type_params[0] || 14; + } + $info->{DECIMAL_DIGITS}= 0; # no fractional seconds + } + elsif ($basetype eq 'year') + { + # no close standard so treat as int + $info->{DATA_TYPE} = SQL_INTEGER(); + $info->{NUM_PREC_RADIX} = 10; + $info->{COLUMN_SIZE} = 4; + } + else + { + Carp::carp("column_info: unrecognized column type '$basetype' of $table_id.$row->{field} treated as varchar"); + } + $info->{SQL_DATA_TYPE} ||= $info->{DATA_TYPE}; + #warn Dumper($info); + } + + my $sponge = DBI->connect("DBI:Sponge:", '','') + or ( $dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return $dbh->DBI::set_err($DBI::err, "DBI::Sponge: $DBI::errstr")); + + my $sth = $sponge->prepare("column_info $table", { + rows => [ map { [ @{$_}{@names} ] } values %col_info ], + NUM_OF_FIELDS => scalar @names, + NAME => \@names, + }) or + return ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + $dbh->DBI::set_err($sponge->err(), $sponge->errstr())); + + $dbh->{mysql_server_prepare}= $mysql_server_prepare_save; + return $sth; +} + + +sub primary_key_info { + my ($dbh, $catalog, $schema, $table) = @_; + $dbh->{mysql_server_prepare}||= 0; + my $mysql_server_prepare_save= $dbh->{mysql_server_prepare}; + + my $table_id = $dbh->quote_identifier($catalog, $schema, $table); + + my @names = qw( + TABLE_CAT TABLE_SCHEM TABLE_NAME COLUMN_NAME KEY_SEQ PK_NAME + ); + my %col_info; + + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $desc_sth = $dbh->prepare("SHOW KEYS FROM $table_id"); + my $desc= $dbh->selectall_arrayref($desc_sth, { Columns=>{} }); + my $ordinal_pos = 0; + for my $row (grep { $_->{key_name} eq 'PRIMARY'} @$desc) + { + $col_info{ $row->{column_name} }= { + TABLE_CAT => $catalog, + TABLE_SCHEM => $schema, + TABLE_NAME => $table, + COLUMN_NAME => $row->{column_name}, + KEY_SEQ => $row->{seq_in_index}, + PK_NAME => $row->{key_name}, + }; + } + + my $sponge = DBI->connect("DBI:Sponge:", '','') + or + ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return $dbh->DBI::set_err($DBI::err, "DBI::Sponge: $DBI::errstr")); + + my $sth= $sponge->prepare("primary_key_info $table", { + rows => [ map { [ @{$_}{@names} ] } values %col_info ], + NUM_OF_FIELDS => scalar @names, + NAME => \@names, + }) or + ($dbh->{mysql_server_prepare}= $mysql_server_prepare_save && + return $dbh->DBI::set_err($sponge->err(), $sponge->errstr())); + + $dbh->{mysql_server_prepare}= $mysql_server_prepare_save; + + return $sth; +} + + +sub foreign_key_info { + my ($dbh, + $pk_catalog, $pk_schema, $pk_table, + $fk_catalog, $fk_schema, $fk_table, + ) = @_; + + # INFORMATION_SCHEMA.KEY_COLUMN_USAGE was added in 5.0.6 + my ($maj, $min, $point) = _version($dbh); + return if $maj < 5 || ($maj == 5 && $point < 6); + + my $sql = <<'EOF'; +SELECT NULL AS PKTABLE_CAT, + A.REFERENCED_TABLE_SCHEMA AS PKTABLE_SCHEM, + A.REFERENCED_TABLE_NAME AS PKTABLE_NAME, + A.REFERENCED_COLUMN_NAME AS PKCOLUMN_NAME, + A.TABLE_CATALOG AS FKTABLE_CAT, + A.TABLE_SCHEMA AS FKTABLE_SCHEM, + A.TABLE_NAME AS FKTABLE_NAME, + A.COLUMN_NAME AS FKCOLUMN_NAME, + A.ORDINAL_POSITION AS KEY_SEQ, + NULL AS UPDATE_RULE, + NULL AS DELETE_RULE, + A.CONSTRAINT_NAME AS FK_NAME, + NULL AS PK_NAME, + NULL AS DEFERABILITY, + NULL AS UNIQUE_OR_PRIMARY + FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE A, + INFORMATION_SCHEMA.TABLE_CONSTRAINTS B + WHERE A.TABLE_SCHEMA = B.TABLE_SCHEMA AND A.TABLE_NAME = B.TABLE_NAME + AND A.CONSTRAINT_NAME = B.CONSTRAINT_NAME AND B.CONSTRAINT_TYPE IS NOT NULL +EOF + + my @where; + my @bind; + + # catalogs are not yet supported by MySQL + +# 
if (defined $pk_catalog) { +# push @where, 'A.REFERENCED_TABLE_CATALOG = ?'; +# push @bind, $pk_catalog; +# } + + if (defined $pk_schema) { + push @where, 'A.REFERENCED_TABLE_SCHEMA = ?'; + push @bind, $pk_schema; + } + + if (defined $pk_table) { + push @where, 'A.REFERENCED_TABLE_NAME = ?'; + push @bind, $pk_table; + } + +# if (defined $fk_catalog) { +# push @where, 'A.TABLE_CATALOG = ?'; +# push @bind, $fk_schema; +# } + + if (defined $fk_schema) { + push @where, 'A.TABLE_SCHEMA = ?'; + push @bind, $fk_schema; + } + + if (defined $fk_table) { + push @where, 'A.TABLE_NAME = ?'; + push @bind, $fk_table; + } + + if (@where) { + $sql .= ' AND '; + $sql .= join ' AND ', @where; + } + $sql .= " ORDER BY A.TABLE_SCHEMA, A.TABLE_NAME, A.ORDINAL_POSITION"; + + local $dbh->{FetchHashKeyName} = 'NAME_uc'; + my $sth = $dbh->prepare($sql); + $sth->execute(@bind); + + return $sth; +} + + +sub _version { + my $dbh = shift; + + return + $dbh->get_info($DBI::Const::GetInfoType::GetInfoType{SQL_DBMS_VER}) + =~ /(\d+)\.(\d+)\.(\d+)/; +} + + +#################### +# get_info() +# Generated by DBI::DBD::Metadata + +sub get_info { + my($dbh, $info_type) = @_; + require DBD::mysql::GetInfo; + my $v = $DBD::mysql::GetInfo::info{int($info_type)}; + $v = $v->($dbh) if ref $v eq 'CODE'; + return $v; +} + + + +package DBD::mysql::st; # ====== STATEMENT ====== +use strict; + +1; + +__END__ + +=pod + +=head1 NAME + +DBD::mysql - MySQL driver for the Perl5 Database Interface (DBI) + +=head1 SYNOPSIS + + use DBI; + + $dsn = "DBI:mysql:database=$database;host=$hostname;port=$port"; + + $dbh = DBI->connect($dsn, $user, $password); + + + $drh = DBI->install_driver("mysql"); + @databases = DBI->data_sources("mysql"); + or + @databases = DBI->data_sources("mysql", + {"host" => $host, "port" => $port, "user" => $user, password => $pass}); + + $sth = $dbh->prepare("SELECT * FROM foo WHERE bla"); + or + $sth = $dbh->prepare("LISTFIELDS $table"); + or + $sth = $dbh->prepare("LISTINDEX $table $index"); + $sth->execute; + $numRows = $sth->rows; + $numFields = $sth->{'NUM_OF_FIELDS'}; + $sth->finish; + + $rc = $drh->func('createdb', $database, $host, $user, $password, 'admin'); + $rc = $drh->func('dropdb', $database, $host, $user, $password, 'admin'); + $rc = $drh->func('shutdown', $host, $user, $password, 'admin'); + $rc = $drh->func('reload', $host, $user, $password, 'admin'); + + $rc = $dbh->func('createdb', $database, 'admin'); + $rc = $dbh->func('dropdb', $database, 'admin'); + $rc = $dbh->func('shutdown', 'admin'); + $rc = $dbh->func('reload', 'admin'); + + +=head1 EXAMPLE + + #!/usr/bin/perl + + use strict; + use DBI(); + + # Connect to the database. + my $dbh = DBI->connect("DBI:mysql:database=test;host=localhost", + "joe", "joe's password", + {'RaiseError' => 1}); + + # Drop table 'foo'. This may fail, if 'foo' doesn't exist. + # Thus we put an eval around it. + eval { $dbh->do("DROP TABLE foo") }; + print "Dropping foo failed: $@\n" if $@; + + # Create a new table 'foo'. This must not fail, thus we don't + # catch errors. + $dbh->do("CREATE TABLE foo (id INTEGER, name VARCHAR(20))"); + + # INSERT some data into 'foo'. We are using $dbh->quote() for + # quoting the name. + $dbh->do("INSERT INTO foo VALUES (1, " . $dbh->quote("Tim") . ")"); + + # Same thing, but using placeholders + $dbh->do("INSERT INTO foo VALUES (?, ?)", undef, 2, "Jochen"); + + # Now retrieve data from the table. 
+  my $sth = $dbh->prepare("SELECT * FROM foo");
+  $sth->execute();
+  while (my $ref = $sth->fetchrow_hashref()) {
+    print "Found a row: id = $ref->{'id'}, name = $ref->{'name'}\n";
+  }
+  $sth->finish();
+
+  # Disconnect from the database.
+  $dbh->disconnect();
+
+
+=head1 DESCRIPTION
+
+B<DBD::mysql> is the Perl5 Database Interface driver for the MySQL
+database. In other words: DBD::mysql is an interface between the Perl
+programming language and the MySQL programming API that comes with
+the MySQL relational database management system. Most functions
+provided by this programming API are supported. Some rarely used
+functions are missing, mainly because no one ever requested
+them. :-)
+
+In what follows we first discuss the use of DBD::mysql,
+because this is what you will need the most. For installation, see the
+sections on L<INSTALLATION>, and L<WIN32 INSTALLATION>
+below. See L<EXAMPLE> above for a simple example.
+
+From perl you activate the interface with the statement
+
+  use DBI;
+
+After that you can connect to multiple MySQL database servers
+and send multiple queries to any of them via a simple object oriented
+interface. Two types of objects are available: database handles and
+statement handles. Perl returns a database handle from the connect
+method like so:
+
+  $dbh = DBI->connect("DBI:mysql:database=$db;host=$host",
+                      $user, $password, {RaiseError => 1});
+
+Once you have connected to a database, you can execute SQL
+statements with:
+
+  my $query = sprintf("INSERT INTO foo VALUES (%d, %s)",
+                      $number, $dbh->quote("name"));
+  $dbh->do($query);
+
+See L<DBI(3)> for details on the quote and do methods. An alternative
+approach is
+
+  $dbh->do("INSERT INTO foo VALUES (?, ?)", undef,
+           $number, $name);
+
+in which case the quote method is executed automatically. See also
+the bind_param method in L<DBI(3)>. See L<DATABASE HANDLES> below
+for more details on database handles.
+
+If you want to retrieve results, you need to create a so-called
+statement handle with:
+
+  $sth = $dbh->prepare("SELECT * FROM $table");
+  $sth->execute();
+
+This statement handle can be used for multiple things. First of all
+you can retrieve a row of data:
+
+  my $row = $sth->fetchrow_hashref();
+
+If your table has columns ID and NAME, then $row will be a hash ref with
+keys ID and NAME. See L<STATEMENT HANDLES> below for more details on
+statement handles.
+
+But now for a more formal approach:
+
+
+=head2 Class Methods
+
+=over
+
+=item B<connect>
+
+  use DBI;
+
+  $dsn = "DBI:mysql:$database";
+  $dsn = "DBI:mysql:database=$database;host=$hostname";
+  $dsn = "DBI:mysql:database=$database;host=$hostname;port=$port";
+
+  $dbh = DBI->connect($dsn, $user, $password);
+
+A C<database> must always be specified.
+
+=over
+
+=item host
+
+=item port
+
+The hostname, if not specified or specified as '' or 'localhost', will
+default to a MySQL server running on the local machine using the default for
+the UNIX socket. To connect to a MySQL server on the local machine via TCP,
+you must specify the loopback IP address (127.0.0.1) as the host.
+
+Should the MySQL server be running on a non-standard port number,
+you may explicitly state the port number to connect to in the C<hostname>
+argument, by concatenating the I<hostname> and I<port number> together
+separated by a colon ( C<:> ) character or by using the C<port> argument.
+
+To connect to a MySQL server on localhost using TCP/IP, you must specify the
+hostname as 127.0.0.1 (with the optional port).
+
+=item mysql_client_found_rows
+
+Enables (TRUE value) or disables (FALSE value) the flag CLIENT_FOUND_ROWS
+while connecting to the MySQL server. This has a somewhat funny effect:
+Without mysql_client_found_rows, if you perform a query like
+
+  UPDATE $table SET id = 1 WHERE id = 1
+
+then the MySQL engine will always return 0, because no rows have changed.
+With mysql_client_found_rows however, it will return the number of rows
+that have an id of 1, as some people expect (at least for compatibility
+with other engines).
+
+=item mysql_compression
+
+As of MySQL 3.22.3, a new feature is supported: If your DSN contains
+the option "mysql_compression=1", then the communication between client
+and server will be compressed.
+
+=item mysql_connect_timeout
+
+If your DSN contains the option "mysql_connect_timeout=##", the connect
+request to the server will time out if it has not been successful after
+the given number of seconds.
+
+=item mysql_read_default_file
+
+=item mysql_read_default_group
+
+These options can be used to read a config file like /etc/my.cnf or
+~/.my.cnf. By default MySQL's C client library doesn't use any config
+files, unlike the client programs (mysql, mysqladmin, ...), which read
+config files on their own, outside of the C client library. Thus you
+need to explicitly request reading a config file, as in
+
+  $dsn = "DBI:mysql:test;mysql_read_default_file=/home/joe/my.cnf";
+  $dbh = DBI->connect($dsn, $user, $password)
+
+The option mysql_read_default_group can be used to specify the default
+group in the config file: Usually this is the I<client> group, but
+see the following example:
+
+  [client]
+  host=localhost
+
+  [perl]
+  host=perlhost
+
+(Note the order of the entries! The example won't work if you reverse
+the [client] and [perl] sections!)
+
+If you read this config file, then you'll typically be connected to
+I<localhost>. However, by using
+
+  $dsn = "DBI:mysql:test;mysql_read_default_group=perl;"
+    . "mysql_read_default_file=/home/joe/my.cnf";
+  $dbh = DBI->connect($dsn, $user, $password);
+
+you'll be connected to I<perlhost>. Note that if you specify a
+default group and do not specify a file, then the default config
+files will all be read. See the documentation of
+the C function mysql_options() for details.
+
+=item mysql_socket
+
+As of MySQL 3.21.15, it is possible to choose the Unix socket that is
+used for connecting to the server. This is done, for example, with
+
+  mysql_socket=/dev/mysql
+
+Usually there's no need for this option, unless you are using a socket
+location other than the one built into the client.
+
+=item mysql_ssl
+
+A true value turns on the CLIENT_SSL flag when connecting to the MySQL
+database:
+
+  mysql_ssl=1
+
+This means that your communication with the server will be encrypted.
+
+If you turn mysql_ssl on, you might also wish to use the following
+flags:
+
+=item mysql_ssl_client_key
+
+=item mysql_ssl_client_cert
+
+=item mysql_ssl_ca_file
+
+=item mysql_ssl_ca_path
+
+=item mysql_ssl_cipher
+
+These are used to specify the respective parameters of a call
+to mysql_ssl_set, if mysql_ssl is turned on.
+
+
+=item mysql_local_infile
+
+As of MySQL 3.23.49, the LOCAL capability for LOAD DATA may be disabled
+in the MySQL client library by default. If your DSN contains the option
+"mysql_local_infile=1", LOAD DATA LOCAL will be enabled. (However,
+this option is I<ineffective> if the server has also been configured to
+disallow LOCAL.)
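+
+For example, a minimal sketch (file name and table are illustrative
+only, and the server must permit LOCAL):
+
+  # assumes the server allows LOAD DATA LOCAL
+  my $dbh = DBI->connect(
+      "DBI:mysql:database=test;mysql_local_infile=1",
+      $user, $password, { RaiseError => 1 });
+  $dbh->do("LOAD DATA LOCAL INFILE '/tmp/foo.txt' INTO TABLE foo");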
+
+=item mysql_multi_statements
+
+As of MySQL 4.1, support for multiple statements separated by a semicolon
+(;) may be enabled by using this option. Enabling this option may cause
+problems if server-side prepared statements are also enabled.
+
+=item Prepared statement support (server side prepare)
+
+As of 3.0002_1, server-side prepared statements were on by default (if your
+server was >= 4.1.3). As of 3.0009, they were off by default again due to
+issues with the prepared statement API (all other MySQL connectors are
+set this way until C API issues are resolved). The requirement to use
+prepared statements still remains that you have a server >= 4.1.3.
+
+To use server-side prepared statements, all you need to do is set the
+variable mysql_server_prepare in the connect:
+
+  $dbh = DBI->connect(
+      "DBI:mysql:database=test;host=localhost;mysql_server_prepare=1",
+      "",
+      "",
+      { RaiseError => 1, AutoCommit => 1 }
+  );
+
+Note: the delimiter for this parameter is ';'.
+
+There are many benefits to using server-side prepared statements, mostly
+if you are performing many inserts, because a single statement is prepared
+once and then accepts multiple sets of insert values.
+
+To make sure that the 'make test' step tests whether server prepare works,
+you just need to export the environment variable MYSQL_SERVER_PREPARE:
+
+  export MYSQL_SERVER_PREPARE=1
+
+
+=item mysql_embedded_options
+
+The option C<mysql_embedded_options> can be used to pass 'command-line'
+options to the embedded server.
+
+Example:
+
+  use DBI;
+  $testdsn="DBI:mysqlEmb:database=test;mysql_embedded_options=--help,--verbose";
+  $dbh = DBI->connect($testdsn,"a","b");
+
+This would cause the command-line help of the embedded MySQL server library
+to be printed.
+
+
+=item mysql_embedded_groups
+
+The option C<mysql_embedded_groups> can be used to specify the groups in the
+config file (I<my.cnf>) which will be used to get options for the embedded
+server. If not specified, the [server] and [embedded] groups will be used.
+
+Example:
+
+  $testdsn="DBI:mysqlEmb:database=test;mysql_embedded_groups=embedded_server,common";
+
+
+=back
+
+=back
+
+
+=head2 Private MetaData Methods
+
+=over
+
+=item B<ListDBs>
+
+  my $drh = DBI->install_driver("mysql");
+  @dbs = $drh->func("$hostname:$port", '_ListDBs');
+  @dbs = $drh->func($hostname, $port, '_ListDBs');
+  @dbs = $dbh->func('_ListDBs');
+
+Returns a list of all databases managed by the MySQL server
+running on C<$hostname>, port C<$port>. This is a legacy
+method. Instead, you should use the portable method
+
+  @dbs = DBI->data_sources("mysql");
+
+=back
+
+
+=head2 Server Administration
+
+=over
+
+=item admin
+
+  $rc = $drh->func("createdb", $dbname, [host, user, password,], 'admin');
+  $rc = $drh->func("dropdb", $dbname, [host, user, password,], 'admin');
+  $rc = $drh->func("shutdown", [host, user, password,], 'admin');
+  $rc = $drh->func("reload", [host, user, password,], 'admin');
+
+  or
+
+  $rc = $dbh->func("createdb", $dbname, 'admin');
+  $rc = $dbh->func("dropdb", $dbname, 'admin');
+  $rc = $dbh->func("shutdown", 'admin');
+  $rc = $dbh->func("reload", 'admin');
+
+For server administration you need a server connection. For obtaining
+this connection you have two options: Either use a driver handle (drh)
+and supply the appropriate arguments (host, which defaults to localhost;
+user, which defaults to ''; and password, which defaults to ''). A driver
+handle can be obtained with
+
+  $drh = DBI->install_driver('mysql');
+
+Otherwise reuse the existing connection of a database handle (dbh).
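+
+For example, a minimal sketch of creating a database via a fresh driver
+handle (database name, host and credentials are placeholders):
+
+  # hypothetical host and credentials, for illustration only
+  my $drh = DBI->install_driver('mysql');
+  $drh->func('createdb', 'testdb', 'localhost', 'joe', 'password', 'admin')
+      or warn "createdb failed: $DBI::errstr";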
+
+There's only one function available for administrative purposes, comparable
+to the m(y)sqladmin programs. The command being executed depends on the
+first argument:
+
+=over
+
+=item createdb
+
+Creates the database $dbname. Equivalent to "m(y)sqladmin create $dbname".
+
+=item dropdb
+
+Drops the database $dbname. Equivalent to "m(y)sqladmin drop $dbname".
+
+It should be noted that database deletion is
+I<not prompted for> in any way. Nor is it undoable from DBI.
+
+  Once you issue the dropdb command, the database will be gone!
+
+This method should be used at your own risk.
+
+=item shutdown
+
+Silently shuts down the database engine. (Without prompting!)
+Equivalent to "m(y)sqladmin shutdown".
+
+=item reload
+
+Reloads the server's configuration files and/or tables. This can be
+particularly important if you modify access privileges or create new users.
+
+=back
+
+=back
+
+
+=head1 DATABASE HANDLES
+
+The DBD::mysql driver supports the following attributes of database
+handles (read only):
+
+  $errno = $dbh->{'mysql_errno'};
+  $error = $dbh->{'mysql_error'};
+  $info = $dbh->{'mysql_hostinfo'};
+  $info = $dbh->{'mysql_info'};
+  $insertid = $dbh->{'mysql_insertid'};
+  $info = $dbh->{'mysql_protoinfo'};
+  $info = $dbh->{'mysql_serverinfo'};
+  $info = $dbh->{'mysql_stat'};
+  $threadId = $dbh->{'mysql_thread_id'};
+
+These correspond to mysql_errno(), mysql_error(), mysql_get_host_info(),
+mysql_info(), mysql_insert_id(), mysql_get_proto_info(),
+mysql_get_server_info(), mysql_stat() and mysql_thread_id(),
+respectively.
+
+  $info_hashref = $dbh->{mysql_dbd_stats};
+
+DBD::mysql keeps track of some statistics in the mysql_dbd_stats attribute.
+The following stats are maintained:
+
+=over
+
+=item auto_reconnects_ok
+
+The number of times that DBD::mysql successfully reconnected to the mysql
+server.
+
+=item auto_reconnects_failed
+
+The number of times that DBD::mysql tried to reconnect to mysql but failed.
+
+=back
+
+The DBD::mysql driver also supports the following attribute(s) of database
+handles (read/write):
+
+  $bool_value = $dbh->{mysql_auto_reconnect};
+  $dbh->{mysql_auto_reconnect} = $AutoReconnect ? 1 : 0;
+
+
+=item mysql_auto_reconnect
+
+This attribute determines whether DBD::mysql will automatically reconnect
+to mysql if the connection is lost. This feature defaults to off; however,
+if either the GATEWAY_INTERFACE or MOD_PERL environment variable is set,
+DBD::mysql will turn mysql_auto_reconnect on. Setting mysql_auto_reconnect
+to on is not advised if 'lock tables' is used, because if DBD::mysql
+reconnects to mysql, all table locks will be lost. This attribute is
+ignored when AutoCommit is turned off: in that case DBD::mysql will not
+automatically reconnect to the server.
+
+=item mysql_use_result
+
+This attribute forces the driver to use mysql_use_result rather than
+mysql_store_result. The former is faster and less memory consuming, but
+tends to block other processes. (That's why mysql_store_result is the
+default.)
+
+It is possible to set the default value of the C<mysql_use_result>
+attribute for $dbh in several ways:
+
+  - through the DSN
+
+    $dbh = DBI->connect("DBI:mysql:test;mysql_use_result=1", "root", "");
+
+  - after creation of the database handle
+
+    $dbh->{'mysql_use_result'} = 0; # disable
+    $dbh->{'mysql_use_result'} = 1; # enable
+
+It is possible to set/unset the C<mysql_use_result> attribute after
+creation of the statement handle. See below.
+
+=item mysql_enable_utf8
+
+This attribute determines whether DBD::mysql should assume strings
+stored in the database are utf8. This feature defaults to off.
+
+When set, data retrieved from a textual column type (char, varchar,
+etc) will have the UTF-8 flag turned on if necessary. This enables
+character semantics on that string. You will also need to ensure that
+your database / table / column is configured to use UTF8. See Chapter
+10 of the mysql manual for details.
+
+Additionally, turning on this flag tells MySQL that incoming data should
+be treated as UTF-8. This will only take effect if used as part of the
+call to connect(). If you turn the flag on after connecting, you will
+need to issue the command C<SET NAMES utf8> to get the same effect.
+
+This option is experimental and may change in future versions.
+
+=item mysql_bind_type_guessing
+
+This attribute causes the driver (with emulated prepared statements)
+to attempt to guess if a value being bound is a numeric value,
+and if so, doesn't quote the value. This was created by
+Dragonchild and is one way to deal with the performance issue
+of using quotes in a statement that is inserting or updating a
+large numeric value. This was previously called
+C<unsafe_bind_type_guessing> because it is experimental. I have
+successfully run the full test suite with this option turned on,
+so the name can now be simply C<mysql_bind_type_guessing>.
+
+See bug: https://rt.cpan.org/Ticket/Display.html?id=43822
+
+C<mysql_bind_type_guessing> can be turned on via
+
+  - the DSN
+
+    my $dbh = DBI->connect('DBI:mysql:test', 'username', 'pass',
+        { mysql_bind_type_guessing => 1});
+
+  - OR after handle creation
+
+    $dbh->{mysql_bind_type_guessing} = 1;
+
+=item mysql_no_autocommit_cmd
+
+This attribute causes the driver not to issue 'set autocommit',
+either explicitly or through mysql_autocommit(). This is
+particularly useful in the case of using MySQL Proxy.
+
+See the bug report:
+
+https://rt.cpan.org/Public/Bug/Display.html?id=46308
+
+As well as:
+
+http://bugs.mysql.com/bug.php?id=32464
+
+C<mysql_no_autocommit_cmd> can be turned on via
+
+  - the DSN
+
+    my $dbh = DBI->connect('DBI:mysql:test', 'username', 'pass',
+        { mysql_no_autocommit_cmd => 1});
+
+  - OR after handle creation
+
+    $dbh->{mysql_no_autocommit_cmd} = 1;
+
+
+
+=head1 STATEMENT HANDLES
+
+The statement handles of DBD::mysql support a number
+of attributes. You access these by using, for example,
+
+  my $numFields = $sth->{'NUM_OF_FIELDS'};
+
+Note that most attributes are valid only after a successful I<execute>;
+an C<undef> value will be returned otherwise. The most important exception
+is the C<mysql_use_result> attribute: This forces the driver to use
+mysql_use_result rather than mysql_store_result. The former is faster
+and less memory consuming, but tends to block other processes. (That's why
+mysql_store_result is the default.)
+
+To set the C<mysql_use_result> attribute, use either of the following:
+
+  my $sth = $dbh->prepare("QUERY", { "mysql_use_result" => 1});
+
+or
+
+  my $sth = $dbh->prepare("QUERY");
+  $sth->{"mysql_use_result"} = 1;
+
+Column-dependent attributes, for example I<NAME>, the column names,
+are returned as a reference to an array. The array indices correspond
+to the indices of the arrays returned by I<fetchrow>
+and similar methods. For example the following code will print a
+header of column names together with all rows:
+
+  my $sth = $dbh->prepare("SELECT * FROM $table");
+  if (!$sth) {
+    die "Error:" . $dbh->errstr . "\n";
+  }
"\n"; + } + if (!$sth->execute) { + die "Error:" . $sth->errstr . "\n"; + } + my $names = $sth->{'NAME'}; + my $numFields = $sth->{'NUM_OF_FIELDS'}; + for (my $i = 0; $i < $numFields; $i++) { + printf("%s%s", $i ? "," : "", $$names[$i]); + } + print "\n"; + while (my $ref = $sth->fetchrow_arrayref) { + for (my $i = 0; $i < $numFields; $i++) { + printf("%s%s", $i ? "," : "", $$ref[$i]); + } + print "\n"; + } + +For portable applications you should restrict yourself to attributes with +capitalized or mixed case names. Lower case attribute names are private +to DBD::mysql. The attribute list includes: + +=over + +=item ChopBlanks + +this attribute determines whether a I<fetchrow> will chop preceding +and trailing blanks off the column values. Chopping blanks does not +have impact on the I<max_length> attribute. + +=item mysql_insertid + +MySQL has the ability to choose unique key values automatically. If this +happened, the new ID will be stored in this attribute. An alternative +way for accessing this attribute is via $dbh->{'mysql_insertid'}. +(Note we are using the $dbh in this case!) + +=item mysql_is_blob + +Reference to an array of boolean values; TRUE indicates, that the +respective column is a blob. This attribute is valid for MySQL only. + +=item mysql_is_key + +Reference to an array of boolean values; TRUE indicates, that the +respective column is a key. This is valid for MySQL only. + +=item mysql_is_num + +Reference to an array of boolean values; TRUE indicates, that the +respective column contains numeric values. + +=item mysql_is_pri_key + +Reference to an array of boolean values; TRUE indicates, that the +respective column is a primary key. + +=item mysql_is_auto_increment + +Reference to an array of boolean values; TRUE indicates that the +respective column is an AUTO_INCREMENT column. This is only valid +for MySQL. + +=item mysql_length + +=item mysql_max_length + +A reference to an array of maximum column sizes. The I<max_length> is +the maximum physically present in the result table, I<length> gives +the theoretically possible maximum. I<max_length> is valid for MySQL +only. + +=item NAME + +A reference to an array of column names. + +=item NULLABLE + +A reference to an array of boolean values; TRUE indicates that this column +may contain NULL's. + +=item NUM_OF_FIELDS + +Number of fields returned by a I<SELECT> or I<LISTFIELDS> statement. +You may use this for checking whether a statement returned a result: +A zero value indicates a non-SELECT statement like I<INSERT>, +I<DELETE> or I<UPDATE>. + +=item mysql_table + +A reference to an array of table names, useful in a I<JOIN> result. + +=item TYPE + +A reference to an array of column types. The engine's native column +types are mapped to portable types like DBI::SQL_INTEGER() or +DBI::SQL_VARCHAR(), as good as possible. Not all native types have +a meaningfull equivalent, for example DBD::mysql::FIELD_TYPE_INTERVAL +is mapped to DBI::SQL_VARCHAR(). +If you need the native column types, use I<mysql_type>. See below. + +=item mysql_type + +A reference to an array of MySQL's native column types, for example +DBD::mysql::FIELD_TYPE_SHORT() or DBD::mysql::FIELD_TYPE_STRING(). +Use the I<TYPE> attribute, if you want portable types like +DBI::SQL_SMALLINT() or DBI::SQL_VARCHAR(). + +=item mysql_type_name + +Similar to mysql, but type names and not numbers are returned. +Whenever possible, the ANSI SQL name is preferred. + +=item mysql_warning_count + +The number of warnings generated during execution of the SQL statement. 
+
+=back
+
+=head1 TRANSACTION SUPPORT
+
+Beginning with DBD::mysql 2.0416, transactions are supported.
+The transaction support works as follows:
+
+=over
+
+=item *
+
+By default AutoCommit mode is on, following the DBI specifications.
+
+=item *
+
+If you execute
+
+  $dbh->{'AutoCommit'} = 0;
+
+or
+
+  $dbh->{'AutoCommit'} = 1;
+
+then the driver will set the MySQL server variable autocommit to 0 or
+1, respectively. Switching from 0 to 1 will also issue a COMMIT,
+following the DBI specifications.
+
+=item *
+
+The methods
+
+  $dbh->rollback();
+  $dbh->commit();
+
+will issue the commands ROLLBACK and COMMIT, respectively. A
+ROLLBACK will also be issued if AutoCommit mode is off and the
+database handle's DESTROY method is called. Again, this is following
+the DBI specifications.
+
+=back
+
+Given the above, you should note the following:
+
+=over
+
+=item *
+
+You should never change the server variable autocommit manually,
+unless you are ignoring DBI's transaction support.
+
+=item *
+
+Switching AutoCommit mode from on to off or vice versa may fail.
+You should always check for errors when changing AutoCommit mode.
+The suggested way of doing so is using the DBI flag RaiseError.
+If you don't like RaiseError, you have to use code like the
+following:
+
+  $dbh->{'AutoCommit'} = 0;
+  if ($dbh->{'AutoCommit'}) {
+    # An error occurred!
+  }
+
+=item *
+
+If you detect an error while changing the AutoCommit mode, you
+should no longer use the database handle. In other words, you
+should disconnect and reconnect again, because the transaction
+mode is unpredictable. Alternatively you may verify the transaction
+mode by checking the value of the server variable autocommit.
+However, such behaviour isn't portable.
+
+=item *
+
+DBD::mysql has a "reconnect" feature that handles the so-called
+MySQL "morning bug": If the server has disconnected, most probably
+due to a timeout, then by default the driver will reconnect and
+attempt to execute the same SQL statement again. However, this
+behaviour is disabled when AutoCommit is off: Otherwise the
+transaction state would be completely unpredictable after a
+reconnect.
+
+=item *
+
+The "reconnect" feature of DBD::mysql can be toggled by using the
+L<mysql_auto_reconnect> attribute. This behaviour should be turned off
+in code that uses LOCK TABLES, because if the database server times out
+and DBD::mysql reconnects, table locks will be lost without any
+indication of such loss.
+
+=back
+
+=head1 MULTIPLE RESULT SETS
+
+As of version 3.0002_5, DBD::mysql supports multiple result sets (Thanks
+to Guy Harrison!). This is the first release of this functionality, so
+there may be issues. Please report bugs if you run into them!
+
+The basic usage of multiple result sets is
+
+  do
+  {
+    while (@row = $sth->fetchrow_array())
+    {
+      # do stuff here
+    }
+  } while ($sth->more_results)
+
+An example would be:
+
+  $dbh->do("drop procedure if exists someproc") or print $DBI::errstr;
+
+  $dbh->do("create procedure someproc() deterministic
+    begin
+    declare a,b,c,d int;
+    set a=1;
+    set b=2;
+    set c=3;
+    set d=4;
+    select a, b, c, d;
+    select d, c, b, a;
+    select b, a, c, d;
+    select c, b, d, a;
+    end") or print $DBI::errstr;
+
+  $sth = $dbh->prepare('call someproc()') ||
+    die $DBI::err.": ".$DBI::errstr;
+
+  $sth->execute || die $DBI::err.": ".$DBI::errstr;
+  my $i = 0;
+  do {
+    print "\nRowset ".++$i."\n---------------------------------------\n\n";
+    foreach $colno (0..$sth->{NUM_OF_FIELDS}-1) {
+      print $sth->{NAME}->[$colno]."\t";
+    }
+    print "\n";
+    while (@row = $sth->fetchrow_array()) {
+      foreach $field (0..$#row) {
+        print $row[$field]."\t";
+      }
+      print "\n";
+    }
+  } until (!$sth->more_results)
+
+For more examples, please see the eg/ directory. This is where helpful
+DBD::mysql code snippets will be added in the future.
+
+=head2 Issues with Multiple result sets
+
+So far, the main issue is if your result sets are "jagged", meaning that
+the number of columns varies between result sets. Varying numbers of
+columns could result in your script crashing. This is something that will
+be fixed soon.
+
+
+=head1 MULTITHREADING
+
+The multithreading capabilities of DBD::mysql depend completely
+on the underlying C libraries: The modules work with handle data
+only; no global variables are accessed and (to the best of my knowledge)
+no thread-unsafe functions are called. Thus DBD::mysql is believed
+to be completely thread safe, if the C libraries are thread safe
+and you don't share handles among threads.
+
+The obvious question is: Are the C libraries thread safe?
+In the case of MySQL the answer is "mostly" and, in theory, you should
+be able to get a "yes", if the C library is compiled to be thread
+safe (by default it isn't) by passing the option --with-thread-safe-client
+to configure. See the section on I<How to make a threadsafe client> in
+the manual.
+
+
+=head1 INSTALLATION
+
+Windows users may skip this section and pass over to L<WIN32
+INSTALLATION> below. Others, go on reading.
+
+First of all, you do not need an installed MySQL server for installing
+DBD::mysql. However, you need at least the client
+libraries and possibly the header files, if you are compiling DBD::mysql
+from source. In the case of MySQL you can create a
+client-only version by using the configure option --without-server.
+If you are using precompiled binaries, then it may be possible to
+use just selected RPM's like MySQL-client and MySQL-devel or something
+similar, depending on the distribution.
+
+First you need to install the DBI module. For using I<dbimon>, a
+simple DBI shell, it is recommended to install Data::ShowTable,
+another Perl module.
+
+I recommend trying automatic installation via the CPAN module. Try
+
+  perl -MCPAN -e shell
+
+If you are using the CPAN module for the first time, it will ask
+you a lot of questions. When you finally receive the CPAN prompt, enter
+
+  install Bundle::DBD::mysql
+
+If this fails (which may be the case for a number of reasons, for
+example because you are behind a firewall or don't have network
+access), you need to do a manual installation.
First of all you
+need to fetch the modules from CPAN search
+
+  http://search.cpan.org/
+
+The following modules are required
+
+  DBI
+  Data::ShowTable
+  DBD::mysql
+
+Then enter the following commands (note - versions are just examples):
+
+  gzip -cd DBI-(version).tar.gz | tar xf -
+  cd DBI-(version)
+  perl Makefile.PL
+  make
+  make test
+  make install
+
+  cd ..
+  gzip -cd Data-ShowTable-(version).tar.gz | tar xf -
+  cd Data-ShowTable-(version)
+  perl Makefile.PL
+  make
+  make install
+
+  cd ..
+  gzip -cd DBD-mysql-(version).tar.gz | tar xf -
+  cd DBD-mysql-(version)
+  perl Makefile.PL
+  make
+  make test
+  make install
+
+During "perl Makefile.PL" you will be asked some questions,
+among them the directories with header files and libraries.
+For example, if your file F<mysql.h> is in F</usr/include/mysql/mysql.h>,
+then enter the header directory F</usr>, likewise for
+F</usr/lib/mysql/libmysqlclient.a> or F</usr/lib/libmysqlclient.so>.
+
+
+=head1 WIN32 INSTALLATION
+
+If you are using ActivePerl, you may use ppm to install DBD-mysql.
+For Perl 5.6, upgrade to Build 623 or later, then it is sufficient
+to run
+
+  ppm install DBI
+  ppm install DBD::mysql
+
+If you need an HTTP proxy, you might need to set the environment
+variable http_proxy, for example like this:
+
+  set http_proxy=http://myproxy.com:8080/
+
+As of this writing, DBD::mysql is missing in the ActivePerl 5.8.0
+repository. However, Randy Kobes has kindly donated his own
+distribution and the following might succeed:
+
+  ppm install http://theoryx5.uwinnipeg.ca/ppms/DBD-mysql.ppd
+
+Otherwise you definitely *need* a C compiler. And it *must* be the same
+compiler that was being used for compiling Perl itself. If you don't
+have a C compiler, the file README.win32 from the Perl source
+distribution tells you where to obtain freely distributable C compilers
+like egcs or gcc. The Perl sources are available via CPAN search
+
+  http://search.cpan.org
+
+I recommend using the win32clients package for installing DBD::mysql
+under Win32, available for download on www.tcx.se. The following steps
+were required for me:
+
+=over
+
+=item -
+
+The current Perl versions (5.6, as of this writing) do have a problem
+with detecting the C libraries. I recommend applying the following
+patch:
+
+  *** c:\Perl\lib\ExtUtils\Liblist.pm.orig Sat Apr 15 20:03:40 2000
+  --- c:\Perl\lib\ExtUtils\Liblist.pm Sat Apr 15 20:03:45 2000
+  ***************
+  *** 230,235 ****
+  --- 230,239 ----
+        # add "$Config{installarchlib}/CORE" to default search path
+        push @libpath, "$Config{installarchlib}/CORE";
+
+  +     if ($VC and exists($ENV{LIB}) and defined($ENV{LIB})) {
+  +         push(@libpath, split(/;/, $ENV{LIB}));
+  +     }
+  +
+        foreach (Text::ParseWords::quotewords('\s+', 0, $potential_libs)){
+          $thislib = $_;
+
+=item -
+
+Extract sources into F<C:\>. This will create a directory F<C:\mysql>
+with subdirectories include and lib.
+
+IMPORTANT: Make sure this subdirectory is not shared by other TCX
+files! In particular do *not* store the MySQL server in the same
+directory. If the server is already installed in F<C:\mysql>,
+choose a location like F<C:\tmp>, extract the win32clients there.
+Note that you can remove this directory entirely once you have
+installed DBD::mysql.
+
+=item -
+
+Extract the DBD::mysql sources into another directory, for
+example F<C:\src\siteperl>
+
+=item -
+
+Open a DOS shell and change directory to F<C:\src\siteperl>.
+
+=item -
+
+The next step is only required if you repeat building the modules: Make
+sure that you have a clean build tree by running
+
+  nmake realclean
+
+If you don't have VC++, replace nmake with your flavour of make. If
+error messages are reported in this step, you may safely ignore them.
+
+=item -
+
+Run
+
+  perl Makefile.PL
+
+which will prompt you for some settings. The really important ones are:
+
+  Which DBMS do you want to use?
+
+enter a 1 here (MySQL only), and
+
+  Where is your mysql installed? Please tell me the directory that
+  contains the subdir include.
+
+where you have to enter the win32clients directory, for example
+F<C:\mysql> or F<C:\tmp\mysql>.
+
+=item -
+
+Continue in the usual way:
+
+  nmake
+  nmake install
+
+=back
+
+If you want to create a PPM package for the ActiveState Perl version, then
+modify the above steps as follows: Run
+
+  perl Makefile.PL NAME=DBD-mysql BINARY_LOCATION=DBD-mysql.tar.gz
+  nmake ppd
+  nmake
+
+Once that is done, use tar and gzip (for example those from the CygWin32
+distribution) to create an archive:
+
+  mkdir x86
+  tar cf x86/DBD-mysql.tar blib
+  gzip x86/DBD-mysql.tar
+
+Put the files x86/DBD-mysql.tar.gz and DBD-mysql.ppd onto some WWW server
+and install them by typing
+
+  install http://your.server.name/your/directory/DBD-mysql.ppd
+
+in the PPM program.
+
+
+=head1 AUTHORS
+
+The current version of B<DBD::mysql> is almost completely written
+by Jochen Wiedmann, and is now being maintained by
+Patrick Galbraith (I<patg@mysql.com>).
+The first version's author was Alligator Descartes, who was aided
+and abetted by Gary Shea, Andreas König and Tim Bunce amongst others.
+
+The B<Mysql> module was originally written by Andreas König
+<koenig@kulturbox.de>. The current version, mainly an emulation
+layer, is from Jochen Wiedmann.
+
+
+=head1 COPYRIGHT
+
+
+This module is:
+Large Portions Copyright (c) 2004-2006 MySQL, Patrick Galbraith, Alexey Stroganov;
+Large Portions Copyright (c) 2003-2005 Rudolf Lippan; Large Portions
+Copyright (c) 1997-2003 Jochen Wiedmann, with code portions
+Copyright (c) 1994-1997 their original authors. This module is
+released under the same license as Perl itself. See the Perl README
+for details.
+
+
+=head1 MAILING LIST SUPPORT
+
+This module is maintained and supported on a mailing list,
+
+  perl@lists.mysql.com
+
+To subscribe to this list, go to
+
+http://lists.mysql.com/perl?sub=1
+
+Mailing list archives are available at
+
+http://lists.mysql.com/perl
+
+Additionally you might try the dbi-user mailing list for questions about
+DBI and its modules in general. Subscribe via
+
+dbi-users-subscribe@perl.org
+
+Mailing list archives are at
+
+http://groups.google.com/group/perl.dbi.users?hl=en&lr=
+
+Also, the main DBI site is at
+
+http://dbi.perl.org/
+
+=head1 ADDITIONAL DBI INFORMATION
+
+Additional information on the DBI project can be found on the World
+Wide Web at the following URL:
+
+  http://dbi.perl.org
+
+where documentation, pointers to the mailing lists and mailing list
+archives, and pointers to the most current versions of the modules can
+be found.
+
+Information on the DBI interface itself can be gained by typing:
+
+  perldoc DBI
+
+right now!
+
+
+=head1 BUG REPORTING, ENHANCEMENT/FEATURE REQUESTS
+
+Please report bugs, including all the information needed
+such as DBD::mysql version, MySQL version, OS type/version, etc.,
+at this link:
+
+http://bugs.mysql.com/
+
+
+=cut
+
+
diff --git a/Master/tlpkg/tlperl/lib/DBD/mysql/GetInfo.pm b/Master/tlpkg/tlperl/lib/DBD/mysql/GetInfo.pm
new file mode 100755
index 00000000000..9a7418a8759
--- /dev/null
+++ b/Master/tlpkg/tlperl/lib/DBD/mysql/GetInfo.pm
@@ -0,0 +1,306 @@
+package DBD::mysql::GetInfo;
+########################################
+# DBD::mysql::GetInfo
+#
+#
+# Generated by DBI::DBD::Metadata
+# $Author: capttofu $ <-- the person to blame
+# $Revision: 8435 $
+# $Date: 2006-12-23 14:03:49 -0500 (Sat, 23 Dec 2006) $
+
+use strict;
+use DBD::mysql;
+# Beware: not officially documented interfaces...
+# use DBI::Const::GetInfoType qw(%GetInfoType);
+# use DBI::Const::GetInfoReturn qw(%GetInfoReturnTypes %GetInfoReturnValues);
+
+my $sql_driver = 'mysql';
+my $sql_ver_fmt = '%02d.%02d.%04d';   # ODBC version string: ##.##.#####
+my $sql_driver_ver = do {
+    no warnings;
+    # split on a literal dot (/\./); a bare /./ would split on any character
+    sprintf $sql_ver_fmt, split (/\./, $DBD::mysql::VERSION);
+};
+
+my @Keywords = qw(
+
+BIGINT
+BLOB
+DEFAULT
+KEYS
+LIMIT
+LONGBLOB
+MEDIUMBLOB
+MEDIUMINT
+MEDIUMTEXT
+PROCEDURE
+REGEXP
+RLIKE
+SHOW
+TABLES
+TINYBLOB
+TINYTEXT
+UNIQUE
+UNSIGNED
+ZEROFILL
+);
+
+
+sub sql_keywords {
+
+    return join ',', @Keywords;
+
+}
+
+
+
+sub sql_data_source_name {
+    my $dbh = shift;
+    return "dbi:$sql_driver:" . $dbh->{Name};
+}
+
+sub sql_user_name {
+    my $dbh = shift;
+    # Non-standard attribute
+    return $dbh->{CURRENT_USER};
+}
+
+
+####################
+# makefunk()
+# returns a ref to a sub that calls into XS to get
+# values for info types that must be coded in C
+
+sub makefunk ($) {
+    my $type = shift;
+    return sub { dbd_mysql_get_info(shift, $type) };
+}
+
+
+
+
+our %info = (
+     20 => 'N',                # SQL_ACCESSIBLE_PROCEDURES
+     19 => 'Y',                # SQL_ACCESSIBLE_TABLES
+      0 => 0,                  # SQL_ACTIVE_CONNECTIONS
+    116 => 0,                  # SQL_ACTIVE_ENVIRONMENTS
+      1 => 0,                  # SQL_ACTIVE_STATEMENTS
+    169 => 127,                # SQL_AGGREGATE_FUNCTIONS
+    117 => 0,                  # SQL_ALTER_DOMAIN
+     86 => 3,                  # SQL_ALTER_TABLE
+  10021 => 0,                  # SQL_ASYNC_MODE
+    120 => 2,                  # SQL_BATCH_ROW_COUNT
+    121 => 2,                  # SQL_BATCH_SUPPORT
+     82 => 0,                  # SQL_BOOKMARK_PERSISTENCE
+    114 => 1,                  # SQL_CATALOG_LOCATION
+  10003 => 'Y',                # SQL_CATALOG_NAME
+     41 => makefunk 41,        # SQL_CATALOG_NAME_SEPARATOR
+     42 => makefunk 42,        # SQL_CATALOG_TERM
+     92 => 29,                 # SQL_CATALOG_USAGE
+  10004 => '',                 # SQL_COLLATING_SEQUENCE
+  10004 => '',                 # SQL_COLLATION_SEQ
+     87 => 'Y',                # SQL_COLUMN_ALIAS
+     22 => 0,                  # SQL_CONCAT_NULL_BEHAVIOR
+     53 => 259071,             # SQL_CONVERT_BIGINT
+     54 => 0,                  # SQL_CONVERT_BINARY
+     55 => 259071,             # SQL_CONVERT_BIT
+     56 => 259071,             # SQL_CONVERT_CHAR
+     57 => 259071,             # SQL_CONVERT_DATE
+     58 => 259071,             # SQL_CONVERT_DECIMAL
+     59 => 259071,             # SQL_CONVERT_DOUBLE
+     60 => 259071,             # SQL_CONVERT_FLOAT
+     48 => 0,                  # SQL_CONVERT_FUNCTIONS
+#   173 => undef,              # SQL_CONVERT_GUID
+     61 => 259071,             # SQL_CONVERT_INTEGER
+    123 => 0,                  # SQL_CONVERT_INTERVAL_DAY_TIME
+    124 => 0,                  # SQL_CONVERT_INTERVAL_YEAR_MONTH
+     71 => 0,                  # SQL_CONVERT_LONGVARBINARY
+     62 => 259071,             # SQL_CONVERT_LONGVARCHAR
+     63 => 259071,             # SQL_CONVERT_NUMERIC
+     64 => 259071,             # SQL_CONVERT_REAL
+     65 => 259071,             # SQL_CONVERT_SMALLINT
+     66 => 259071,             # SQL_CONVERT_TIME
+     67 => 259071,             # SQL_CONVERT_TIMESTAMP
+     68 => 259071,             # SQL_CONVERT_TINYINT
+     69 => 0,                  # SQL_CONVERT_VARBINARY
+     70 => 259071,             # SQL_CONVERT_VARCHAR
+    122 => 0,                  # SQL_CONVERT_WCHAR
+    125 => 0,                  # 
SQL_CONVERT_WLONGVARCHAR + 126 => 0, # SQL_CONVERT_WVARCHAR + 74 => 1, # SQL_CORRELATION_NAME + 127 => 0, # SQL_CREATE_ASSERTION + 128 => 0, # SQL_CREATE_CHARACTER_SET + 129 => 0, # SQL_CREATE_COLLATION + 130 => 0, # SQL_CREATE_DOMAIN + 131 => 0, # SQL_CREATE_SCHEMA + 132 => 1045, # SQL_CREATE_TABLE + 133 => 0, # SQL_CREATE_TRANSLATION + 134 => 0, # SQL_CREATE_VIEW + 23 => 2, # SQL_CURSOR_COMMIT_BEHAVIOR + 24 => 2, # SQL_CURSOR_ROLLBACK_BEHAVIOR + 10001 => 0, # SQL_CURSOR_SENSITIVITY + 2 => \&sql_data_source_name, # SQL_DATA_SOURCE_NAME + 25 => 'N', # SQL_DATA_SOURCE_READ_ONLY + 119 => 7, # SQL_DATETIME_LITERALS + 17 => 'MySQL', # SQL_DBMS_NAME + 18 => makefunk 18, # SQL_DBMS_VER + 170 => 3, # SQL_DDL_INDEX + 26 => 2, # SQL_DEFAULT_TRANSACTION_ISOLATION + 26 => 2, # SQL_DEFAULT_TXN_ISOLATION + 10002 => 'N', # SQL_DESCRIBE_PARAMETER +# 171 => undef, # SQL_DM_VER + 3 => 137076632, # SQL_DRIVER_HDBC +# 135 => undef, # SQL_DRIVER_HDESC + 4 => 137076088, # SQL_DRIVER_HENV +# 76 => undef, # SQL_DRIVER_HLIB +# 5 => undef, # SQL_DRIVER_HSTMT + 6 => 'libmyodbc3.so', # SQL_DRIVER_NAME + 77 => '03.51', # SQL_DRIVER_ODBC_VER + 7 => $sql_driver_ver, # SQL_DRIVER_VER + 136 => 0, # SQL_DROP_ASSERTION + 137 => 0, # SQL_DROP_CHARACTER_SET + 138 => 0, # SQL_DROP_COLLATION + 139 => 0, # SQL_DROP_DOMAIN + 140 => 0, # SQL_DROP_SCHEMA + 141 => 7, # SQL_DROP_TABLE + 142 => 0, # SQL_DROP_TRANSLATION + 143 => 0, # SQL_DROP_VIEW + 144 => 0, # SQL_DYNAMIC_CURSOR_ATTRIBUTES1 + 145 => 0, # SQL_DYNAMIC_CURSOR_ATTRIBUTES2 + 27 => 'Y', # SQL_EXPRESSIONS_IN_ORDERBY + 8 => 63, # SQL_FETCH_DIRECTION + 84 => 0, # SQL_FILE_USAGE + 146 => 97863, # SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1 + 147 => 6016, # SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2 + 81 => 11, # SQL_GETDATA_EXTENSIONS + 88 => 3, # SQL_GROUP_BY + 28 => 4, # SQL_IDENTIFIER_CASE + #29 => sub {dbd_mysql_get_info(shift,$GetInfoType {SQL_IDENTIFIER_QUOTE_CHAR})}, + 29 => makefunk 29, # SQL_IDENTIFIER_QUOTE_CHAR + 148 => 0, # SQL_INDEX_KEYWORDS + 149 => 0, # SQL_INFO_SCHEMA_VIEWS + 172 => 7, # SQL_INSERT_STATEMENT + 73 => 'N', # SQL_INTEGRITY + 150 => 0, # SQL_KEYSET_CURSOR_ATTRIBUTES1 + 151 => 0, # SQL_KEYSET_CURSOR_ATTRIBUTES2 + 89 => \&sql_keywords, # SQL_KEYWORDS + 113 => 'Y', # SQL_LIKE_ESCAPE_CLAUSE + 78 => 0, # SQL_LOCK_TYPES + 34 => 64, # SQL_MAXIMUM_CATALOG_NAME_LENGTH + 97 => 0, # SQL_MAXIMUM_COLUMNS_IN_GROUP_BY + 98 => 32, # SQL_MAXIMUM_COLUMNS_IN_INDEX + 99 => 0, # SQL_MAXIMUM_COLUMNS_IN_ORDER_BY + 100 => 0, # SQL_MAXIMUM_COLUMNS_IN_SELECT + 101 => 0, # SQL_MAXIMUM_COLUMNS_IN_TABLE + 30 => 64, # SQL_MAXIMUM_COLUMN_NAME_LENGTH + 1 => 0, # SQL_MAXIMUM_CONCURRENT_ACTIVITIES + 31 => 18, # SQL_MAXIMUM_CURSOR_NAME_LENGTH + 0 => 0, # SQL_MAXIMUM_DRIVER_CONNECTIONS + 10005 => 64, # SQL_MAXIMUM_IDENTIFIER_LENGTH + 102 => 500, # SQL_MAXIMUM_INDEX_SIZE + 104 => 0, # SQL_MAXIMUM_ROW_SIZE + 32 => 0, # SQL_MAXIMUM_SCHEMA_NAME_LENGTH + 105 => makefunk 105, # SQL_MAXIMUM_STATEMENT_LENGTH +# 20000 => undef, # SQL_MAXIMUM_STMT_OCTETS +# 20001 => undef, # SQL_MAXIMUM_STMT_OCTETS_DATA +# 20002 => undef, # SQL_MAXIMUM_STMT_OCTETS_SCHEMA + 106 => makefunk 106, # SQL_MAXIMUM_TABLES_IN_SELECT + 35 => 64, # SQL_MAXIMUM_TABLE_NAME_LENGTH + 107 => 16, # SQL_MAXIMUM_USER_NAME_LENGTH + 10022 => 0, # SQL_MAX_ASYNC_CONCURRENT_STATEMENTS + 112 => 0, # SQL_MAX_BINARY_LITERAL_LEN + 34 => 64, # SQL_MAX_CATALOG_NAME_LEN + 108 => 0, # SQL_MAX_CHAR_LITERAL_LEN + 97 => 0, # SQL_MAX_COLUMNS_IN_GROUP_BY + 98 => 32, # SQL_MAX_COLUMNS_IN_INDEX + 99 => 0, # SQL_MAX_COLUMNS_IN_ORDER_BY + 100 => 0, # 
SQL_MAX_COLUMNS_IN_SELECT + 101 => 0, # SQL_MAX_COLUMNS_IN_TABLE + 30 => 64, # SQL_MAX_COLUMN_NAME_LEN + 1 => 0, # SQL_MAX_CONCURRENT_ACTIVITIES + 31 => 18, # SQL_MAX_CURSOR_NAME_LEN + 0 => 0, # SQL_MAX_DRIVER_CONNECTIONS + 10005 => 64, # SQL_MAX_IDENTIFIER_LEN + 102 => 500, # SQL_MAX_INDEX_SIZE + 32 => 0, # SQL_MAX_OWNER_NAME_LEN + 33 => 0, # SQL_MAX_PROCEDURE_NAME_LEN + 34 => 64, # SQL_MAX_QUALIFIER_NAME_LEN + 104 => 0, # SQL_MAX_ROW_SIZE + 103 => 'Y', # SQL_MAX_ROW_SIZE_INCLUDES_LONG + 32 => 0, # SQL_MAX_SCHEMA_NAME_LEN + 105 => 8192, # SQL_MAX_STATEMENT_LEN + 106 => 31, # SQL_MAX_TABLES_IN_SELECT + 35 => makefunk 35, # SQL_MAX_TABLE_NAME_LEN + 107 => 16, # SQL_MAX_USER_NAME_LEN + 37 => 'Y', # SQL_MULTIPLE_ACTIVE_TXN + 36 => 'Y', # SQL_MULT_RESULT_SETS + 111 => 'N', # SQL_NEED_LONG_DATA_LEN + 75 => 1, # SQL_NON_NULLABLE_COLUMNS + 85 => 2, # SQL_NULL_COLLATION + 49 => 16777215, # SQL_NUMERIC_FUNCTIONS + 9 => 1, # SQL_ODBC_API_CONFORMANCE + 152 => 2, # SQL_ODBC_INTERFACE_CONFORMANCE + 12 => 1, # SQL_ODBC_SAG_CLI_CONFORMANCE + 15 => 1, # SQL_ODBC_SQL_CONFORMANCE + 73 => 'N', # SQL_ODBC_SQL_OPT_IEF + 10 => '03.80', # SQL_ODBC_VER + 115 => 123, # SQL_OJ_CAPABILITIES + 90 => 'Y', # SQL_ORDER_BY_COLUMNS_IN_SELECT + 38 => 'Y', # SQL_OUTER_JOINS + 115 => 123, # SQL_OUTER_JOIN_CAPABILITIES + 39 => '', # SQL_OWNER_TERM + 91 => 0, # SQL_OWNER_USAGE + 153 => 2, # SQL_PARAM_ARRAY_ROW_COUNTS + 154 => 3, # SQL_PARAM_ARRAY_SELECTS + 80 => 3, # SQL_POSITIONED_STATEMENTS + 79 => 31, # SQL_POS_OPERATIONS + 21 => 'N', # SQL_PROCEDURES + 40 => '', # SQL_PROCEDURE_TERM + 114 => 1, # SQL_QUALIFIER_LOCATION + 41 => '.', # SQL_QUALIFIER_NAME_SEPARATOR + 42 => 'database', # SQL_QUALIFIER_TERM + 92 => 29, # SQL_QUALIFIER_USAGE + 93 => 3, # SQL_QUOTED_IDENTIFIER_CASE + 11 => 'N', # SQL_ROW_UPDATES + 39 => '', # SQL_SCHEMA_TERM + 91 => 0, # SQL_SCHEMA_USAGE + 43 => 7, # SQL_SCROLL_CONCURRENCY + 44 => 17, # SQL_SCROLL_OPTIONS + 14 => '\\', # SQL_SEARCH_PATTERN_ESCAPE + 13 => makefunk 13, # SQL_SERVER_NAME + 94 => 'ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜáíóúñÑ', # SQL_SPECIAL_CHARACTERS + 155 => 7, # SQL_SQL92_DATETIME_FUNCTIONS + 156 => 0, # SQL_SQL92_FOREIGN_KEY_DELETE_RULE + 157 => 0, # SQL_SQL92_FOREIGN_KEY_UPDATE_RULE + 158 => 8160, # SQL_SQL92_GRANT + 159 => 0, # SQL_SQL92_NUMERIC_VALUE_FUNCTIONS + 160 => 0, # SQL_SQL92_PREDICATES + 161 => 466, # SQL_SQL92_RELATIONAL_JOIN_OPERATORS + 162 => 32640, # SQL_SQL92_REVOKE + 163 => 7, # SQL_SQL92_ROW_VALUE_CONSTRUCTOR + 164 => 255, # SQL_SQL92_STRING_FUNCTIONS + 165 => 0, # SQL_SQL92_VALUE_EXPRESSIONS + 118 => 4, # SQL_SQL_CONFORMANCE + 166 => 2, # SQL_STANDARD_CLI_CONFORMANCE + 167 => 97863, # SQL_STATIC_CURSOR_ATTRIBUTES1 + 168 => 6016, # SQL_STATIC_CURSOR_ATTRIBUTES2 + 83 => 7, # SQL_STATIC_SENSITIVITY + 50 => 491519, # SQL_STRING_FUNCTIONS + 95 => 0, # SQL_SUBQUERIES + 51 => 7, # SQL_SYSTEM_FUNCTIONS + 45 => 'table', # SQL_TABLE_TERM + 109 => 0, # SQL_TIMEDATE_ADD_INTERVALS + 110 => 0, # SQL_TIMEDATE_DIFF_INTERVALS + 52 => 106495, # SQL_TIMEDATE_FUNCTIONS + 46 => 3, # SQL_TRANSACTION_CAPABLE + 72 => 15, # SQL_TRANSACTION_ISOLATION_OPTION + 46 => 3, # SQL_TXN_CAPABLE + 72 => 15, # SQL_TXN_ISOLATION_OPTION + 96 => 0, # SQL_UNION + 96 => 0, # SQL_UNION_STATEMENT + 47 => \&sql_user_name, # SQL_USER_NAME + 10000 => 1992, # SQL_XOPEN_CLI_YEAR +); + +1; + +__END__ diff --git a/Master/tlpkg/tlperl/lib/DBD/mysql/INSTALL.pod b/Master/tlpkg/tlperl/lib/DBD/mysql/INSTALL.pod new file mode 100755 index 00000000000..132629c7803 --- /dev/null +++ 
b/Master/tlpkg/tlperl/lib/DBD/mysql/INSTALL.pod
@@ -0,0 +1,801 @@
+=head1 NAME
+
+INSTALL - How to install and configure DBD::mysql
+
+
+=head1 SYNOPSIS
+
+  perl Makefile.PL [options]
+  make
+  make test
+  make install
+
+
+=head1 DESCRIPTION
+
+This document describes the installation and configuration of
+DBD::mysql, the Perl DBI driver for the MySQL database. Before
+reading on, make sure that you have the prerequisites available:
+Perl, MySQL and DBI. For details see the separate section
+L</PREREQUISITES>.
+
+Depending on your version of Perl, it might be possible to
+use a binary distribution of DBD::mysql. If possible, this is
+recommended. Otherwise you need to install from the sources.
+If so, you will definitely need a C compiler. Installation
+from binaries and from sources are both described in separate
+sections: L<BINARY INSTALLATION> and L<SOURCE INSTALLATION>.
+
+Finally, if you encounter any problems, do not forget to
+read the section on known problems, L<KNOWN PROBLEMS>. If
+that doesn't help, you should look into the archive of the
+mailing list B<perl@lists.mysql.com>. See
+http://www.mysql.com for archive locations. And if that
+still doesn't help, please post a question on this mailing
+list.
+
+
+=head1 PREREQUISITES
+
+=over
+
+=item Perl
+
+Preferably a version of Perl that comes preconfigured with
+your system. For example, all Linux and FreeBSD distributions
+come with Perl. For Windows, ActivePerl is recommended, see
+http://www.activestate.com for details.
+
+=item MySQL
+
+You need not install the actual MySQL database server; the
+client files and the development files are sufficient. For
+example, the Fedora Core 4 Linux distribution comes with RPM files
+(using YUM) B<mysql.i386> and B<mysql-server.i386> (use "yum search"
+to find exact package names). These are sufficient if the MySQL
+server is located on a foreign machine. You may also create client
+files by compiling from the MySQL source distribution and using
+
+  configure --without-server
+
+If you are using Windows and need to compile from sources
+(which is only the case if you are not using ActivePerl),
+then you must ensure that the header and library files are
+installed. This may require choosing a "Custom installation"
+and selecting the appropriate option when running the
+MySQL setup program.
+
+=item DBI
+
+DBD::mysql is a DBI driver, hence you need DBI. It is available
+from the same source where you got the DBD::mysql distribution.
+
+=item C compiler
+
+A C compiler is only required if you install from source. In
+most cases there are binary distributions of DBD::mysql
+available. However, if you need a C compiler, make sure that
+it is the same C compiler that was used for compiling Perl and
+MySQL! Otherwise you will almost definitely encounter problems
+because of differences in the underlying C runtime libraries.
+
+In the worst case, this might mean compiling Perl and MySQL
+yourself. But believe me, experience shows that a lot of problems
+are fixed this way.
+
+=item Gzip libraries
+
+Late versions of MySQL come with support for compression. Thus
+it B<may> be required that you have installed an RPM package like
+libz-devel, libgz-devel or something similar.
+
+=back
+
+
+=head1 BINARY INSTALLATION
+
+Binary installation is possible in most cases, depending
+on your system. I give some examples:
+
+
+=head2 Windows
+
+ActivePerl offers a PPM archive of DBD::mysql.
All you need to
+do is type
+
+  ppm
+  install DBI
+  install DBD-mysql
+
+This will fetch the modules via HTTP and install them. If you
+need to use a WWW proxy server, the environment variable
+HTTP_proxy must be set:
+
+  set HTTP_proxy=http://my.proxy.server:8000/
+  ppm
+  install DBI
+  install DBD-mysql
+
+Of course you need to replace the host name C<my.proxy.server>
+and the port number C<8000> with your local values.
+
+If the above procedure doesn't work, please upgrade to the latest
+version of ActivePerl. Versions before build 623 are known to
+have problems.
+
+PPM 3 is said to miss DBD::mysql in the repository. Thus use of
+PPM 3 is discouraged, in favour of PPM 2. If you need to use
+PPM 3, try
+
+  ppm
+  rep add PPM2 http://ppm.activestate.com/PPMPackages/5.6plus/
+  rep 2
+  install DBI
+  install DBD-mysql
+
+
+
+=head2 Red Hat Linux
+
+As of version 7.1, Red Hat Linux comes with MySQL and DBD::mysql.
+You need to ensure that the following RPM's are installed:
+
+  mysql
+  perl-DBI
+  perl-DBD-MySQL
+
+For installation from source the following RPM's are required
+
+  mysql-devel
+  libz-devel
+
+Optional are
+
+  mysql-server
+
+=head2 Fedora Core Linux
+
+As of version 3, Fedora Linux comes with MySQL and DBD::mysql.
+You need to ensure that the following RPM's are installed:
+
+  mysql or mysql-server
+  perl-DBD-MySQL
+
+For installation from source the following RPM's are required
+
+  mysql-devel
+  libz-devel
+
+Please try
+
+  yum search mysql
+
+to see the exact names.
+
+Note (important): FC 3 comes with MySQL 3.x, and some people have
+upgraded using MySQL RPMs for newer versions. If you do this, you
+must re-compile your DBD::mysql because your existing DBD::mysql will be
+linked against the old version of MySQL's client libs. CPAN has no way to
+know or detect that you have upgraded MySQL.
+
+=head2 Other systems
+
+In the case of Linux or FreeBSD distributions it is very likely
+that all you need comes with your distribution, as in the case
+of Red Hat Linux. I just cannot give you names, as I am not using
+these systems.
+
+Please let me know if you find the files in your SuSE Linux, Debian
+Linux or FreeBSD distribution so that I can extend the above list.
+
+
+=head1 SOURCE INSTALLATION
+
+So you need to install from sources. If you are lucky, the Perl
+module C<CPAN> will do all for you, thanks to the excellent work
+of Andreas Koenig. Otherwise you will need to do a manual
+installation. Some of you, in particular system administrators
+of multiple sites, will choose automatic installation. All of
+these installation types have their own section: L</CPAN installation>,
+L</Manual installation> and L</Configuration>.
+
+The DBD::mysql Makefile.PL needs to know where to find your MySQL
+installation. This may be achieved using command line switches
+(see L</Configuration>) or automatically using the mysql_config binary
+which comes with most MySQL distributions. If your MySQL distribution
+contains mysql_config, the easiest method is to ensure this binary
+is on your path.
+
+e.g.
+
+  PATH=$PATH:/usr/local/mysql/bin
+  export PATH
+
+
+=head2 CPAN installation
+
+Installation of DBD::mysql can be incredibly easy:
+
+  cpan
+  install DBD::mysql
+
+If you are using the CPAN module for the first time, just answer
+the questions by accepting the defaults which are fine in most
+cases. If you are using an older version of Perl, you might
+instead need a
+
+  perl -MCPAN -e shell
+  install DBD::mysql
+
+If you cannot get the CPAN module working, you might try manual
+installation.
If installation with CPAN fails because your local
+settings have been guessed wrong, you need to ensure MySQL's
+mysql_config is on your path (see L</SOURCE INSTALLATION>) or
+alternatively create a script called C<mysql_config>. This is
+described in more detail later; see L</Configuration>.
+
+
+=head2 Manual installation
+
+For a manual installation you need to fetch the DBD::mysql
+source distribution. The latest version is always available
+from
+
+  http://www.cpan.org/modules/by-module/DBD/
+
+The name is typically something like
+
+  DBD-mysql-1.2216.tar.gz
+
+The archive needs to be extracted. On Windows you may use a tool
+like WinZip, on Unix you type
+
+  gzip -cd DBD-mysql-1.2216.tar.gz | tar xf -
+
+This will create a subdirectory DBD-mysql-1.2216. Enter this
+subdirectory and type
+
+  perl Makefile.PL
+  make
+  make test
+
+(On Windows you may need to replace "make" with "nmake" or
+"dmake".) If the tests seem to look fine, you may continue with
+
+  make install
+
+If the compilation (make) or tests fail, you might need to
+configure some settings.
+
+For example you might choose a different database, or the C
+compiler or the linker might need some flags. See L</Configuration>,
+L</Compiler flags> and L</Linker flags>.
+
+For Windows/CygWin there is a special section below,
+L<Windows/CygWin>.
+
+
+=head2 Configuration
+
+The install script "Makefile.PL" can be configured via a lot of
+switches. All switches can be used on the command line. For
+example, the test database:
+
+  perl Makefile.PL --testdb=<db>
+
+If you do not like configuring these switches on the command
+line, you may alternatively create a script called C<mysql_config>.
+This is described later on.
+
+Available switches are:
+
+=over
+
+=item testdb
+
+Name of the test database, defaults to B<test>.
+
+=item testuser
+
+Name of the test user, defaults to empty. If the name is empty,
+then the currently logged in user's name will be used.
+
+=item testpassword
+
+Password of the test user, defaults to empty.
+
+=item testhost
+
+Host name or IP number of the test database; defaults to localhost.
+
+=item testport
+
+Port number of the test database.
+
+=item ps-protocol=1 or 0
+
+Whether to run the test suite using server-side prepared statements or
+driver-emulated prepared statements. ps-protocol=1 means use server-side
+prepare, ps-protocol=0 means driver-emulated.
+
+=item cflags
+
+This is a list of flags that you want to give to the C compiler.
+The most important flag is the location of the MySQL header files.
+For example, on Red Hat Linux the header files are in /usr/include/mysql
+and you might try
+
+  -I/usr/include/mysql
+
+On Windows the header files may be in C:\mysql\include and you might try
+
+  -IC:\mysql\include
+
+The default flags are determined by running
+
+  mysql_config --cflags
+
+More details on the C compiler flags can be found in the following
+section, L</Compiler flags>.
+
+=item libs
+
+This is a list of flags that you want to give to the linker
+or loader. The most important flags are the locations and names
+of additional libraries. For example, on Red Hat Linux your
+MySQL client libraries are in /usr/lib/mysql and you might try
+
+  -L/usr/lib/mysql -lmysqlclient -lz
+
+On Windows the libraries may be in C:\mysql\lib and
+
+  -LC:\mysql\lib -lmysqlclient
+
+might be a good choice. The default flags are determined by running
+
+  mysql_config --libs
+
+More details on the linker flags can be found in a separate section,
+L</Linker flags>.
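+
+For example, the two switches might be combined like this (the paths
+are illustrative only):
+
+  perl Makefile.PL \
+      --cflags="-I/usr/local/mysql/include" \
+      --libs="-L/usr/local/mysql/lib -lmysqlclient -lz"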
+
+=back
+
+If a switch is not present on the command line, then the
+script C<mysql_config> will be executed. This script comes
+as part of the MySQL distribution. For example, to determine
+the C compiler flags, we are executing
+
+  mysql_config --cflags
+  mysql_config --libs
+
+If you want to configure your own settings for database name,
+database user and so on, then you have to create a script with
+the same name that replies with your own settings.
+
+
+=head2 Compiler flags
+
+Note: regarding the following info about compiler and linker flags, you
+shouldn't normally have to use these options, because Makefile.PL is
+pretty good at utilising mysql_config to get the flags that you need
+for a successful compile.
+
+It is typically not so difficult to determine the appropriate
+flags for the C compiler. The linker flags, which you find in
+the next section, are another story.
+
+The determination of the C compiler flags is usually left to
+a configuration script called F<mysql_config>, which can be
+invoked with
+
+  mysql_config --cflags
+
+When doing so, it will emit a line with suggested C compiler
+flags, for example like this:
+
+  -I/usr/include/mysql
+
+The C compiler must find some header files. Header files have
+the extension C<.h>. MySQL header files are, for example,
+F<mysql.h> and F<mysql_version.h>. In most cases the header
+files are not installed by default. For example, on Windows
+it is an installation option of the MySQL setup program
+(Custom installation), whether the header files are installed
+or not. On Red Hat Linux, you need to install an RPM archive
+F<mysql-devel> or F<MySQL-devel>.
+
+If you know the location of the header files, then you will
+need to add an option
+
+  -I<header directory>
+
+to the C compiler flags, for example C<-I/usr/include/mysql>.
+
+
+=head2 Linker flags
+
+Appropriate linker flags are the most common source of problems
+while installing DBD::mysql. I will only give a rough overview,
+you'll find more details in the troubleshooting section,
+L<KNOWN PROBLEMS>.
+
+The determination of the linker flags is usually left to
+a configuration script called F<mysql_config>, which can be
+invoked with
+
+  mysql_config --libs
+
+When doing so, it will emit a line with suggested linker
+flags, for example like this:
+
+  -L'/usr/lib/mysql' -lmysqlclient -lnsl -lm -lz -lcrypt
+
+The following items typically need to be configured for the
+linker:
+
+=over
+
+=item The mysqlclient library
+
+The MySQL client library comes as part of the MySQL distribution.
+Depending on your system it may be a file called
+
+  F<libmysqlclient.a>   statically linked library, Unix
+  F<libmysqlclient.so>  dynamically linked library, Unix
+  F<mysqlclient.lib>    statically linked library, Windows
+  F<mysqlclient.dll>    dynamically linked library, Windows
+
+or something similar.
+
+As in the case of the header files, the client library is typically
+not installed by default. On Windows you will need to select them
+while running the MySQL setup program (Custom installation). On
+Red Hat Linux an RPM archive F<mysql-devel> or F<MySQL-devel> must
+be installed.
+
+The linker needs to know the location and name of the mysqlclient
+library. This can be done by adding the flags
+
+  -L<lib directory> -lmysqlclient
+
+or by adding the complete path name.
Examples:
+
+  -L/usr/lib/mysql -lmysqlclient
+  -LC:\mysql\lib -lmysqlclient
+
+If you would like to use the static libraries (and there are
+excellent reasons to do so), you need to create a separate
+directory, copy the static libraries to that place and use
+the -L switch above to point to your new directory. For example:
+
+  mkdir /tmp/mysql-static
+  cp /usr/lib/mysql/*.a /tmp/mysql-static
+  perl Makefile.PL --libs="-L/tmp/mysql-static -lmysqlclient"
+  make
+  make test
+  make install
+  rm -rf /tmp/mysql-static
+
+
+=item The gzip library
+
+The MySQL client can use compression when talking to the MySQL
+server, a nice feature when sending or receiving large texts over
+a slow network.
+
+On Unix you typically find the appropriate file name by running
+
+  ldconfig -p | grep libz
+  ldconfig -p | grep libgz
+
+Once you know the name (libz.a or libgz.a is best), just add it
+to the list of linker flags. If this seems to be causing problems,
+you may also try to link without the gzip libraries.
+
+=back
+
+
+=head1 SPECIAL SYSTEMS
+
+Below you find information on particular systems:
+
+
+=head2 Windows/CygWin
+
+If you are a user of Cygwin (the Redhat distribution) you already
+know that it contains a nicely running Perl 5.6.1; installation of
+additional modules usually works like a charm via the standard
+procedure of
+
+  perl Makefile.PL
+  make
+  make test
+  make install
+
+The Windows binary distribution of MySQL runs smoothly under Cygwin.
+You can start/stop the server and use all Windows clients without problem.
+But to install DBD::mysql you have to take a few extra steps.
+
+Don't attempt to build DBD::mysql against either the MySQL Windows or
+Linux/Unix BINARY distributions: neither will work!
+
+You MUST compile the MySQL clients yourself under Cygwin, to get a
+'libmysqlclient.a' compiled under Cygwin. Really! You'll only need
+that library and the header files; you don't need any other client parts.
+Continue to use the Windows binaries. And don't attempt (currently) to
+build the MySQL Server part; it is unnecessary, as MySQL AB does an
+excellent job of delivering optimized binaries for the mainstream
+operating systems, and it is said that the server compiled under Cygwin
+is unstable.
+
+Install MySQL (if you haven't already)
+
+=over
+
+=item -
+
+download the MySQL Windows Binaries from
+http://www.mysql.com/downloads/index.html
+
+=item -
+
+unzip mysql-<version>-win.zip into some temporary location
+
+=item -
+
+start the setup.exe there and follow the instructions
+
+=item -
+
+start the server
+
+=item -
+
+alternatively download, install and start the server on a remote
+machine, on whatever supported OS
+
+=back
+
+
+Build MySQL clients under Cygwin:
+
+=over
+
+=item -
+
+download the MySQL LINUX source from
+http://www.mysql.com/downloads/index.html
+
+=item -
+
+unpack mysql-<version>.tar.gz into some tmp location
+
+=item -
+
+cd into the unpacked dir mysql-<version>
+
+  ./configure --prefix=/usr/local/mysql --without-server
+
+This prepares the Makefile with the installed Cygwin features. It
+takes some time, but should finish without error. The 'prefix', as
+given, installs the whole Cygwin/MySQL thingy into a location not
+normally in your PATH, so that you continue to use already installed
+Windows binaries. The --without-server parameter tells configure to
+only build the clients.
+
+=item -
+
+  make
+
+This builds all MySQL client parts ... be patient. It should finally
+finish without any error.
Build MySQL clients under Cygwin:

=over

=item -

download the MySQL LINUX source from
http://www.mysql.com/downloads/index.html

=item -

unpack mysql-<version>.tar.gz into some temporary location

=item -

cd into the unpacked dir mysql-<version>

    ./configure --prefix=/usr/local/mysql --without-server

This prepares the Makefile using the installed Cygwin features. It
takes some time, but should finish without error. The 'prefix', as
given, installs the whole Cygwin/MySQL thingy into a location not
normally in your PATH, so that you continue to use already installed
Windows binaries. The --without-server parameter tells configure to
build only the clients.

=item -

    make

This builds all MySQL client parts ... be patient. It should
finally finish without any error.

=item -

    make install

This installs the compiled client files under /usr/local/mysql/.
Remember, you don't need anything except the library under
/usr/local/mysql/lib and the headers under /usr/local/mysql/include!

Essentially you are now done with this part. If you want, you may
briefly try your compiled binaries; for that, do:

=item -

    cd /usr/local/mysql/bin
    ./mysql -h 127.0.0.1

The host (-h) parameter 127.0.0.1 targets the local host, but forces
the mysql client to use a TCP/IP connection. The default would be a
pipe/socket connection (even if you say '-h localhost') and this
doesn't work between Cygwin and Windows (as far as I know).

If you have your MySQL server running on some other box, then please
substitute '127.0.0.1' with the name or IP number of that box.

=back

Please note: in my environment the 'mysql' client did not accept a
simple RETURN; I had to use CTRL-RETURN to send commands. Strange,
but I didn't attempt to fix that, as we are only interested in the
built library and headers.

At the 'mysql>' prompt do a quick check:

    mysql> use mysql
    mysql> show tables;
    mysql> select * from db;
    mysql> exit

You are now ready to build DBD::mysql!


Build DBD::mysql:

=over

=item -

download DBD-mysql-<version>.tar.gz from CPAN

=item -

unpack DBD-mysql-<version>.tar.gz

=item -

cd into the unpacked dir DBD-mysql-<version>
(you probably did that already, if you are reading this!)

=item -

    cp /usr/local/mysql/bin/mysql_config .

This copies the executable script mentioned in the DBD::mysql docs
from your freshly built Cygwin/MySQL client directory; it knows about
your Cygwin installation, especially about the right libraries to
link with.

=item -

    perl Makefile.PL --testhost=127.0.0.1

The --testhost=127.0.0.1 parameter again forces a TCP/IP connection
to the MySQL server on the local host instead of a pipe/socket
connection for the 'make test' phase.

=item -

    make

This should run without error.

=item -

    make test

With DBD-mysql-2.1022 or earlier you will see several errors in
dbdadmin.t, mysql.t and mysql2.t; with later versions you should not
get errors (except possibly one, indicating that some tables could
not be dropped; I'm hunting for a solution to that problem, but have
none yet).

=item -

    make install

This installs DBD::mysql into the Perl hierarchy.

=back
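
After C<make install> you can quickly check that Perl picks up the
freshly installed driver and that a connection works. This is only a
sketch; the database name 'test', the user and the password are
placeholders for your own settings:

    # DBD::mysql should now show up among the installed drivers
    perl -MDBI -e 'print map "$_\n", DBI->available_drivers'

    # try an actual connection, again forcing TCP/IP via 127.0.0.1
    perl -MDBI -e 'DBI->connect("DBI:mysql:database=test;host=127.0.0.1",
        "user", "password", {RaiseError => 1}) and print "connection ok\n"'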
Notes:

This was tested with MySQL version 3.23.54a and DBD::mysql version
2.1022. I patched the above mentioned test scripts and sent the
patches to the author of DBD::mysql, Jochen Wiedmann.

Georg Rehfeld 15. Jan. 2003


=head1 KNOWN PROBLEMS

=over

=item 1.)

Some Linux distributions don't come with a gzip library by default.
Running "make" terminates with an error message like

  LD_RUN_PATH="/usr/lib/mysql:/lib:/usr/lib" gcc
    -o blib/arch/auto/DBD/mysql/mysql.so -shared
    -L/usr/local/lib dbdimp.o mysql.o -L/usr/lib/mysql
    -lmysqlclient -lm -L/usr/lib/gcc-lib/i386-redhat-linux/2.96
    -lgcc -lz
  /usr/bin/ld: cannot find -lz
  collect2: ld returned 1 exit status
  make: *** [blib/arch/auto/DBD/mysql/mysql.so] Error 1

If this is the case for you, install an RPM archive like
libz-devel, libgz-devel, zlib-devel or gzlib-devel or something
similar.

=item 2.)

If Perl was compiled with gcc or egcs, but MySQL was compiled
with another compiler or on another system, an error message like
this is very likely when running "make test":

  t/00base............install_driver(mysql) failed: Can't load
  '../blib/arch/auto/DBD/mysql/mysql.so' for module DBD::mysql:
  ../blib/arch/auto/DBD/mysql/mysql.so: undefined symbol: _umoddi3
  at /usr/local/perl-5.005/lib/5.005/i586-linux-thread/DynaLoader.pm
  line 168.

This means that your linker doesn't include libgcc.a. The solution
is to tell the linker to use libgcc. Run

  gcc -print-libgcc-file-name

to determine the exact location of libgcc.a, or for older versions
of gcc

  gcc -v

to determine the directory. Once you know the directory, add

  -L<directory> -lgcc

to the list of linker flags; see L</Configuration> and
L</Linker flags>.

=item 3.)

There are known problems with shared versions of libmysqlclient,
at least on some Linux boxes. If you receive an error message
similar to

  install_driver(mysql) failed: Can't load
  '/usr/lib/perl5/site_perl/i586-linux/auto/DBD/mysql/mysql.so'
  for module DBD::mysql: File not found at
  /usr/lib/perl5/i586-linux/5.00404/DynaLoader.pm line 166

then this error message can be misleading: it's not mysql.so
that fails to be loaded, but libmysqlclient.so! The usual
problem is that this file is located in a directory like

  /usr/lib/mysql

where the dynamic loader doesn't look for it.

The best workaround is using a statically linked mysqlclient
library, for example

  /usr/lib/mysql/libmysqlclient.a

The use of a statically linked library is described in the
section on linker flags above; see L</Configuration> and
L</Linker flags>.

=item 4.)

Red Hat 8 and 9 set the default locale to UTF-8, which causes
problems with MakeMaker. To build DBD::mysql on these systems,
do an 'unset LANG' before running 'perl Makefile.PL'.

=back


=head1 SUPPORT

Finally, if everything else fails, you are not alone. First of
all, for an immediate answer, you should look into the archives
of the mailing list B<perl@lists.mysql.com>. See
http://www.mysql.com for archive locations.

If you don't find an appropriate posting and reply in the
mailing list, please post a question. Typically a reply will
be seen within one or two days.