dbix-easy-perl-0.21.orig/0000755000000000000000000000000012776163707012126 5ustar dbix-easy-perl-0.21.orig/Easy/0000755000000000000000000000000012776163707013027 5ustar dbix-easy-perl-0.21.orig/Easy/Import.pm0000644000000000000000000003023612271755224014632 0ustar # Import.pm - Easy to Use DBI import interface # Copyright (C) 2004-2012 Stefan Hornburg (Racke) # Authors: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. package DBIx::Easy::Import; use strict; use vars qw($VERSION @ISA @EXPORT @EXPORT_OK); require Exporter; @ISA = qw(Exporter); # Items to export into callers namespace by default. Note: do not export # names by default without a very good reason. Use EXPORT_OK instead. # Do not simply export all your public functions/methods/constants. @EXPORT = qw( ); # Public variables $VERSION = '0.19'; use DBI; use DBIx::Easy; sub new { my $proto = shift; my $class = ref($proto) || $proto; my $self = {driver => shift, database => shift}; bless ($self, $class); } sub update { my ($self, %params) = @_; $self->_do_import(%params); } sub initialize { my ($self, $file, $format) = @_; my ($sep_char); $format = uc($format); if ($format =~ /^CSV/) { $format = 'CSV'; if ($') { $sep_char = $'; $sep_char =~ s/^\s+//; $sep_char =~ s/\s+$//; } eval { require Text::CSV_XS; }; if ($@) { die "$0: couldn't load module Text::CSV_XS\n"; } $self->{func} = \&get_columns_csv; $self->{parser} = new Text::CSV_XS ({'binary' => 1, 'sep_char' => $sep_char}); } elsif ($format eq 'XLS') { eval { require Spreadsheet::ParseExcel; }; if ($@) { die "$0: couldn't load module Spreadsheet::ParseExcel\n"; } $self->{parser} = new Spreadsheet::ParseExcel; } elsif ($format eq 'TAB') { $self->{func} = \&get_columns_tab; } else { die qq{$0: unknown format "$format"}, "\n"; } if ($file) { # read input from file require IO::File; $self->{fd_input} = new IO::File; $self->{fd_input}->open($file) || die "$0: couldn't open $file: $!\n"; } else { # read input from standard input require IO::Handle; $self->{fd_input} = new IO::Handle; $self->{fd_input}->fdopen(fileno(STDIN),'r'); } } sub _do_import { my ($self, %params) = @_; my ($format, $sep_char, %colmap, %hcolmap); $self->{colflag} = 1; $self->initialize($params{file}, $params{'format'}); my @columns; if ($params{'columns'}) { $self->{colflag} = ! 
($params{'columns'} =~ s/\s*[\!^]//); # setup positive/negative list for columns for (split(/\s*,\s*/, $params{'columns'})) { $self->{usecol}->{$_} = $self->{colflag}; } } if (ref($params{'map'}) eq 'HASH') { %colmap = %{$params{'map'}}; } elsif ($params{'map'}) { # parse column name mapping my ($head, $name); foreach (split (/;/, $params{'map'})) { ($head, $name) = split /=/; $colmap{$head} = $name; } } if (1) { my %hcolmap; my @columns; if ($self->{func}->($self, \@columns) <= 0) { die "$0: couldn't find headline\n"; } if ($params{'map_filter'} eq 'lc') { @columns = map {lc($_)} @columns; } # remove whitespace from column names and mark them map {s/^\s+//; s/\s+$//; $hcolmap{$_} = 1;} @columns; if ($params{'map'}) { my @newcolumns; # filter column names foreach (@columns) { if (exists $colmap{$_}) { push (@newcolumns, $colmap{$_}); $hcolmap{$colmap{$_}} = 1; } else { push (@newcolumns, $_); } } @columns = @newcolumns; } # add any other columns explicitly selected for (sort (keys %{$self->{usecol}})) { next if $hcolmap{$_}; next unless exists $self->{usecol}->{$_}; next unless $self->{usecol}->{$_}; push (@columns, $_); } $self->{fieldmap}->{$params{table}} = \@columns; } # database access my $dbif = $self->{dbif} = new DBIx::Easy ($self->{driver} || $params{driver}, $self->{database} || $params{database}); # determine column names my @names = $self->column_names ($params{table}); my $fieldnames = \@names; my @values; while ($self->{func}->($self, \@columns)) { my (@data); @values = @columns; # sanity checks on input data my $typeref = $dbif -> typemap ($params{table}); my $sizeref = $dbif -> sizemap ($params{table}); for (my $i = 0; $i <= $#$fieldnames; $i++) { # check for column exclusion if (keys %{$self->{usecol}}) { # note: we do not check the actual value !! if ($self->{colflag} && ! exists $self->{usecol}->{$$fieldnames[$i]}) { next; } if (! $self->{colflag} && exists $self->{usecol}->{$$fieldnames[$i]}) { next; } } # expand newlines and tabulators if (defined $values[$i]) { $values[$i] =~ s/\\n/\n/g; $values[$i] =~ s/\\t/\t/g; } # check if input exceeds column capacity unless (exists $$typeref{$$fieldnames[$i]}) { warn ("$0: No type information for column $$fieldnames[$i] found\n"); next; } unless (exists $$sizeref{$$fieldnames[$i]}) { warn ("$0: No size information for column $$fieldnames[$i] found\n"); next; } if ($$typeref{$$fieldnames[$i]} == DBI::SQL_CHAR) { if (defined $values[$i]) { if (length($values[$i]) > $$sizeref{$$fieldnames[$i]}) { warn ("$0: Data for field $$fieldnames[$i] truncated: $values[$i]\n"); $values[$i] = substr($values[$i], 0, $$sizeref{$$fieldnames[$i]}); } } else { # avoid insertion of NULL values $values[$i] = ''; } } elsif ($$typeref{$$fieldnames[$i]} == DBI::SQL_VARCHAR) { if (defined $values[$i]) { if (length($values[$i]) > $$sizeref{$$fieldnames[$i]}) { warn ("$0: Data for field $$fieldnames[$i] truncated: $values[$i]\n"); $values[$i] = substr($values[$i], 0, $$sizeref{$$fieldnames[$i]}); } } else { # avoid insertion of NULL values $values[$i] = ''; } } # push (@data, $$fieldnames[$i], $values[$i]); } # check if record exists my %keymap = $self->key_names ($params{table}, $params{'keys'} || 1, 1); my @keys = (keys(%keymap)); my @terms = map {$_ . ' = ' . $dbif->quote($values[$keymap{$_}])} (@keys); my $sth = $dbif -> process ('SELECT ' . join(', ', @keys) . " FROM $params{table} WHERE " . 
join (' AND ', @terms)); while ($sth -> fetch) {} if ($sth -> rows () > 1) { $" = ', '; die ("$0: duplicate key(s) @keys in table $params{table}\n"); } my $update = $sth -> rows (); $sth -> finish (); # generate SQL statement for (my $i = 0; $i <= $#$fieldnames; $i++) { # check for column exclusion if (keys %{$self->{usecol}}) { # note: we do not check the actual value !! if ($self->{colflag} && ! exists $self->{usecol}->{$$fieldnames[$i]}) { next; } if (! $self->{colflag} && exists $self->{usecol}->{$$fieldnames[$i]}) { next; } } # expand newlines if (defined $values[$i]) { $values[$i] =~ s/\\n/\n/g; } push (@data, $$fieldnames[$i], $values[$i]); } if ($update) { $dbif -> update ($params{table}, join (' AND ', @terms), @data); } else { if ($params{'update_only'}) { $" = ', '; die ("$0: key(s) @keys not found\n"); } $dbif -> insert ($params{table}, @data); } } } # ------------------------------------------------- # FUNCTION: column_names DBIF TABLE [START] # # Returns array with column names from table TABLE # using database connection DBIF. # Optional parameter START specifies column where # the array should start with. # ------------------------------------------------- sub column_names { my ($self, $table, $start) = @_; my ($names, $sth); $start = 0 unless $start; if (exists $self->{fieldmap}->{$table}) { $names = $self->{fieldmap}->{$table}; } else { $sth = $self->{dbif}-> process ("SELECT * FROM $table WHERE 0 = 1"); $names = $self->{fieldmap}->{$table} = $sth -> {NAME}; $sth -> finish (); } @$names[$start .. $#$names]; } # -------------------------------------------------- # FUNCTION: key_names DBIF TABLE KEYSPEC [HASH] # # Returns array with key names for table TABLE. # Database connection DBIF may be used to # retrieve necessary information. # KEYSPEC contains desired keys, either a numeric # value or a comma-separated list of keys. # If HASH is set, a mapping between key name # and position is returned. # -------------------------------------------------- sub key_names () { my ($self, $table, $keyspec, $hash) = @_; my ($numkeysleft, $i); my @columns = $self->column_names ($table); my (@keys, %kmap); $keyspec =~ s/^\s+//; $keyspec =~ s/\s+$//; if ($keyspec =~ /^\d+$/) { # # passed keys are numeric, figure out the names # $numkeysleft = $keyspec; for ($i = 0; $i < $numkeysleft && $i < @columns; $i++) { if (keys %{$self->{usecol}}) { # note: we do not check the actual value !! if ($self->{colflag} && ! exists $self->{usecol}->{$columns[$i]}) { $numkeysleft++; next; } if (! $self->{colflag} && exists $self->{usecol}->{$columns[$i]}) { $numkeysleft++; next; } } if ($hash) { $kmap{$columns[$i]} = $i; } else { push (@keys, $columns[$i]); } } } else { # # key names are passed explicitly # my %colmap; for ($i = 0; $i < @columns; $i++) { $colmap{$columns[$i]} = $i; } for (split (/\s*,\s*/, $keyspec)) { # sanity check unless (exists $colmap{$_}) { die "$0: key \"$_\" appears not in column list\n"; } if ($hash) { $kmap{$_} = $colmap{$_}; } else { push (@keys, $_); } } } return $hash ? %kmap : @keys; } # FUNCTION: get_columns_csv IREF FD COLREF sub get_columns_csv { my ($self, $colref) = @_; my $line; my $msg; my $fd = $self->{fd_input}; while (defined ($line = <$fd>)) { if ($self->{parser}->parse($line)) { # csv line completed, delete buffer @$colref = $self->{parser}->fields(); $self->{buffer} = ''; return @$colref; } else { if (($line =~ tr/"/"/) % 2) { # odd number of quotes, try again with next line $self->{buffer} = $line; } else { $msg = "$0: $.: line not in CSV format: " . 
$self->{parser}->error_input() . "\n"; die ($msg); } } } } # ---------------------------------------- # FUNCTION: get_columns_tab IREF FD COLREF # # Get columns from a tab separated file. # ---------------------------------------- sub get_columns_tab { my ($self, $colref) = @_; my $line; my $fd = $self->{fd_input}; while (defined ($line = <$fd>)) { # skip empty/blank/comment lines next if $line =~ /^\#/; next if $line =~ /^\s*$/; # remove newlines and carriage returns chomp ($line); $line =~ s/\r$//; @$colref = split (/\t/, $line); return @$colref; } } # ---------------------------------------- # FUNCTION: get_columns_xls IREF FD COLREF # # Get columns from a XLS spreadsheet. # ---------------------------------------- sub get_columns_xls { my ($iref, $fd, $colref) = @_; unless ($iref->{workbook}) { # parse the spreadsheet once $iref->{workbook} = $iref->{object}->Parse($fd); unless ($iref->{workbook}) { die "$0: couldn't parse spreadsheet\n"; } $iref->{worksheet} = $iref->{workbook}->{Worksheet}[0]; $iref->{row} = 0; } if ($iref->{row} <= $iref->{worksheet}->{MaxRow}) { @$colref = map {$_->Value()} @{$iref->{worksheet}->{Cells}[$iref->{row}++]}; return @$colref; } } sub get_columns { my ($self, $colref) = @_; return $self->{func}->($self, $colref); } 1; dbix-easy-perl-0.21.orig/Makefile.PL0000644000000000000000000000137012271755224014070 0ustar use ExtUtils::MakeMaker; # See lib/ExtUtils/MakeMaker.pm for details of how to influence # the contents of the Makefile that is written. # List here scripts that should be installed my @scripts = ("./scripts/dbs_empty", "./scripts/dbs_update", "./scripts/dbs_printtab", "./scripts/dbs_dumptabdata", "./scripts/dbs_dumptabstruct"); WriteMakefile( 'NAME' => 'DBIx::Easy', AUTHOR => q{Stefan Hornburg (Racke) }, 'VERSION_FROM' => 'Easy.pm', # finds $VERSION 'ABSTRACT_FROM' => 'Easy.pm', 'INSTALLMAN3DIR' => '/usr/share/man/man3', 'INSTALLHTMLSCRIPTDIR' => '/usr/share/doc', 'INSTALLHTMLSITELIBDIR' => '/usr/share/doc', 'EXE_FILES' => \@scripts, ); dbix-easy-perl-0.21.orig/Easy.pm0000644000000000000000000011151412271756372013364 0ustar # Easy.pm - Easy to Use DBI interface # Copyright (C) 1999-2002 Stefan Hornburg, Dennis Schön # Copyright (C) 2003-2013 Stefan Hornburg (Racke) # Authors: Stefan Hornburg (Racke) # Dennis Schön # Maintainer: Stefan Hornburg (Racke) # Version: 0.21 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. package DBIx::Easy; use strict; use vars qw($VERSION @ISA @EXPORT @EXPORT_OK); require Exporter; @ISA = qw(Exporter); # Items to export into callers namespace by default. Note: do not export # names by default without a very good reason. Use EXPORT_OK instead. # Do not simply export all your public functions/methods/constants. 
@EXPORT = qw( ); # Public variables use vars qw($cache_structs); $VERSION = '0.21'; $cache_structs = 1; use DBI; =head1 NAME DBIx::Easy - Easy to Use DBI interface =head1 SYNOPSIS use DBIx::Easy; my $dbi_interface = new DBIx::Easy qw(Pg template1); $dbi_interface -> insert ('transaction', id => serial ('transaction', 'transactionid'), time => \$dbi_interface -> now); $dbi_interface -> update ('components', "table='ram'", price => 100); $rows_deleted = $dbi_interface -> delete ('components', 'stock = 0'); $dbi_interface -> makemap ('components', 'id', 'price', 'price > 10'); $components = $dbi_interface -> rows ('components'); $components_needed = $dbi_interface -> rows ('components', 'stock = 0'); =head1 DESCRIPTION DBIx::Easy is an easy to use DBI interface. Currently the Pg, mSQL, mysql, Sybase, ODBC and XBase drivers are supported. =head1 CREATING A NEW DBI INTERFACE OBJECT $dbi_interface = new DBIx::Easy qw(Pg template1); $dbi_interface = new DBIx::Easy qw(Pg template1 racke); $dbi_interface = new DBIx::Easy qw(Pg template1 racke aF3xD4_i); $dbi_interface = new DBIx::Easy qw(Pg template1 racke@linuxia.de aF3xD4_i); $dbi_interface = new DBIx::Easy qw(Pg template1 racke@linuxia.de:3306 aF3xD4_i); The required parameters are the database driver and the database name. Additional parameters are the database user and the password to access the database. To specify the database host use the USER@HOST notation for the user parameter. If you want to specify the port to connect to use USER@HOST:PORT. =head1 DESTROYING A DBI INTERFACE OBJECT It is important that you commit all changes at the end of the interaction with the DBMS. You can either explicitly commit $dbi_interface -> commit (); or do it implicitly: undef $dbi_interface; =head1 ERROR HANDLING sub fatal { my ($statement, $err, $msg) = @_; die ("$0: Statement \"$statement\" failed (ERRNO: $err, ERRMSG: $msg)\n"); } $dbi_interface -> install_handler (\&fatal); If any of the DBI methods fails, either I will be invoked or an error handler installed with I will be called. =head1 CACHING ISSUES By default, this module caches table structures. This can be disabled by setting I<$DBIx::Easy::cache_structs> to 0. =head1 XBASE DRIVER The DBIx::Easy method rows fails to work with the DBD::XBase driver. =cut # Private Variables # ================= my $maintainer_adr = 'racke@linuxia.de'; # Keywords for connect() my %kwmap = (mSQL => 'database', mysql => 'database', Pg => 'dbname', Sybase => 'database', ODBC => '', XBase => ''); my %kwhostmap = (mSQL => 'host', mysql => 'host', Pg => 'host', Sybase => 'server', ODBC => '', XBase => ''); my %kwportmap = (mysql => 'port', Pg => 'port'); my %kwutf8map = (mysql => 'mysql_enable_utf8', Pg => 'pg_enable_utf8', SQLite => 'sqlite_unicode', Sybase => 'syb_enable_utf8'); # Whether the DBMS supports transactions my %transactmap = (mSQL => 0, mysql => 0, Pg => 1, Sybase => 'server', ODBC => 0, XBase => 0); # Statement generators for serial() my %serialstatmap = (mSQL => sub {"SELECT _seq FROM $_[0]";}, Pg => sub {"SELECT NEXTVAL ('$_[1]')";}); # Statement for obtaining the table structure my %obtstatmap = (mSQL => sub {my $table = shift; "SELECT " . join (', ', @_) . " FROM $table WHERE 0 = 1";}, mysql => sub {my $table = shift; "SELECT " . join (', ', @_) . " FROM $table WHERE 0 = 1";}, Pg => sub {my $table = shift; "SELECT " . join (', ', @_) . " FROM $table WHERE FALSE";}, Sybase => sub {my $table = shift; "SELECT " . join (', ', @_) . " FROM $table WHERE 0 = 1";}, ODBC => sub {my $table = shift; "SELECT " . 
join (', ', @_) . " FROM $table WHERE 0 = 1";}, XBase => sub {my $table = shift; "SELECT " . join (', ', @_) . " FROM $table WHERE 0 = 1";}); # Supported functions my %funcmap = (mSQL => {COUNT => 0}, mysql => {COUNT => 1}, Pg => {COUNT => 1}, Sybase => {COUNT => 1}, ODBC => {COUNT => 0}); # Cache my %structs; # Preloaded methods go here. sub new { my $proto = shift; my $class = ref ($proto) || $proto; my $self = {}; $self ->{DRIVER} = shift; $self ->{DATABASE} = shift; $self ->{USER} = shift; # check for a host part if (defined $self->{USER} && $self->{USER} =~ /@/) { $self->{HOST} = $'; $self->{USER} = $`; } if (defined $self->{HOST} && $self->{HOST} =~ /:/) { $self->{PORT} = $'; $self->{HOST} = $`; } $self ->{PASS} = shift; $self ->{CONN} = undef; $self ->{HANDLER} = undef; # error handler bless ($self, $class); # sanity check: driver unless (defined ($self -> {DRIVER}) && $self->{DRIVER} =~ /\S/) { $self -> fatal ("No driver selected for $class."); } unless (exists $kwmap{$self -> {DRIVER}}) { $self -> fatal ("Sorry, $class doesn't support the \"" . $self -> {DRIVER} . "\" driver.\n" . "Please send mail to $maintainer_adr for more information.\n"); } # sanity check: database name unless (defined ($self -> {DATABASE}) && $self->{DATABASE} =~ /\S/) { # ok for sybase with host unless ($self->{DRIVER} eq 'Sybase' && $self->{HOST}) { $self -> fatal ("No database selected for $class."); } } return $self if $^O eq 'MSWin32'; # we may try to get password from DBMS specific # configuration file unless (defined $self->{PASS}) { unless (defined $self->{'USER'} && $self->{'USER'} ne getpwuid($<)) { $self->passwd(); } } return ($self); } # ------------------------------------------------------ # DESTRUCTOR # # If called for an object with established connection we # commit any changes. # ------------------------------------------------------ sub DESTROY { my $self = shift; if (defined ($self -> {CONN})) { unless ($self -> {CONN} -> {AutoCommit}) { $self -> {CONN} -> commit; } $self -> {CONN} -> disconnect; } } # ------------------------------ # METHOD: fatal # # Error handler for this module. # ------------------------------ sub fatal { my ($self, $info, $err) = @_; my $errstr = ''; if (defined $self -> {CONN}) { $err = $DBI::err; $errstr = $DBI::errstr; unless ($self -> {CONN} -> {AutoCommit}) { # something has gone wrong, rollback anything $self -> {CONN} -> rollback (); } } if (defined $self -> {'HANDLER'}) { &{$self -> {'HANDLER'}} ($info, $err, $errstr); } elsif (defined $self -> {CONN}) { die "$info (DBERR: $err, DBMSG: $errstr)\n"; } elsif ($err) { die "$info ($err)\n"; } else { die "$info\n"; } } # --------------------------------------------------------------- # METHOD: connect # # Establishes the connection to the database if not already done. # Returns database handle if successful, dies otherwise. # --------------------------------------------------------------- sub connect () { my $self = shift; my ($dsn, $oldwarn, %dbi_params); my $msg = ''; unless (defined $self -> {CONN}) { # build the data source string for DBI # ... the driver name $dsn .= 'dbi:' . $self -> {DRIVER} . ':'; # ... optionally the var part (ODBC has no vars) if ($kwmap{$self -> {DRIVER}}) { $dsn .= $kwmap{$self -> {DRIVER}} . "="; } # ... database name $dsn .= $self -> {DATABASE}; # ... optionally the host part if ($self -> {HOST}) { $dsn .= ';' . $kwhostmap{$self->{DRIVER}} . '=' . $self -> {HOST}; } # ... 
optionally the host part if ($self -> {PORT}) { if ($self->{PORT} =~ m%/% && $self->{DRIVER} eq 'mysql') { # got socket passed as port $dsn .= ';' . 'mysql_socket' . '=' . $self -> {PORT}; } else { $dsn .= ';' . $kwportmap{$self->{DRIVER}} . '=' . $self -> {PORT}; } } # install warn() handler to catch DBI error messages $oldwarn = $SIG{__WARN__}; $SIG{__WARN__} = sub {$msg = "@_";}; $dbi_params{AutoCommit} = !$transactmap{$self->{DRIVER}}; if (exists $kwutf8map{$self->{DRIVER}}) { $dbi_params{$kwutf8map{$self->{DRIVER}}} = 1; } $self->{CONN} = DBI->connect ($dsn, $self -> {USER}, $self -> {PASS}, \%dbi_params, ); # deinstall warn() handler $SIG{__WARN__} = $oldwarn; unless (defined $self -> {CONN}) { # remove file/line information from error message $msg =~ s/\s+at .*?line \d+\s*$//; # print error message in any case $self -> fatal ("Connection to database \"" . $self -> {DATABASE} . "\" couldn't be established", $msg); return; } } # no need to see SQL errors twice $self -> {CONN} -> {'PrintError'} = 0; $self -> {CONN}; } # ------------------------- # METHOD: process STATEMENT # ------------------------- =head1 METHODS =head2 DATABASE ACCESS =over 4 =item process I $sth = $dbi_interface -> process ("SELECT * FROM foo"); print "Table foo contains ", $sth -> rows, " rows.\n"; Processes I by just combining the I and I steps of the DBI. Returns statement handle in case of success. =back =cut sub process { my ($self, $statement) = @_; my ($sth, $rv); return unless $self -> connect (); # prepare and execute it $sth = $self -> {CONN} -> prepare ($statement) || $self -> fatal ("Couldn't prepare statement \"$statement\""); $rv = $sth -> execute () || $self -> fatal ("Couldn't execute statement \"$statement\""); $sth; } # ------------------------------------------------------ # METHOD: insert TABLE COLUMN VALUE [COLUMN VALUE] ... # # Inserts the given COLUMN/VALUE pairs into TABLE. # ------------------------------------------------------ =over 4 =item insert I I I [I I] ... $sth = $dbi_interface -> insert ('bar', drink => 'Caipirinha'); Inserts the given I/I pairs into I
. Determines from the SQL data type which values has to been quoted. Just pass a reference to the value to protect values with SQL functions from quoting. =back =cut sub insert ($$$;@) { my $self = shift; my $table = shift; my (@columns, @values); my ($statement, $sthtest, $flags); my ($column, $value); return unless $self -> connect (); while ($#_ >= 0) { $column = shift; $value = shift; push (@columns, $column); push (@values, $value); } $flags = $self->typemap($table); for (my $i = 0; $i <= $#values; $i++) { if (ref ($values[$i]) eq 'SCALAR') { $values[$i] = ${$values[$i]}; } elsif ($flags->{$columns[$i]} == DBI::SQL_INTEGER || $flags->{$columns[$i]} == DBI::SQL_SMALLINT || $flags->{$columns[$i]} == DBI::SQL_DECIMAL || $flags->{$columns[$i]} == DBI::SQL_FLOAT || $flags->{$columns[$i]} == DBI::SQL_REAL || $flags->{$columns[$i]} == DBI::SQL_DOUBLE || $flags->{$columns[$i]} == DBI::SQL_NUMERIC) { # we don't need to quote numeric values, but # we have to check for empty input unless (defined $values[$i] && $values[$i] =~ /\S/) { $values[$i] = 'NULL'; } } elsif (defined $values[$i]) { $values[$i] = $self -> quote ($values[$i]); } else { $values[$i] = 'NULL'; } } # now the statement $statement = "INSERT INTO $table (" . join (', ', @columns) . ") VALUES (" . join (', ', @values) . ")"; # process it $self -> {CONN} -> do ($statement) || $self -> fatal ("Couldn't execute statement \"$statement\""); } # --------------------------------------------------------------- # METHOD: update TABLE CONDITIONS COLUMN VALUE [COLUMN VALUE] ... # # Updates the rows matching CONDITIONS with the given # COLUMN/VALUE pairs and returns the number of the # modified rows. # --------------------------------------------------------------- =over 4 =item update I
I<conditions> I<column> I<value> [I<column> I<value>] ... $dbi_interface -> update ('components', "table='ram'", price => 100); $dbi_interface -> update ('components', "table='ram'", price => \"price + 20"); Updates any row of I<table>
which fulfill the I by inserting the given I/I pairs. Scalar references can be used to embed strings without further quoting into the resulting SQL statement. Returns the number of rows modified. =back =cut sub update { my $self = shift; my $table = shift; my $conditions = shift; my (@columns); my ($statement, $rv); my ($column, $value); # ensure that connection is established return unless $self -> connect (); while ($#_ >= 0) { $column = shift; $value = shift; # avoid Perl warning if (defined $value) { if (ref($value) eq 'SCALAR') { $value = $$value; } else { $value = $self -> {CONN} -> quote ($value); } } else { $value = 'NULL'; } push (@columns, $column . ' = ' . $value); } # now the statement $statement = "UPDATE $table SET " . join (', ', @columns) . " WHERE $conditions"; # process it $rv = $self -> {CONN} -> do ($statement); if (defined $rv) { # return the number of rows changed $rv; } else { $self -> fatal ("Couldn't execute statement \"$statement\""); } } # --------------------------------------------------------------- # METHOD: put TABLE CONDITIONS COLUMN VALUE [COLUMN VALUE] ... # # Either updates the rows matching CONDITIONS with the given # COLUMN/VALUE pairs or puts (inserts) them into TABLE. # Returns the number of modified rows (1 in case of an insert). # --------------------------------------------------------------- =over 4 =item put I
I I I [I I] ... =back =cut sub put { my $self = shift; my $table = shift; my $conditions = shift; # ensure that connection is established return unless $self -> connect (); # check for existing rows if ($self->rows($table, $conditions)) { $self->update($table, $conditions, @_); } else { $self->insert($table, @_); 1; } } # --------------------------------- # METHOD: delete TABLE [CONDITIONS] # --------------------------------- =over 4 =item delete I
I<conditions> $dbi_interface -> delete ('components', "stock=0"); Deletes any row of I<table>
which fulfill the I. Without conditions all rows are deleted. Returns the number of rows deleted. =back =cut sub delete { my ($self, $table, $conditions) = @_; my $sth; if ($conditions) { $sth = $self -> process ("delete from $table where $conditions"); } else { $sth = $self -> process ("delete from $table"); } $sth -> rows(); } # ---------------------------------------- # METHOD: do_without_transaction STATEMENT # ---------------------------------------- =over 4 =item do_without_transaction I $sth = $dbi_interface -> do_without_transaction ("CREATE DATABASE foo"); Issues a DBI do statement while forcing autocommit. This is used for statements that can't be run in transaction mode (like CREATE DATABASE in PostgreSQL). =back =cut sub do_without_transaction { my ($self, $statement) = @_; my ($rv, $autocommit); return unless $self -> connect (); $autocommit = $self -> {CONN} -> {AutoCommit}; # Force autocommit $self -> {CONN} -> {AutoCommit} = 1; $rv = $self -> {CONN} -> do ($statement); # Restore autocommit setting $self -> {CONN} -> {AutoCommit} = $autocommit; unless (defined $rv) { $self -> fatal ("Couldn't do statement \"$statement\""); } return $rv; } # ------------------------------- # METHOD: rows TABLE [CONDITIONS] # ------------------------------- =over 4 =item rows I
[I<conditions>] $components = $dbi_interface -> rows ('components'); $components_needed = $dbi_interface -> rows ('components', 'stock = 0'); Returns the number of rows within I<table>
satisfying I if any. =back =cut sub rows { my $self = shift; my ($table, $conditions) = @_; my ($sth, $aref, $rows); my $where = ''; if (defined ($conditions)) { $where = " WHERE $conditions"; } # use COUNT(*) if available if ($funcmap{$self -> {DRIVER}}->{COUNT}) { $sth = $self -> process ("SELECT COUNT(*) FROM $table$where"); $aref = $sth->fetch; $rows = $$aref[0]; } else { $sth = $self -> process ("SELECT * FROM $table$where"); $rows = $sth -> rows; } $rows; } # ----------------------------------------------- # METHOD: makemap TABLE KEYCOL VALCOL [CONDITION] # ----------------------------------------------- =over 4 =item makemap I
I<keycol> I<valcol> [I<condition>] $dbi_interface -> makemap ('components', 'idf', 'price'); $dbi_interface -> makemap ('components', 'idf', 'price', 'price > 10'); $dbi_interface -> makemap ('components', 'idf', '*'); $dbi_interface -> makemap ('components', 'idf', '*', 'price > 10'); Produces a mapping between the values within column I<keycol> and column I<valcol> from I<table>
. If an I is given, only rows matching this I are used for the mapping. In order to get the hash reference to the record as value of the mapping, use the asterisk as the I parameter. =back =cut sub makemap { my ($self, $table, $keycol, $valcol, $condition) = @_; my ($sth, $row, %map, $sel); my $condstring = ''; # check for condition if ($condition) { $condstring = " WHERE $condition"; } if ($valcol eq '*') { # need hash reference as value $sth = $self->process("SELECT * FROM $table$condstring"); while ($row = $sth -> fetchrow_hashref) { $map{$row->{$keycol}} = $row; } } else { # need particular field as value $sth = $self -> process ("SELECT $keycol, $valcol FROM $table$condstring"); while ($row = $sth -> fetch) { $map{$$row[0]} = $$row[1]; } } \%map; } # ----------------------------------------- # METHOD: random_row TABLE CONDITIONS [MAP] # ----------------------------------------- =over 4 =item random_row I
I<conditions> [I<map>] Returns a random row of the specified I<table>.
If I<map> is set, the result is a hash reference of the selected row, otherwise an array reference. If the table doesn't contain any rows, undef is returned. =back =cut #' sub random_row { my ($self, $table, $conditions, $map) = @_; my ($sth, $aref, $row); if ($conditions) { $sth = $self -> process ("select * from $table where $conditions"); } else { $sth = $self -> process ("select * from $table"); } cache ($table, 'NAME', $sth); $aref = $sth -> fetchall_arrayref (); if (@$aref) { $row = $aref->[int(rand(@$aref))]; if ($map) { # pass back hash reference fold ([$self->columns($table)], $row); } else { # pass back array reference $row; } } } # ------------------------------- # METHOD: serial TABLE SEQUENCE # ------------------------------- =over 4 =item serial I<table>
I<sequence> Returns a serial number for I<table>
by querying the next value from I<sequence>. Depending on the DBMS, one of the parameters is ignored: I<sequence> for mSQL and I<table>
for PostgreSQL. mysql doesn't support sequences, but the AUTO_INCREMENT keyword for fields. In this case this method returns 0 and mysql generates a serial number for this field. =back =cut #' sub serial { my $self = shift; my ($table, $sequence) = @_; my ($statement, $sth, $rv, $resref); return unless $self -> connect (); return ('0') if $self->{DRIVER} eq 'mysql'; # get the appropriate statement $statement = &{$serialstatmap{$self->{DRIVER}}}; # prepare and execute it $sth = $self -> process ($statement); unless (defined ($resref = $sth -> fetch)) { $self -> fatal ("Unexpected result for statement \"$statement\""); } $$resref[0]; } # --------------------------------------------------------- # METHOD: fill STH HASHREF [FLAG COLUMN ...] # # Fetches the next table row from the result stored in STH. # --------------------------------------------------------- =over 4 =item fill I I [I I ...] Fetches the next table row from the result stored into I and records the value of each field in I. If I is set, only the fields specified by the I arguments are considered, otherwise the fields specified by the I arguments are omitted. =back =cut sub fill { my ($dbi_interface, $sth, $hashref, $flag, @columns) = @_; my ($fetchref); $fetchref = $sth -> fetchrow_hashref; if ($flag) { foreach my $col (@columns) { $$hashref{$col} = $$fetchref{$col}; } } else { foreach my $col (@columns) { delete $$fetchref{$col}; } foreach my $col (keys %$fetchref) { $$hashref{$col} = $$fetchref{$col}; } } } # ------------------------------------------------------ # METHOD: view TABLE # # Produces text representation for database table TABLE. # ------------------------------------------------------ =over 4 =item view I
[I<name> I<value> ...] foreach my $table (sort $dbi_interface -> tables) { print $cgi -> h2 ('Contents of ', $cgi -> code ($table)); print $dbi_interface -> view ($table); } Produces a plain text representation of the database table I<table>
. This method accepts the following options as I/I pairs: B: Which columns to display. B: Which column to sort the row after. B: Maximum number of rows to display. B: Separator inserted between the columns. B: Display only rows matching this condition. print $dbi_interface -> view ($table, order => $cgi -> param ('order') || '', where => "price > 0"); =back =cut sub view { my ($self, $table, %options) = @_; my ($view, $sth); my ($orderstr, $condstr) = ('', ''); my (@fields); unless (exists $options{'limit'}) {$options{'limit'} = 0} unless (exists $options{'separator'}) {$options{'separator'} = "\t"} # anonymous function for cells in top row # get contents of the table if ((exists ($options{'order'}) && $options{'order'})) { $orderstr = " ORDER BY $options{'order'}"; } if ((exists ($options{'where'}) && $options{'where'})) { $condstr = " WHERE $options{'where'}"; } if ((exists ($options{'columns'}) && $options{'columns'})) { $sth = $self -> process ('SELECT ' . $options{'columns'} . " FROM $table$condstr$orderstr"); } else { $sth = $self -> process ("SELECT * FROM $table$condstr$orderstr"); } my $names = $sth -> {NAME}; $view = join($options{'separator'}, map {$_} @$names) . "\n"; my ($count, $ref); while($ref = $sth->fetch) { $count++; undef @fields; for (@$ref) { if (defined $_) { s/\n/\\n/sg; s/\t/\\t/g; push (@fields, $_); } else { push (@fields, ''); } } $view .= join($options{'separator'}, @fields) . "\n"; last if $count == $options{'limit'}; } # my $rows = $sth -> rows; # $view .="($rows rows)"; $view; } =head2 DATABASE INFORMATION =over 4 =item is_table I Returns truth value if there exists a table I in this database. =back =cut sub is_table { my ($self, $name) = @_; grep {$_ eq $name} ($self->tables); } =over 4 =item tables Returns list of all tables in this database. =back =cut sub tables { my $self = shift; my @t; if ($self->{DRIVER} eq 'mysql') { my $dbname = $self->{DATABASE}; @t = map {s/^(`\Q$dbname\E`\.)?`(.*)`$/$2/; $_} ($self -> connect () -> tables ()); } else { @t = $self -> connect () -> tables (); } return @t; } =over 4 =item sequences Returns list of all sequences in this database (Postgres only). =back =cut sub sequences { my $self = shift; my (@sequences, $sth, $row); if ($self->{DRIVER} eq 'Pg') { $sth = $self -> process ("SELECT relname FROM pg_class WHERE relkind = 'S'"); while ($row = $sth -> fetch ()) { push (@sequences, $$row[0]); } } return @sequences; } # ------------------------------------------ # METHOD: columns TABLE # # Returns list of the column names of TABLE. # ------------------------------------------ =over 4 =item columns I
Returns a list of the column names of I<table>
. =back =cut sub columns { my ($self, $table) = @_; my ($sth); my (@cols); if (@cols = cache($table, 'NAME')) { return @cols; } $sth = $self -> process ("SELECT * FROM $table WHERE 0 = 1"); cache($table, 'NAME', $sth); @{$sth->{NAME}}; } # ------------------- # METHOD: types TABLE # ------------------- =over 4 =item types I
Returns a list of the column types of I<table>
. =back =cut sub types { my ($self, $table) = @_; $self->info_proc($table, 'TYPE'); } # ------------------- # METHOD: sizes TABLE # ------------------- =over 4 =item sizes I
Returns a list of the column sizes of I<table>
. =back =cut sub sizes { my ($self, $table) = @_; $self->info_proc ($table, 'PRECISION'); } # --------------------- # METHOD: typemap TABLE # --------------------- =over 4 =item typemap I
Returns a mapping between column names and column types for table I<table>
. =back =cut sub typemap { my ($self, $table) = @_; $self->info_proc ($table, 'TYPE', 1); } # --------------------- # METHOD: sizemap TABLE # --------------------- =over 4 =item sizemap I
Returns a mapping between column names and column sizes for table I<table>
. =back =cut sub sizemap { my ($self, $table) = @_; $self->info_proc ($table, 'PRECISION', 1); } # --------------------------------------------------------------- # METHOD: add_columns # # Creates columns from a representation supplied by describe_table. # --------------------------------------------------------------- sub add_columns { my ($self, $table, $repref, @columns) = @_; my (%colref, $col, $cref, $null, $line, @stmts, $cmd); for $col (@columns) { $colref{$col} = 1; } for $cref (@{$repref->{columns}}) { next unless exists $colref{$cref->{Field}}; $colref{$cref->{Field}} = $cref; } for $col (@columns) { $cref = $colref{$col}; if ($cref->{Null} ne 'YES') { $null = ' NOT NULL '; } else { $null = ''; } $line = qq{alter table $table add $cref->{Field} $cref->{Type}$null $cref->{Extra} default '$cref->{Default}'}; push (@stmts, $line); } for $cmd (@stmts) { $self->process($cmd); } } # --------------------------------------------------------------- # METHOD: create_table # # Creates table from a representation supplied by describe_table. # --------------------------------------------------------------- sub create_table { my ($self, $table, $repref) = @_; my $crtstr = ''; my (@stmts, $line, $null, %icols, $colstr); for my $cref (@{$repref->{columns}}) { if ($cref->{Null} ne 'YES') { $null = ' NOT NULL '; } else { $null = ''; } $line = qq{$cref->{Field} $cref->{Type}$null $cref->{Extra} default '$cref->{Default}'}; push (@stmts, $line); } for my $cref (@{$repref->{indices}}) { push (@{$icols{$cref->{Key_name}}}, $cref->{Column_name}); } for my $cref (@{$repref->{indices}}) { next unless exists $icols{$cref->{Key_name}}; $colstr = join(', ', @{$icols{$cref->{Key_name}}}); if ($cref->{Key_name} eq 'PRIMARY') { $line = qq{PRIMARY KEY ($colstr)}; } else { $line = qq{KEY $cref->{Key_name} ($colstr)}; } push (@stmts, $line); delete $icols{$cref->{Key_name}}; } $crtstr = "create table $table (\n" . join(",\n", @stmts) . ")"; $self->process($crtstr); } # ------------------------------------------ # METHOD: describe_table # # Returns representation of the given table. # ------------------------------------------ sub describe_table { my ($self, $table) = @_; my ($sth, $href); my %rep = (columns => [], indices => []); $sth = $self->process("show columns from $table"); while (my $href = $sth->fetchrow_hashref()) { push (@{$rep{columns}}, $href); } $sth->finish(); $sth = $self->process("show index from $table"); while (my $href = $sth->fetchrow_hashref()) { push (@{$rep{indices}}, $href); } $sth->finish(); \%rep; } # -------------------------------------------- # METHOD: now # # Returns representation for the current time. # -------------------------------------------- =head2 TIME VALUES =over 4 =item now $dbi_interface -> insert ('transaction', id => serial ('transaction', 'transactionid'), time => \$dbi_interface -> now); Returns representation for the current time. Uses special values of the DBMS if possible. =back =cut sub now { my $self = shift; # Postgres and mysql have an special value for the current time if ($self->{DRIVER} eq 'Pg' || $self->{DRIVER} eq 'mysql') { return 'now()'; } # determine current time by yourself scalar (gmtime ()); } # -------------------------------------------------- # METHOD: money2num MONEY # # Converts the monetary value MONEY (e.g. $10.00) to # a numeric one. # -------------------------------------------------- =head2 MONETARY VALUES =over 4 =item money2num I Converts the monetary value I to a numeric one. 
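A quick illustration (the amount shown is made up): the dollar sign and any commas are stripped, and a trailing ".00" is dropped.

    $price = $dbi_interface -> money2num ('$1,299.00');   # yields 1299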
=back =cut sub money2num { my ($self, $money) = @_; # strip leading dollar sign $money =~ s/\$//; # remove commas $money =~ s/,//g; # ignore empty pennies $money =~ s/\.00$//; $money; } # METHOD: filter HANDLE FUNC [TABLE] OPT my %filter_default_opts = (col_delim => "\t", col_delim_rep => '\t', prepend_key => undef, row_delim => "\n", row_delim_rep => '\n', return => '', prepend_row => undef); sub filter { my ($self, $sth, $opt) = @_; my (@keys, $row, @ret); for (keys %filter_default_opts) { $opt->{$_} = $filter_default_opts{$_} unless defined $opt->{$_}; } if ($opt->{prepend_key}) { @keys = @{$sth->{NAME}}; } while ($row = $sth->fetch()) { if ($opt->{return} eq 'keys') { push(@ret, $row->[0]); } my @f; my $i = 0; for my $f (@$row) { $f = '' unless defined $f; $f =~ s/$opt->{row_delim}/$opt->{row_delim_rep}/g; $f =~ s/$opt->{col_delim}/$opt->{col_delim_rep}/g; if (defined $opt->{prepend_key}) { $f = $keys[$i++] . $opt->{prepend_key} . $f; } push(@f, $f); } if (defined $opt->{prepend_row}) { print $opt->{prepend_row}; } print join($opt->{col_delim}, @f), $opt->{row_delim}; } @ret; } # ----------------------------------------------------- # METHOD: is_auth_error MSG # ----------------------------------------------------- =head2 MISCELLANEOUS =over 4 =item is_auth_error I This method decides if the error message I is caused by an authentification error or not. =back =cut sub is_auth_error { my ($self, $msg) = @_; if ($self->{DRIVER} eq 'mysql') { if ($msg =~ /^DBI\sconnect(\('database=.*?(;host=.*?)?',.*?\))? failed: Access denied for user\s/) { return 1; } if ($msg =~ /^DBI->connect(\(database=.*?(;host=.*?)?\))? failed: Access denied for user:/) { return 1; } } elsif ($self->{DRIVER} eq 'Pg') { if ($msg =~ /^DBI\sconnect(\('dbname=.*?(;host=.*?)?',.*?\))? failed:.+no password supplied/) { return 1; } if ($msg =~ /^DBI->connect failed.+no password supplied/) { return 1; } } } # ------------------------------------------ # METHOD: passwd # # Determines password for current user. # This method is implemented only for Mysql, # where we can look it up in ~/my.cnf. # ------------------------------------------ sub passwd { my ($self) = shift; my $clientsec = 0; my ($mycnf, $option, $value); # implemented only for mysql return unless $self->{'DRIVER'} eq 'mysql'; # makes sense only for the localhost return if $self->{'HOST'}; # determine home directory if (exists $ENV{'HOME'} && $ENV{'HOME'} =~ /\S/ && -d $ENV{'HOME'}) { $mycnf = $ENV{'HOME'}; } else { $mycnf = (getpwuid($>)) [7]; } $mycnf .= '/.my.cnf'; # just give up if file is not accessible open (CNF, $mycnf) || return; while () { # ignore comments and blank lines next if /^\#/ or /^;/; next unless /\S/; # section ? 
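        # a "[section]" header line switches the current section;
        # only the password option found in the [client] section is used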
if (/\[(.*?)\]/) { if (lc($1) eq 'client') { $clientsec = 1; } else { $clientsec = 0; } } elsif ($clientsec) { # in the [client] section check for password option ($option, $value) = split (/=/, $_, 2); if ($option =~ /\s*password\s*/) { $value =~ s/^\s+//; $value =~ s/\s+$//; $self->{'PASS'} = $value; last; } } } close (CNF); } # install error handler sub install_handler {$_[0] -> {'HANDLER'} = $_[1];} # direct interface to DBI sub prepare {my $self = shift; $self -> connect () -> prepare (@_);} sub commit {my $self = shift; $self->connect (); return if $self->{CONN}->{AutoCommit}; $self->{CONN}->commit();} sub rollback {$_[0] -> connect () -> rollback ();} sub quote {$_[0] -> connect () -> quote ($_[1]);} # auxiliary functions # ---------------------------------------------------------------- # FUNCTION: cache TABLE TYPE [HANDLE] # # This function handles the internal caching of table informations # like column names and types. # # If HANDLE is provided, the information will be fetched from # HANDLE and stored cache, otherwise the information from the # cache will be returned. # ---------------------------------------------------------------- sub cache { my ($table, $type, $handle) = @_; my (@types); if ($cache_structs) { if ($handle) { $structs{$table}->{$type} = $handle->{$type}; } else { if (exists $structs{$table} && exists $structs{$table}->{$type}) { return @{$structs{$table}->{$type}}; } } } return; } # ---------------------------------------------- # FUNCTION: fold ARRAY1 ARRAY2 # # Returns mapping between the elements of ARRAY1 # and the elements fo ARRAY2. # ---------------------------------------------- sub fold { my ($array1, $array2) = @_; my (%hash); for (my $i = 0; $i < @$array1; $i++) { $hash{$$array1[$i]} = $$array2[$i]; } \%hash; } # ----------------------------------------------- # METHOD: info_proc TABLE TOKEN [WANTHASH] # # Returns information about the columns of TABLE. # TOKEN should be either NAME or PRECISION. # ----------------------------------------------- sub info_proc { my ($self, $table, $token, $wanthash) = @_; my $sth; if ($cache_structs) { unless (exists $structs{$table}) { $sth = $self -> process ("SELECT * FROM $table WHERE 0 = 1"); for ('NAME', 'PRECISION', 'TYPE') { $structs{$table}->{$_} = $sth->{$_}; } } if ($wanthash) { fold ($structs{$table}->{NAME}, $structs{$table}->{$token}); } else { @{$structs{$table}->{$token}}; } } else { $sth = $self -> process ("SELECT * FROM $table WHERE 0 = 1"); if ($wanthash) { fold ($sth->{NAME}, $sth->{PRECISION}); } else { @{$sth->{$token}}; } } } 1; __END__ =head1 AUTHORS Stefan Hornburg (Racke), racke@linuxia.de Dennis Sch\[:o]n, ds@1d10t.de Support for Sybase and ODBC provided by David B. Bitton . =head1 VERSION 0.20 =head1 SEE ALSO perl(1), DBI(3), DBD::Pg(3), DBD::mysql(3), DBD::msql(3), DBD::Sybase(3), DBD::ODBC(3). 
=cut dbix-easy-perl-0.21.orig/META.yml0000644000000000000000000000104612776163707013400 0ustar --- abstract: 'Easy to Use DBI interface' author: - 'Stefan Hornburg (Racke) ' build_requires: ExtUtils::MakeMaker: '0' configure_requires: ExtUtils::MakeMaker: '0' dynamic_config: 1 generated_by: 'ExtUtils::MakeMaker version 7.1002, CPAN::Meta::Converter version 2.150010' license: unknown meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: DBIx-Easy no_index: directory: - t - inc requires: {} version: '0.21' x_serialization_backend: 'CPAN::Meta::YAML version 0.018' dbix-easy-perl-0.21.orig/MANIFEST0000644000000000000000000000064412776163707013263 0ustar Changes MANIFEST README LIESMICH Easy.pm Easy/Import.pm Makefile.PL debian/changelog debian/compat debian/rules debian/control debian/copyright scripts/dbs_dumptabdata scripts/dbs_dumptabstruct scripts/dbs_empty scripts/dbs_printtab scripts/dbs_update META.yml Module YAML meta-data (added by MakeMaker) META.json Module JSON meta-data (added by MakeMaker) dbix-easy-perl-0.21.orig/Changes0000644000000000000000000003403612271757534013424 0ustar Revision history for Perl module DBIx::Easy. * Tue Jan 28 17:48:27 2014 CET DBIx::Easy v0.21 released. * Tue Jan 28 15:42:38 2014 CET Marco Pessotto Fix regular expression to extract table names. Fix bug with incoming parameters in is_table method. * Tue Nov 19 11:16:49 2013 CET DBIx::Easy v0.20 released. * Thu Jul 5 11:39:42 2012 CEST Stefan Hornburg (Racke) Use table structure cache for insert method as well to prevent one extra select query for each insert query. * Mon Jan 23 16:59:43 2012 CET Stefan Hornburg (Racke) Enable UTF8 flag by default for MySQL, PostgreSQL, Sqlite and Sybase drivers. * Sun Jan 22 14:02:29 2012 CET Stefan Hornburg (Racke) DBIx::Easy v0.19 released * Sun Jan 22 13:47:20 2012 CET Stefan Hornburg (Racke) (scripts/dbs_update): fix CSV imports * Tue Nov 1 17:07:06 2011 CET Stefan Hornburg (Racke) (scripts/dbs_update): use Val hash key instead of Value method to retrieve values from spreadsheet cells. * Tue Nov 1 16:49:32 2011 CET Stefan Hornburg (Racke) (scripts/dbs_update): fix crash with undefined values inside of spreadsheet cells. * Sat Oct 29 15:31:28 2011 CEST (scripts/dbs_update): add --insert-only commandline option. * Thu Oct 20 11:42:05 2011 CEST (Easy.pm): add special case for specifiying mysql_socket. * Thu Oct 20 10:56:52 2011 CEST (scripts): switch to /usr/bin/env in shebang. 
* Thu Jan 8 19:32:57 2009 CET Stefan Hornburg (Racke) DBIx::Easy v0.18 released * Mon Dec 15 09:14:31 2008 CET Stefan Hornburg (Racke) (Easy.pm): additional pattern for login failure to PostgreSQL databases * Fri Apr 18 15:21:08 2008 CEST Jure Kodzoman (Easy.pm): new method do_without_transaction * Mon May 21 11:38:02 2007 CEST DBIx::Easy v0.17 released * Fri May 18 17:27:15 2007 CEST (Easy.pm): MySQL 5 has a different message for bad credentials commit only if AutoCommit is turned off to avoid spurious warnings * Fri May 18 00:17:37 2007 CEST (Easy.pm): fixed now method for Postgresql * Thu May 3 09:53:42 2007 CEST (dbs_dumptabstruct): new option --dump-options * Thu Jul 13 13:31:15 2006 CEST (Easy.pm): escape umlauts in POD documentation * Wed May 3 12:01:07 2006 CEST (Easy.pm): remove object from argument list before calling DBI prepare method * Sat Oct 8 01:24:40 2005 CEST DBIx::Easy v0.16 released * Thu Jan 13 17:57:29 2005 CET Stefan Hornburg (Racke) (Easy.pm): connect first before issue prepare statement * Sun Jan 9 13:34:45 2005 CET Stefan Hornburg (Racke) (Easy.pm): remove quotes from table names in tables method (dbs_dumptabstruct): put table names into quotes in system commands * Mon Jul 5 15:30:56 2004 CEST Stefan Hornburg (Racke) (Easy.pm): don't call passwd method on Win32 machines * Tue Feb 10 14:10:04 2004 CET Stefan Hornburg (Racke) (Easy.pm): return on connections errors from various methods to allow failover with custom error handler (Import.pm): new module * Mon Feb 9 22:54:01 2004 CET Stefan Hornburg (Racke) (dbs_update): new option --map-filter (Easy.pm): Sybase support fixed, allow empty database name for Sybase, avoid empty parentheses on some fatal errors * Wed Jan 21 18:15:03 2004 CET Stefan Hornburg (Racke) (dbs_update): don't insert a column twice if used both in --map and --columns * Sat Oct 4 00:04:24 2003 CEST Stefan Hornburg (Racke) (Easy.pm): rollback method added * Wed Oct 1 13:08:46 2003 CEST Stefan Hornburg (Racke) (Easy.pm): delete with empty conditions parameter fails, new methods describe_table, create_table * Mon Jun 16 14:07:13 2003 CEST Stefan Hornburg (Racke) (Easy.pm): explicit return added to cache method avoids malfunction if method columns is called first * Tue May 13 14:40:21 2003 CEST Stefan Hornburg (Racke) (Easy.pm): scalar references allow embedding strings without further quoting in the update method as well * Thu Mar 13 17:38:09 2003 CET Stefan Hornburg (Racke) (Easy.pm): fixed fallback in passwd method * Wed Mar 5 18:15:09 2003 CET Stefan Hornburg (Racke) (Easy.pm): new method put * Sun Feb 2 11:28:27 2003 CET Stefan Hornburg (Racke) (Easy.pm): makemap now can be directed to put the whole record into the values of the resulting hash * Mon Dec 16 15:33:20 2002 CET Stefan Hornburg (Racke) (scripts/dbs_update): allow input files in XLS format * Fri Apr 19 19:23:49 2002 CEST Stefan Hornburg (Racke) (Easy.pm): new display method (scripts/dbs_dumptabstruct): --filter option added * Sat Mar 16 13:00:32 2002 CET Stefan Hornburg (Racke) DBIx::Easy v0.15 released * Sun Feb 24 22:00:32 2002 -0500 David B. 
Bitton (Easy.pm): support for Sybase and ODBC * Sat Dec 9 02:13:00 2001 CET Stefan Hornburg (Racke) DBIx::Easy v0.14 released * Sun Nov 18 14:09:59 2001 CET Stefan Hornburg (Racke) (Easy.pm): implemented the optional map parameter in the random_row() method which was already documented * Thu Sep 20 17:20:50 2001 CEST Stefan Hornburg (Racke) DBIx::Easy v0.13 released * Thu Sep 20 17:19:19 2001 CEST Stefan Hornburg (Racke) (scripts/dbs_update): --cleanse could fail if one of the keys is '0' * Wed Aug 8 00:15:25 2001 CEST Stefan Hornburg (Racke) (Easy.pm): don't use password from ~/.my.cnf for databases on other hosts (scripts/dbs_dumptabdata): dbs_printtab is emulated instead called as program * Tue Aug 7 23:50:54 2001 CEST Stefan Hornburg (Racke) (Easy.pm): new method is_auth_error * Tue Aug 7 22:20:36 2001 CEST Stefan Hornburg (Racke) (scripts/dbs_update): --headline and --format=CSV weren't fully compatible * Mon Aug 6 12:16:17 2001 CEST Stefan Hornburg (Racke) (scripts/dbs_printtab): new option --sort * Tue Jul 24 16:25:10 2001 CEST Dennis Schön (Easy.pm): database port is configurable now * Tue Jul 17 14:14:43 2001 CEST Stefan Hornburg (Racke) DBIx::Easy v0.12 released * Tue Jul 17 13:30:23 2001 CEST Stefan Hornburg (Racke) (Easy.pm): new method random_row * Tue Jul 17 11:51:08 2001 CEST Dennis Schön violation of Debian policy fixed (.packlist in binary package) * Fri Jun 29 11:10:34 2001 CEST Stefan Hornburg (Racke) DBIx::Easy v0.11 released * Sun Jun 24 23:54:34 2001 CEST Stefan Hornburg (Racke) (Easy.pm): new method delete * Wed Mar 28 12:44:31 2001 CEST Stefan Hornburg (Racke) (scripts/dbs_update): new option --match-sql * Thu Mar 15 23:54:23 EST 2001 Dennis Schön (scripts/{dbs_dumptabdata,dbs_dumptabstruct,dbs_printtab}): Database handler warning fixed * Mon Feb 12 10:40:37 2001 CET Dennis Schön (scripts/dbs_update): Perl 5.6 warnings fixed * Sat Feb 10 01:10:29 2001 CET Stefan Hornburg (scripts/dbs_update): Perl warning fixed * Tue Feb 6 16:46:51 2001 CET Stefan Hornburg DBIx::Easy v0.10 released * Thu Jan 25 10:18:43 2001 CET Dennis Schön (scripts/dbs_dumptabdata): pass user parameter to dbs_printtab * Mon Jan 15 23:20:43 2001 CET Stefan Hornburg DBIx::Easy v0.09 released * Tue Dec 26 15:07:47 2000 CET Stefan Hornburg (scripts/dbs_update): insert an empty string, even if undef is given * Thu Nov 2 04:55:45 2000 CET Stefan Hornburg (Easy.pm): escape tabulators in view method (scripts/dbs_update): expand tabulators, add columns explicitly selected to column list * Sun Oct 8 15:47:21 2000 CEST Stefan Hornburg (scripts/dbs_update): issue warning on missing type information * Thu Sep 28 14:07:50 2000 CEST Stefan Hornburg (scripts/dbs_update): return values of user-defined filters indicate if record is valid * Sun Sep 24 20:21:23 2000 CEST Stefan Hornburg (scripts/dbs_update): apply user-defined filter routine only once * Mon Sep 18 17:32:56 2000 CEST Stefan Hornburg (scripts/dbs_dumptabdata, scripts/dbs_dumptabstruct): pass options first to mysqldump for compatibility with older versions * Sun Sep 17 13:10:37 2000 CEST Stefan Hornburg (scripts/dbs_update): make --columns to work with inclusions again * Fri Sep 15 18:32:35 2000 CEST Stefan Hornburg (scripts/dbs_update): new option --rows, option --keys accepts key names now, new functions key_names and prefix (Easy.pm): caches table structures now, new methods columns, types, sizes, typemap, sizemap, fold and info_proc * Tue Sep 12 10:38:22 2000 CEST Stefan Hornburg (scripts/dbs_update): now really choosed comma as default field 
separator for CSV format * Fri Sep 8 21:58:04 2000 CEST Stefan Hornburg DBIx::Easy v0.08 released * Fri Sep 8 21:56:59 2000 CEST Stefan Hornburg (scripts/dbs_update): strip whitespace from column names found in the headline * Thu Sep 7 17:04:24 2000 CEST Stefan Hornburg (scripts/dbs_update): comma is now the default field separator for CSV format, field separator can selected from the commandline * Tue Sep 5 15:14:28 2000 CEST Stefan Hornburg (scripts/dbs_update): brown paper bug fixed that kept --cleanse from working by growing the SQL statement which each record to be cleansed * Sun Sep 3 20:37:41 2000 CEST Stefan Hornburg (scripts/dbs_update): bug fixed that may be trigged by the combination of --headline and --map, assorted error messages improved, example for --routine added to documentation * Sun Sep 3 19:03:08 2000 CEST Stefan Hornburg (scripts/dbs_update): strip blanks from parameter list for --columns * Sun Sep 3 17:03:46 2000 CEST Stefan Hornburg (scripts/dbs_update): --columns can be used to exclude columns explicitly * Fri Sep 1 02:20:09 2000 CEST Stefan Hornburg (scripts/dbs_update): first field may contain table name and start column * Mon Aug 28 23:42:36 2000 CEST Stefan Hornburg DBIx::Easy v0.07 released * Wed Jul 19 00:07:26 2000 CEST Stefan Hornburg (Easy.pm): documentation for is_table and tables added, new method sequences (scripts/dbs_dumptabstruct): dump sequences too * Mon Jul 17 12:55:53 2000 CEST Stefan Hornburg (scripts/dbs_dumptabstruct): new option --pipe * Sun Jul 16 16:24:49 2000 CEST Stefan Hornburg (scripts/dbs_update): --table and --columns are working together now, handle keys excessing the column size * Sun Jul 16 13:01:44 2000 CEST Stefan Hornburg (scripts/dbs_update): problems with --cleanse fixed * Tue Jul 4 16:18:32 2000 CEST Stefan Hornburg (Easy.pm:view, script/dbs_printtab, scripts/dbs_update): new option --columns (scripts/dbs_update): new option --cleanse * Sat Jul 1 12:34:28 2000 CEST Stefan Hornburg (scripts/dbs_update): handles special characters and multilines with CSV format now * Fri Jun 23 11:36:27 2000 CEST Stefan Hornburg (scripts/dbs_update): several new options * Mon May 8 02:29:47 2000 CEST Stefan Hornburg (scripts/dbs_update): remove carriage returns from input file * Sun May 7 16:47:59 2000 CEST Stefan Hornburg (scripts/dbs_update): renember table name if given explicitly * Sat Apr 1 15:39:16 2000 CEST Stefan Hornburg (scripts/dbs_dumptabstruct, scripts/dbs_dumptabdata): new option --tables * Thu Mar 23 20:15:13 2000 CET Stefan Hornburg (Easy.pm): better method to determine directory, force commit documented in POD (scripts/dbs_empty): ensure that all changes are commited * Thu Mar 23 16:05:33 2000 CET Stefan Hornburg (scripts/dbs_update): expand newlines (Easy.pm): escape newlines in view method * Wed Mar 22 16:16:22 2000 CET Stefan Hornburg (scripts/dbs_empty): new option --tables * Mon Mar 20 00:22:14 2000 CET Stefan Hornburg (scripts/dbs_update): more verbose error messages, ensure that all changes are committed * Sun Mar 19 11:20:25 2000 CET Stefan Hornburg (scripts/dbs_dumptab*): typo fixed (scripts/dbs_dumptabdata): new option --use-printtab * Sat Mar 18 23:15:22 2000 CET Stefan Hornburg (scripts/dbs_printtab): new script (Easy.pm): added option separator to view method * Tue Feb 29 19:33:13 2000 CET Stefan Hornburg (Easy.pm): made more verbose if connection cannot be established method view fixed in respect to limit option * Fri Jan 21 10:59:03 2000 CET Stefan Hornburg (Easy.pm): optional parameter CONDITION added to method 
makemap * Thu Jan 20 12:56:10 2000 CET Stefan Hornburg (scripts/dbs_empty, scripts/dbs_dumptabstruct, scripts/dumptabdata): new option --exclude-matching-tables * Sat Jan 15 15:24:30 2000 CET Stefan Hornburg (Easy.pm): don't call rollback within error handler if AutoCommit is enabled * Sun Jan 2 17:48:46 2000 CET Stefan Hornburg (Easy.pm): new method is_table (scripts/dbs_update): new option --map * Thu Dec 16 23:15:37 1999 CET Stefan Hornburg DBIx::Easy v0.06 released * Sat Dec 4 23:19:20 1999 CET Stefan Hornburg (scripts/dbs_update): asks for a password if authentification fails, option -f/--file added (Easy.pm): new method passwd * Wed Dec 1 11:45:00 1999 CET Stefan Hornburg (scripts/dbs_dumptabdata, scripts/dbs_empty): new script(s) * Fri Nov 26 01:01:22 1999 CET Stefan Hornburg (scripts/dbs_dumptabstruct): POD documentation added (scripts/dbs_update): fixed old script name in POD section dbix-easy-perl-0.21.orig/META.json0000644000000000000000000000161012776163707013545 0ustar { "abstract" : "Easy to Use DBI interface", "author" : [ "Stefan Hornburg (Racke) " ], "dynamic_config" : 1, "generated_by" : "ExtUtils::MakeMaker version 7.1002, CPAN::Meta::Converter version 2.150010", "license" : [ "unknown" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : "2" }, "name" : "DBIx-Easy", "no_index" : { "directory" : [ "t", "inc" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "runtime" : { "requires" : {} } }, "release_status" : "stable", "version" : "0.21", "x_serialization_backend" : "JSON::PP version 2.27300_01" } dbix-easy-perl-0.21.orig/debian/0000755000000000000000000000000012776163707013350 5ustar dbix-easy-perl-0.21.orig/debian/changelog0000644000000000000000000000675012776163707015232 0ustar dbix-easy-perl (0.21-1) unstable; urgency=low * New upstream version * Switch to generic debian/rules file (Closes: #831923). * Bump up Standards-Version. -- Stefan Hornburg (Racke) Sat, 08 Oct 2016 14:34:15 +0200 dbix-easy-perl (0.17-1.1) unstable; urgency=medium * Non-maintainer upload. * Fix "FTBFS with perl 5.22 in experimental (MakeMaker changes)": use DESTDIR in debian/rules. 
(Closes: #792376) -- gregor herrmann Wed, 26 Aug 2015 13:48:35 +0200 dbix-easy-perl (0.17-1) unstable; urgency=low * New upstream version * Use $(CURDIR) instead of $(PWD) in debian/rules * Increase debhelper compatibility level to 5 * Increase standards version to 3.7.2 * Use Build-Depends instead of Build-Depends-Indep for debhelper -- Stefan Hornburg (Racke) Mon, 21 May 2007 11:38:02 +0200 dbix-easy-perl (0.16-1) unstable; urgency=low * New upstream version which fixes filename with quotes produced by dbs_dumptabstruct (Closes: #276578) * Added Suggests for the supported DBD packages in Debian * Added new drivers from 0.15 release to package description * Bumped up Standards-Version * Changed section from interpreters to perl -- Stefan Hornburg (Racke) Sat, 8 Oct 2005 01:13:57 +0200 dbix-easy-perl (0.15-2) unstable; urgency=low * New maintainer -- Stefan Hornburg (Racke) Sat, 2 Oct 2004 10:50:02 +0200 dbix-easy-perl (0.15-1) unstable; urgency=low * New upstream version (Closes: #135638) -- Dennis Schoen Sat, 16 Mar 2002 12:54:12 +0100 dbix-easy-perl (0.14-1) unstable; urgency=low * New upstream version (Closes: #119282) -- Dennis Schoen Sun, 9 Dec 2001 02:13:00 +0100 dbix-easy-perl (0.13-2) unstable; urgency=low * removed manpage in /usr/man (Closes: #116051) -- Dennis Schoen Thu, 18 Oct 2001 11:45:37 +0200 dbix-easy-perl (0.13-1) unstable; urgency=medium * New upstream version -- Dennis Schoen Thu, 20 Sep 2001 21:07:31 +0200 dbix-easy-perl (0.12-1) unstable; urgency=low * New upstream version -- Dennis Schoen Tue, 17 Jul 2001 13:35:36 +0200 dbix-easy-perl (0.11-2) unstable; urgency=low * no longer ship .packlist file (Closes: #104756) -- Dennis Schoen Tue, 17 Jul 2001 11:51:08 +0200 dbix-easy-perl (0.11-1) unstable; urgency=low * New upstream version -- Dennis Schoen Sun, 24 Jun 2001 23:49:04 +0200 dbix-easy-perl (0.10-1) unstable; urgency=low * New upstream version -- Dennis Schoen Tue, 6 Feb 2001 14:07:09 +0100 dbix-easy-perl (0.09-1) unstable; urgency=medium * New upstream version -- Dennis Schoen Mon, 15 Jan 2001 23:18:36 +0100 dbix-easy-perl (0.08-1) unstable; urgency=high * New upstream version * HTMLized manual pages added -- Dennis Schoen Tue, 5 Sep 2000 15:20:44 +0200 dbix-easy-perl (0.07-1) unstable; urgency=low * New upstream Version and new maintainer * Closes:Bug#70348 -- Dennis Schoen Fri, 23 Jun 2000 18:29:55 +0200 dbix-easy-perl (0.06-2) unstable; urgency=low * Sponsoring upload for Dennis Schoen -- Dennis Schoen and Alexander Koch Mon, 1 Jan 2000 10:04:13 +0100 dbix-easy-perl (0.06-1) unstable; urgency=low * Initial Release. -- Dennis Schoen Thu, 25 Nov 1999 19:03:09 +0100 dbix-easy-perl-0.21.orig/debian/compat0000755000000000000000000000000112776163707014550 0ustar 9dbix-easy-perl-0.21.orig/debian/copyright0000644000000000000000000000065012271755224015273 0ustar This package was debianized by Dennis Schön on Thu, 25 Nov 1999 19:03:09 +0100. It was downloaded from http://www.linuxia.de/DBIx/Easy/DBIx-Easy.tar.gz Upstream Authors: Stefan Hornburg Dennis Schön Copyright: It may be redistributed under the terms of the GNU GPL, Version 2 or later, found on Debian systems in the file /usr/share/common-licenses/GPL. 
dbix-easy-perl-0.21.orig/debian/rules0000755000000000000000000000003612776163707014427 0ustar #!/usr/bin/make -f %: dh $@ dbix-easy-perl-0.21.orig/debian/control0000644000000000000000000000120112776163707014745 0ustar Source: dbix-easy-perl Section: perl Priority: optional Maintainer: Stefan Hornburg (Racke) Standards-Version: 3.9.8 Build-Depends: debhelper Package: libdbix-easy-perl Architecture: all Depends: ${perl:Depends}, ${misc:Depends}, libdbi-perl Recommends: libterm-readkey-perl, libtext-csv-perl Suggests: libdbd-mysql-perl | libdbd-pg-perl Description: Easy to Use DBI Interface DBIx::Easy is an easy to use DBI interface. Currently the Pg, mSQL, mysql, sybase and ODBC drivers are supported. Note that you also need one of the DBD::* drivers to use this module. For additional information please refer to the manpage. dbix-easy-perl-0.21.orig/README0000644000000000000000000001225212271755224012777 0ustar DBIx::Easy - Easy to Use DBI Interface ************************************** * The homepage of `DBIx::Easy' is http://www.linuxia.de/DBIx/Easy/. * The latest version (currently 0.15) is always available as http://www.linuxia.de/DBIx/Easy/DBIx-Easy.tar.gz. * See the manual page for more informations. Requirements ************ * DBI (URL: http://www.symbolstone.org/technology/perl/DBI/index.html) * `DBD::Pg', `DBD::mysql', `DBD::mSQL', `DBD::ODBC' or `DBD::sybase' (ask for other database drivers) Installation ************ tar -xzf DBIx-Easy.tar.gz cd DBIx-Easy-0.15 perl Makefile.PL make make install Scripts ******* The `DBIx::Easy' package contains the following scripts: Name Description Remarks `dbs_dumptabdata' creates file set with SQL table data Manual page `dbs_dumptabstruct' creates file set with SQL table schemas Manual page `dbs_empty' erases all data from a SQL database Manual page `dbs_printtab' prints data from SQL table New in 0.07 Manual page `dbs_update' update SQL database from text file Manual page Debian Package ************** This software is also available as Debian package, maintained by Dennis Schön . The download address is http://www.linuxia.de/DBIx/Easy/libdbix-easy-perl_0.15-1_all.deb. 
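Module Usage (sketch)
*********************

The scripts listed above drive `DBIx::Easy' from the command line; the module
can also be used directly from Perl programs. The following is only a rough
sketch for illustration: the database name `test', the table `transaction' and
its columns are invented and not part of the distribution.

   use DBIx::Easy;

   # connect as the current user to a local PostgreSQL database
   my $dbif = new DBIx::Easy ('Pg', 'test');

   # insert a record, then update it; both take column => value pairs
   $dbif->insert ('transaction', id => 1, status => 'new');
   $dbif->update ('transaction', 'id = 1', status => 'done');

   # print the table contents as tab-separated text
   print $dbif->view ('transaction');
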
Changes ******* 0.15 **** `Easy.pm' ========= * support for ODBC and Sybase added 0.14 **** `Easy.pm' ========= * implemented the optional `map' parameter in the `random_row' method which was already documented 0.13 **** `Easy.pm' ========= * don't use password from `~/.my.cnf' for databases on other hosts * database port is configurable now * new method `is_auth_error' `dbs_dumptabdata' ================= * `--cleanse' failed in some case if one of the keys was '`0'' `dbs_printtab' ============== * new option `--sort' `dbs_update' ============ * `dbs_printtab' is emulated instead called as program * `--headline' and `--format=CSV' weren't fully compatible 0.12 **** `Easy.pm' ========= * new method `random_row' 0.11 **** * several warnings fixed (most introduced in conjunction with Perl 5.6) `Easy.pm' ========= * new method `delete' `dbs_update' ============ * new option `--match-sql' 0.10 **** `dbs_dumptabdata' ================= * pass the `user' parameter to `dbs_printtab' 0.09 **** `Easy.pm' ========= * escape tabulator in `view' method * caches table structures * new methods `columns', `types', `sizes', `typemap', `sizemap' `dbs_dumptabdata', `dbs_dumptabstruct' ====================================== * pass options first to `mysqldump', needed for compatibility with older MySQL versions `dbs_update' ============ * return values of user-defined filters indicate if record is valid * inclusion mode of `--columns' fixed * expand tabulators * apply user-defined filter specified by `--routine' only once * choosed comma as default field separator for CSV format * insert an empty string instead of NULL for string columns * new option `--rows' * option `--keys' accepts key names too * issue warning on missing type information 0.08 **** `dbs_update' ============ * brown paper bug fixed that kept --cleanse from working by growing the SQL statement which each record to be cleansed * bug fixed that may be triggered by the combination of `--headline' and ` --map' * strip whitespace from column names found in the headline of the input * field separator for CSV format can selected from the commandline now * `--columns' can be used to exclude columns explicitly * strip blanks from parameter list for `--columns' * first field may contain table name and start column instead of table name only * example for `--routine' added to documentation 0.07 **** * new script `dbs_printtab' * made more verbose if connection cannot be established * method `view' fixed in respect to `limit' option, option `separator' added, separator defaults to the tabulator now, escape newlines * new option `--exclude-matching-tables' for `dbs_dumptabdata', ` dbs_dumptabstruct' and `dbs_empty' * new option `--tables' for `dbs_empty', `dbs_dumptabstruct' and ` dbs_dumptabdata' * new options `--cleanse', `--columns', `--format', `--keys', `--map', ` --skipbadlines' and `--update-only' for `dbs_update' * new option `--use-printtab' for `dbs_dumptabdata' * new option `--pipe' for `dbs_dumptabstruct' * `dbs_dumptabstruct' dumps sequences too * optional parameter CONDITION added to method `makemap' * optional parameter COLUMNS added to method `view' * made `dbs_update' more verbose on errors, ensure commit, newlines will be regenerated, remembers table name if specified with `--table', carriage returns are removed from input * don't call `rollback' within error handler if `AutoCommit' is enabled * new methods `is_table', `sequences' Authors ******* Stefan Hornburg Dennis Schön Support for Sybase and ODBC provided by David B. Bitton . 
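Filter Routine (sketch)
***********************

The `--routine' filter for `dbs_update' mentioned in the changes above is a
Perl subroutine: it receives the table name and a hash reference mapping
column names to field values, may modify those values in place, and its return
value decides whether the record is imported. A rough sketch, with the column
name `email' invented for illustration:

   sub {
       my ($table, $record) = @_;
       # normalize a hypothetical column in place
       $record->{email} = lc ($record->{email} || '');
       # a false return value makes dbs_update skip the record
       return length $record->{email};
   }
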
dbix-easy-perl-0.21.orig/LIESMICH0000644000000000000000000001377012271755224013245 0ustar DBIx::Easy - Einfache DBI-Schnittstelle *************************************** * Die Homepage von `DBIx::Easy' ist http://www.linuxia.de/DBIx/Easy/. * Die aktuelle Version (zur Zeit 0.15) ist erhältlich als http://www.linuxia.de/DBIx/Easy/DBIx-Easy.tar.gz. * Für weitere Informationen ist die Manpage heranzuziehen. Anforderungen ************* * DBI (URL: http://www.symbolstone.org/technology/perl/DBI/index.html) * `DBD::Pg', `DBD::mysql', `DBD::mSQL', `DBD::ODBC' or `DBD::sybase' (andere Datenbanktreiber auf Anfrage) Installation ************ tar -xzf DBIx-Easy.tar.gz cd DBIx-Easy-0.15 perl Makefile.PL make make install Skripte ******* Das `DBIx::Easy'-Paket enthält die folgenden Skripte: Name Beschreibung Bemerkungen `dbs_dumptabdata' erzeugt Dateisatz mit SQL-Tabellendaten Manual page `dbs_dumptabstruct' erzeugt Dateisatz mit SQL-Tabellenstrukturen Manual page `dbs_empty' löscht alle Daten aus einer SQL-Datenbank Manual page `dbs_printtab' gibt Daten einer SQL-Tabelle aus Neu in 0.07 Manual page `dbs_update' aktualisiert SQL-Datenbank aus einer Textdatei Manual page Debianpaket *********** Diese Anwendung ist auch als Debianpaket verfügbar. Der Maintainer ist Dennis Schön . Die Downloadadresse ist http://www.linuxia.de/DBIx/Easy/libdbix-easy-perl_0.15-1_all.deb. Änderungen ********** 0.15 **** `Easy.pm' ========= * Unterstützung für ODBC und Sybase hinzugefügt 0.14 **** `Easy.pm' ========= * Optionalen Parameter `map' der Methode `random_row' implementiert. Dieser war bereits in früheren Versionen dokumentiert. 0.13 **** `Easy.pm' ========= * Das Passwort in `~/.my.cnf' wird nicht mehr für Datenbanken auf anderen Rechnern verwendet * Port der Datenbank kann nun auch konfiguriert werden * Neue Methode `is_auth_error' `dbs_dumptabdata' ================= * `--cleanse' schlug in einigen Fällen fehl, wenn einer der Schlüssel '`0'' war `dbs_printtab' ============== * neue Option `--sort' `dbs_update' ============ * `dbs_printtab' wird nicht mehr als Programm aufgerufen, sondern imitiert * `--headline' und `--format=CSV' waren nicht vollkommen kompatibel 0.12 **** `Easy.pm' ========= * neue Methode `random_row' 0.11 **** * verschiedene Warnungen, die vor allem im Zusammenhang mit Perl 5.6 aufgetreten sind, wurden behoben `Easy.pm' ========= * neue Methode `delete' `dbs_update' ============ * neue Option `--match-sql' 0.10 **** `dbs_dumptabdata' ================= * Der `user'-Parameter wird an `dbs_printtab' übergeben. 
0.09 **** `Easy.pm' ========= * Tabulator wird geschützt in Methode `view' * Tabellenstrukturen werden in einem Cache abgelegt * neue Methoden `columns', `types', `sizes', `typemap', `sizemap' `dbs_dumptabdata', `dbs_dumptabstruct' ====================================== * um die Kompatibilität mit älteren MySQL-Versionen zu erhalten, werden die Optionen zuerst an `mysqldump' übergeben `dbs_update' ============ * Rückgabewerte von benutzerdefinierten Filter zeigen an, ob ein Datensatz gültig ist * Fehler bei der explizite Spaltenangabe mit `--columns' behoben * Tabulatoren werden wieder hergestellt * der durch `--routine' angegebene benutzerdefinierter Filter wird nur einmal angewendet * Komma ist jetzt voreingestelltes Trennzeichen für das CSV-Format * leerer String wird für Stringtypen anstatt eines NULL-Wertes verwendet * neue Option `--rows' * Option `--keys' läßt Schlüsselnamen zu * Ausgabe einer Warnung bei fehlenden Typinformationen 0.08 **** `dbs_update' ============ * Fehler behoben, der `--cleanse' durch Anwachsen der SQL-Anweisung mit jedem zu löschenden Datensatz unbrauchbar gemacht hat * Fehler behoben, der bei der Kombination von `--headline' und `--map' auftreten kann * Leerzeichen werden aus Spaltennamen entfernt, die der Kopfzeile der Eingabe entstammen * Trennzeichen für das CSV-Format kann von der Kommandozeile festgelegt werden * `--columns' kann zum expliziten Ausschluß von Tabellenspalten benutzt werden * Leerzeichen werden aus der Parameterliste für `--columns' entfernt * Das erste Feld kann sowohl den Tabellennamen als auch eine Startspalte enthalten anstatt nur den Tabellennamen. * Dokumentation um Beispiel für `--routine' erweitert 0.07 **** * neues Skript `dbs_printtab' * aussagekräftiger im Falle einer fehlgeschlagenen Verbindung * Methode `view' verbessert im Hinblick auf die `limit'-Option, neue Option `separator' hinzugefügt, Tabulator ist jetzt das voreingestellte Trennzeichen, Zeilenendezeichen werden umgewandelt * neue Option `--exclude-matching-tables' für `dbs_dumptabdata', ` dbs_dumptabstruct' und `dbs_empty' * neue Option `--tables' für `dbs_empty', `dbs_dumptabstruct' and ` dbs_dumptabdata' * neue Optionen `--cleanse', `--columns', `--format', `--keys', `--map', ` --skipbadlines' und `--update-only' für `dbs_update' * neue Option `--use-printtab' für `dbs_dumptabdata' * neue Option `--pipe' für `dbs_dumptabstruct' * `dbs_dumptabstruct' speichert nun auch Sequenzen * zusätzlicher optionaler Parameter CONDITION für Methode `makemap' * zusätzlicher optionaler Parameter COLUMNS für Methode `view' * aussagekräftigere Fehlermeldungen von `dbs_update', Bestätigung der Verä nderungen, Zeilenendezeichen werden wieder hergestellt, Tabellenname wird gemerkt wenn er mit `--table' angegeben wurde, Wagenrückläufe werden aus der Eingabe entfernt * Aufruf von `rollback' in der Fehlerbehandlungsroutine wird nicht getä tigt, sollte `AutoCommit' aktiviert sein. * neue Methode `is_table', `sequences' Autoren ******* Stefan Hornburg Dennis Schön Unterstützung für Sybase und ODBC wurde von David B. Bitton zur Verfügung gestellt. dbix-easy-perl-0.21.orig/scripts/0000755000000000000000000000000012776163707013615 5ustar dbix-easy-perl-0.21.orig/scripts/dbs_update0000755000000000000000000006460012271755224015652 0ustar #! 
/usr/bin/env perl # Copyright (C) 1999-2012 Stefan Hornburg # Author: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. use strict; use warnings; use DBIx::Easy; use Getopt::Long; use Term::ReadKey; # process commandline parameters my %opts; $opts{'keys'} = 1; my $whandler = $SIG{__WARN__}; $SIG{__WARN__} = sub {print STDERR "$0: @_";}; unless (GetOptions (\%opts, 'cleanse', 'columns|c=s', 'file|f=s', 'format=s', 'headline|h', 'insert-only|i', 'rows=s', 'keys|k=s', 'map|m=s', 'map-filter=s', 'match-sql=s', 'routine|r=s', 'skipbadlines', 'table|t=s', 'update-only|o')) { exit 1; } $SIG{__WARN__} = $whandler; # sanity checks my $format = 'TAB'; my %inforef = (); my %funcref = (CSV => {get_columns => \&get_columns_csv}, TAB => {get_columns => \&get_columns_tab}, XLS => {get_columns => \&get_columns_xls}); my %mfref = (lc => sub {lc(shift)}); my $sep_char = ','; my $mfsub; if ($opts{'cleanse'} || $opts{'headline'}) { unless ($opts{'table'}) { die ("$0: missing table name\n"); } } if ($opts{'map-filter'}) { unless (exists($mfref{$opts{'map-filter'}})) { die qq{$0: unknown column name filter "$opts{'map-filter'}"}, "\n"; } $mfsub = $mfref{$opts{'map-filter'}}; } if ($opts{'format'}) { $format = uc($opts{'format'}); if ($format =~ /^CSV/) { $format = 'CSV'; if ($') { $sep_char = $'; $sep_char =~ s/^\s+//; $sep_char =~ s/\s+$//; } eval { require Text::CSV_XS; }; if ($@) { die "$0: couldn't load module Text::CSV_XS\n"; } $inforef{object} = new Text::CSV_XS ({'binary' => 1, 'sep_char' => $sep_char}); } elsif ($format eq 'XLS') { eval { require Spreadsheet::ParseExcel; }; if ($@) { die "$0: couldn't load module Spreadsheet::ParseExcel\n"; } $inforef{object} = new Spreadsheet::ParseExcel; } else { die ("$0: unknown format \"" . $opts{'format'} . "\"\n"); } } my %fieldmap; my $fd_input; my ($sth, $keyfield, $update, $msg); my ($table, $fieldnames, @values, $headline); my (@columns, $routine, %colmap); my $linebuf = ''; # whether to consider column my $colflag = 1; my %usecol; # whether to consider rows my $rowflag; my %userow; # current row my $currow = 0; # input with table as first field may specify start column my $startcol; # variables for --match-sql option my (%matchmap, $matchcol); if ($opts{'columns'}) { $colflag = ! ($opts{'columns'} =~ s/\s*[\!^]//); # setup positive/negative list for columns for (@columns = split(/\s*,\s*/, $opts{'columns'})) { $usecol{$_} = $colflag; } } if ($opts{'rows'}) { my @rows; $rowflag = ! 
($opts{'rows'} =~ s/\s*[^\!]//); # setup positive/negative list for rows for (@rows = split(/\s*,\s*/, $opts{'rows'})) { unless (/^\d+$/) { die "$0: row number \"$_\" is not numeric\n"; } $userow{$_} = $rowflag; } } if ($opts{'file'}) { # read input from file require IO::File; $fd_input = new IO::File; $fd_input->open($opts{'file'}) || die "$0: couldn't open $opts{'file'}: $!\n"; } else { # read input from standard input require IO::Handle; $fd_input = new IO::Handle; $fd_input->fdopen(fileno(STDIN),'r'); } if ($opts{'map'}) { # parse column name mapping my ($head, $name); foreach (split (/;/, $opts{'map'})) { ($head, $name) = split /=/; $colmap{$head} = $name; } } my $csv; if ($opts{'headline'}) { my %hcolmap; # the first row consists of the column names # unless (defined ($headline = <$fd_input>)) { # die ("$0: empty input file\n"); # } my @columns; if ($funcref{$format}->{get_columns}(\%inforef, $fd_input,\@columns) <= 0) { die "$0: couldn't find headline\n"; } if ($opts{'map-filter'}) { @columns = map {$mfsub->($_)} @columns; } # remove whitespace from column names and mark them map {s/^\s+//; s/\s+$//; $hcolmap{$_} = 1;} @columns; if ($opts{'map'}) { my @newcolumns; # filter column names foreach (@columns) { if (exists $colmap{$_}) { push (@newcolumns, $colmap{$_}); $hcolmap{$colmap{$_}} = 1; } else { push (@newcolumns, $_); } } @columns = @newcolumns; } # add any other columns explicitly selected for (sort (keys %usecol)) { next if $hcolmap{$_}; next unless exists $usecol{$_}; next unless $usecol{$_}; push (@columns, $_); } # fixed table name $table = $opts{'table'}; $fieldmap{$table} = \@columns; } if ($opts{'routine'}) { # read Perl subroutine for filtering the input $routine = eval $opts{'routine'}; if ($@) { die "$0: invalid filter routine: $@: \n"; } if (ref($routine) ne 'CODE') { die "$0: invalid filter routine\n"; } } if ($opts{'table'}) { # set fixed table name $table = $opts{'table'}; # use defined columns if (! $opts{'headline'} && $opts{'columns'}) { $fieldmap{$table} = \@columns; } } my $dbif; my $pwdused = 0; my ($driver, $database, $user) = @ARGV; $dbif = new DBIx::Easy ($driver, $database, $user); # handler for DBI error messages and missing password $dbif -> install_handler (\&fatal); # we need to explicitly establish the connection # for the case that a password is needed $dbif -> connect; my (@keys, @cleansekeys, %cleansemap, $numkeysleft, %recmap, @names); if ($opts{'cleanse'}) { # determine column names @names = &column_names ($dbif, $table); $fieldnames = \@names; # determine keys %cleansemap = &key_names ($dbif, $table, $opts{'keys'}, 1); @cleansekeys = sort (keys %cleansemap); # get records my ($row, $href, $i); $sth = $dbif -> process ('SELECT ' . join(', ', @cleansekeys) . 
" FROM $table"); while ($row = $sth -> fetch()) { # build chain of all keys but the last $href = \%recmap; for ($i = 0; $i < $#cleansekeys; $i++) { unless (exists $href->{$$row[$i]}) { $href->{$$row[$i]} = {}; } $href = $href->{$$row[$i]}; } # stop if key kombination occurs multiple if (exists $href->{$$row[$i]}) { die "$0: duplicate key: ", join (",", @$row), "\n"; } # record last key if (defined $$row[$i]) { $href->{$$row[$i]} = 1; } else { $href->{$$row[$i]} = ''; } } } if ($opts{'match-sql'}) { unless ($opts{'match-sql'} =~ /^(.*?):\{(.*?)\}$/) { die "$0: invalid format for option --match-sql: $opts{'match-sql'}\n"; } $matchcol = $1; $sth = $dbif -> process ($2); my $row; while ($row = $sth->fetch()) { $matchmap{$$row[0]} = 1; } } my $gcsub = $funcref{$format}->{get_columns}; MAIN: while ($gcsub->(\%inforef, $fd_input, \@columns)) { my (@data); if ($opts{'headline'} || $opts{'table'}) { # table name already known @values = @columns; } else { # table name is the first column if ($format eq 'TAB') { ($table, @values) = split /\t/; } elsif ($format eq 'CSV') { next unless csv_parseline ($csv, \$linebuf, $_, [$table, @values]); } # extract optional start column parameter if ($table =~ /(.+?)\.(.+)/) { $table = $1; $startcol = $2; unless ($startcol =~ /^\d+$/) { $msg = "$0: $.: start column not a number: \"" . $startcol . "\"\n"; if ($opts{'skipbadlines'}) { warn ($msg); next; } else { die ($msg); } } } # sanity check on the table name if ($table =~ /\s/) { warn ("$0: $.: skipping record (\"$table\" not accepted as table name)\n"); next; } } # check for row ex/inclusion $currow++; if (defined $rowflag) { if ($rowflag && ! exists $userow{$currow}) { # print "Skipping row $currow due to inclusion\n"; next; } if (! $rowflag && exists $userow{$currow}) { # print "Skipping row $currow due to exclusion\n"; next; } } # determine column names @names = &column_names ($dbif, $table, $startcol); $fieldnames = \@names; if ($opts{'routine'}) { # filter input first next unless filter_input ($routine, $table, $fieldnames, \@values); } # filter out non-matching rows MATCHSQL: { if ($opts{'match-sql'}) { for (my $i = 0; $i < @$fieldnames; $i++) { if ($$fieldnames[$i] eq $matchcol) { last MATCHSQL if $matchmap{$values[$i]}; print "Not accepted record @values\n"; next MAIN; } } } } # sanity checks on input data my $typeref = $dbif -> typemap ($table); my $sizeref = $dbif -> sizemap ($table); for (my $i = 0; $i <= $#$fieldnames; $i++) { # check for column exclusion if (keys %usecol) { # note: we do not check the actual value !! if ($colflag && ! exists $usecol{$$fieldnames[$i]}) { next; } if (! $colflag && exists $usecol{$$fieldnames[$i]}) { next; } } # expand newlines and tabulators if (defined $values[$i]) { $values[$i] =~ s/\\n/\n/g; $values[$i] =~ s/\\t/\t/g; } # check if input exceeds column capacity unless (exists $$typeref{$$fieldnames[$i]}) { warn ("$0: No type information for column $$fieldnames[$i] found\n"); next; } unless (exists $$sizeref{$$fieldnames[$i]}) { warn ("$0: No size information for column $$fieldnames[$i] found\n"); next; } if ($$typeref{$$fieldnames[$i]} == DBI::SQL_CHAR) { if (defined $values[$i]) { if (length($values[$i]) > $$sizeref{$$fieldnames[$i]}) { warn (prefix() . 
"Data for field $$fieldnames[$i] truncated: $values[$i]\n"); $values[$i] = substr($values[$i], 0, $$sizeref{$$fieldnames[$i]}); } } else { # avoid insertion of NULL values $values[$i] = ''; } } elsif ($$typeref{$$fieldnames[$i]} == DBI::SQL_VARCHAR) { if (defined $values[$i]) { if (length($values[$i]) > $$sizeref{$$fieldnames[$i]}) { warn (prefix() . "Data for field $$fieldnames[$i] truncated: $values[$i]\n"); $values[$i] = substr($values[$i], 0, $$sizeref{$$fieldnames[$i]}); } } else { # avoid insertion of NULL values $values[$i] = ''; } } # push (@data, $$fieldnames[$i], $values[$i]); } # check if record exists my %keymap = &key_names ($dbif, $table, $opts{'keys'}, 1); @keys = (keys(%keymap)); my @terms = map {$_ . ' = ' . $dbif->quote($values[$keymap{$_}])} (@keys); $sth = $dbif -> process ('SELECT ' . join(', ', @keys) . " FROM $table WHERE " . join (' AND ', @terms)); while ($sth -> fetch) {} if ($sth -> rows () > 1) { $" = ', '; die ("$0: duplicate key(s) @keys in table $table\n"); } $update = $sth -> rows (); $sth -> finish (); # generate SQL statement for (my $i = 0; $i <= $#$fieldnames; $i++) { # check for column exclusion if (keys %usecol) { # note: we do not check the actual value !! if ($colflag && ! exists $usecol{$$fieldnames[$i]}) { next; } if (! $colflag && exists $usecol{$$fieldnames[$i]}) { next; } } # expand newlines if (defined $values[$i]) { $values[$i] =~ s/\\n/\n/g; } push (@data, $$fieldnames[$i], $values[$i]); } if ($update) { if ($opts{'insert-only'}) { # print "SKIP UPDATING $.\n"; next; } # print "UPDATING $.\n"; $dbif -> update ($table, join (' AND ', @terms), @data); } else { if ($opts{'update-only'}) { # print "SKIP INSERTING $.\n"; next; } # print "INSERTING $.\n"; $dbif -> insert ($table, @data); } if ($opts{'cleanse'} && $update) { my ($href, $i); # now unregister key combination $href = \%recmap; # Mysql strips trailing blanks from VARCHAR fields, so we do if ($dbif->{DRIVER} eq 'mysql') { for ($i = 0; $i < @cleansekeys; $i++) { if ($$typeref{$cleansekeys[$i]} == DBI::SQL_VARCHAR) { $values[$cleansemap{$cleansekeys[$i]}] =~ s/\s+$//; } } } # data from input file may exceed column capacity for ($i = 0; $i < @cleansekeys; $i++) { if ($$typeref{$cleansekeys[$i]} == DBI::SQL_CHAR) { $values[$cleansemap{$cleansekeys[$i]}] = substr($values[$cleansemap{$cleansekeys[$i]}], 0,$$sizeref{$cleansekeys[$i]}); } } for ($i = 0; $i < $#cleansekeys; $i++) { unless (exists $href->{$values[$cleansemap{$cleansekeys[$i]}]}) { die ("$0: internal error: key $cleansekeys[$i] not found: ", join (",", @values), "\n"); } $href = $href->{$values[$cleansemap{$cleansekeys[$i]}]}; } unless (exists $href->{$values[$cleansemap{$cleansekeys[$i]}]}) { die ("$0: internal error: key $cleansekeys[$i] not found: ", join (",", @values), "\n"); } if ($href->{$values[$cleansemap{$cleansekeys[$i]}]} == 0) { my $j = 0; warn (prefix () . "duplicate key(s) in input: ", join (", ", map {"$_ = \"" . $values[$cleansemap{$cleansekeys[$j++]}] . "\""} @cleansekeys) . "\n"); } $href->{$values[$cleansemap{$cleansekeys[$i]}]} = 0; } } if ($opts{'cleanse'} && ! 
$opts{'insert-only'}) { my $href; # now start to eliminate old records $href = \%recmap; my @keylist = keys %recmap; my (@tmpkeys, @reckeys, $thiskey, $keyval, @conds); for (keys %recmap) { push (@reckeys, [$recmap{$_}, $_]); } for (my $i = 1; $i < @cleansekeys; $i++) { @tmpkeys = @reckeys; undef @reckeys; for $thiskey (@tmpkeys) { $href = shift @$thiskey; for (keys %$href) { push (@reckeys, [$href->{$_}, @$thiskey, $_]); } } } for (@reckeys) { undef @conds; # finally delete the record next unless shift (@$_); for (my $i = 0; $i < @cleansekeys; $i++) { push (@conds, $cleansekeys[$i] . ' = ' . $dbif->quote ($_->[$i])); } $dbif -> process ("DELETE FROM $table WHERE " . join (' AND ', @conds)); } } if (length $linebuf) { if ($opts{'skipbadlines'}) { warn ("$0: unexpected EOF"); } else { die ("$0: unexpected EOF"); } } undef $dbif; if ($opts{'file'}) { $fd_input->close; } # ---------------------------------------- # FUNCTION: get_columns_tab IREF FD COLREF # # Get columns from a tab separated file. # ---------------------------------------- sub get_columns_tab { my ($iref, $fd, $colref) = @_; my $line; while (defined ($line = <$fd>)) { # skip empty/blank/comment lines next if $line =~ /^\#/; next if $line =~ /^\s*$/; # remove newlines and carriage returns chomp ($line); $line =~ s/\r$//; @$colref = split (/\t/, $line); return @$colref; } } # ---------------------------------------- # FUNCTION: get_columns_csv IREF FD COLREF # # Get columns from a CSV file. # ---------------------------------------- sub get_columns_csv { my ($iref, $fd, $colref) = @_; my ($line, $buffer); unless ($iref->{parser}) { $iref->{parser} = Text::CSV_XS->new ({'binary' => 1, 'sep_char' => ','}); } while (defined ($line = <$fd>)) { if ($iref->{parser}->parse($line)) { # csv line completed, delete buffer @$colref = $iref->{parser}->fields(); $buffer = ''; return @$colref; } if (($line =~ tr/"/"/) % 2) { # odd number of quotes, try again with next line $buffer = $line; } else { $msg = "$0: $.: line not in CSV format: " . $iref->{parser}->error_input() . "\n"; die ($msg); } } } # ---------------------------------------- # FUNCTION: get_columns_xls IREF FD COLREF # # Get columns from a XLS spreadsheet. # ---------------------------------------- sub get_columns_xls { my ($iref, $fd, $colref) = @_; unless ($iref->{workbook}) { # parse the spreadsheet once $iref->{workbook} = $iref->{object}->Parse($fd); unless ($iref->{workbook}) { die "$0: couldn't parse spreadsheet\n"; } $iref->{worksheet} = $iref->{workbook}->{Worksheet}[0]; $iref->{row} = 0; } if ($iref->{row} <= $iref->{worksheet}->{MaxRow}) { @$colref = map {defined $_ ? $_->{Val} : undef} @{$iref->{worksheet}->{Cells}[$iref->{row}++]}; return @$colref; } } # ------------------------------------------------- # FUNCTION: column_names DBIF TABLE [START] # # Returns array with column names from table TABLE # using database connection DBIF. # Optional parameter START specifies column where # the array should start with. # ------------------------------------------------- sub column_names ($$) { my ($dbif, $table, $start) = @_; my ($names, $sth); $start = 0 unless $start; if (exists $fieldmap{$table}) { $names = $fieldmap{$table}; } else { $sth = $dbif -> process ("SELECT * FROM $table WHERE 0 = 1"); $names = $fieldmap{$table} = $sth -> {NAME}; $sth -> finish (); } @$names[$start .. $#$names]; } # -------------------------------------------------- # FUNCTION: key_names DBIF TABLE KEYSPEC [HASH] # # Returns array with key names for table TABLE. 
# Database connection DBIF may be used to # retrieve necessary information. # KEYSPEC contains desired keys, either a numeric # value or a comma-separated list of keys. # If HASH is set, a mapping between key name # and position is returned. # -------------------------------------------------- sub key_names () { my ($dbif, $table, $keyspec, $hash) = @_; my ($numkeysleft, $i); my @columns = column_names ($dbif, $table); my (@keys, %kmap); $keyspec =~ s/^\s+//; $keyspec =~ s/\s+$//; if ($keyspec =~ /^\d+$/) { # # passed keys are numeric, figure out the names # $numkeysleft = $keyspec; for ($i = 0; $i < $numkeysleft && $i < @columns; $i++) { if (keys %usecol) { # note: we do not check the actual value !! if ($colflag && ! exists $usecol{$columns[$i]}) { $numkeysleft++; next; } if (! $colflag && exists $usecol{$columns[$i]}) { $numkeysleft++; next; } } if ($hash) { $kmap{$columns[$i]} = $i; } else { push (@keys, $columns[$i]); } } } else { # # key names are passed explicitly # my %colmap; for ($i = 0; $i < @columns; $i++) { $colmap{$columns[$i]} = $i; } for (split (/\s*,\s*/, $keyspec)) { # sanity check unless (exists $colmap{$_}) { die "$0: key \"$_\" appears not in column list\n"; } if ($hash) { $kmap{$_} = $colmap{$_}; } else { push (@keys, $_); } } } return $hash ? %kmap : @keys; } # --------------------------------------------------------- # FUNCTION: filter_input ROUTINE TABLE FIELDNAMES VALREF # # Filters data input with ROUTINE. Produces first a mapping # between FIELDNAMES and the data pointed to by VALREF # and passes the table name TABLE and the mapping to the # ROUTINE. # --------------------------------------------------------- sub filter_input { my ($routine, $table, $fieldnames, $valref) = @_; my (%colmap, $ret); # produce mapping for (my $i = 0; $i <= $#$fieldnames; $i++) { $colmap{$$fieldnames[$i]} = $$valref[$i]; } # apply filter routine $ret = &$routine ($table, \%colmap); # write new values for (my $i = 0; $i <= $#$fieldnames; $i++) { $$valref[$i] = $colmap{$$fieldnames[$i]}; } $ret; } # ------------------------------------ # FUNCTION: prefix # # Generates prefix for error messages. # ------------------------------------ sub prefix { my @frags = ($0); if ($.) { if ($opts{'file'}) { push (@frags, $opts{'file'}); } push (@frags, $.); } join (': ', @frags, ''); } # ----------------------------------- # FUNCTION: fatal # # Error handler called by DBIx::Easy. # ----------------------------------- sub fatal { my ($statement, $err, $msg) = @_; my $pwd; my $prefix = prefix (); if ($dbif->is_auth_error ($err)) { unless ($pwdused) { print "We need a password.\n"; $pwd = querypwd(); $pwdused = 1; # retry the connection if (length ($pwd)) { $dbif = new DBIx::Easy ($driver, $database, $user, $pwd); $dbif -> install_handler (\&fatal); $dbif -> connect (); return; } else { die ("$prefix$statement: $msg\n"); } } } die ("$prefix$statement: $msg\n"); } # ---------------------------- # FUNCTION: querypwd # # Queries user for a password. # ---------------------------- sub querypwd () { my $pwd; print "Password: "; ReadMode ('noecho'); # turn echo off $pwd = ReadLine (0); ReadMode ('restore'); # restore terminal print "\n"; chomp ($pwd); $pwd; } # script documentation (POD style) =head1 NAME dbs_update - Update SQL Databases =head1 DESCRIPTION dbs_update is an utility to update SQL databases from text files. =head2 FORMAT OF THE TEXT FILES dbs_update assumes that each line of the input contains a data record and that the field within the records are separated by tabulators. 
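To make this concrete, here is a small invented example (the table name and
the field values are hypothetical): in the default B<TAB> format one record
occupies one line and is split on tab characters, just as the script's own
C<get_columns_tab> routine does; the first field names the target table as
explained below.

   # one TAB format record and how it is split into fields
   my $line = "transaction\t42\tdone\t2000-09-08";
   my @fields = split /\t/, $line;
   # @fields is now ('transaction', '42', 'done', '2000-09-08')
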
You can tell dbs_update about the input format with the B<--format> option. The first field of the data record is used as table specification. These consists of the table name and optionally the index of starting column, separated by a dot. Alternatively dbs_update can read the column names from the first line of input (see the B<-h>/B<--headline> option). These can even be aliases for the real column names (see the B<-m>/B<--map> option). =head1 COMMAND LINE PARAMETERS Required command line parameters are the DBI driver (C for Postgres or C for MySQL) and the database name. The third parameter is optionally and specifies the database user and/or the host where the database resides (C, C or C<@linuxia.de>). =head1 OPTIONS =head2 B<--cleanse> I all records which remain unaffected from the update process. The same result as deleting all records from the table first and then running dbs_update, but the table is not empty in the meantime. =head2 B<-c> I, B<--columns>=I Update only the table columns given by the I parameters. To exclude columns from the update prepend C or C<^> to the parameters. =head2 B<--rows>=I Update only the input rows given by the I parameters. The first row is 1 where headlines doesn't count. To exclude rows from the update prepend C or C<^> to the parameters. =head2 B<-f> I, B<--file>=I Reads records from file I instead of from standard input. =head2 B<--format>=I Assumes I as format for the input. Only B can be specified for now, default is B. The default field separator for B is a comma, you may change this by appending the separator to the format. =head2 B<-h>, B<--headline> Reads the column names from the first line of the input instead of dedicting them from the database layout. Requires the B<-t/--table> option. =head2 B<-i>, B<--insert-only> Insert new database entries only, skip others. =head2 B<-k> I, B<-k> I, B<--keys>=I, B<--keys>=I Specifies the keys for the table(s) either as the number of columns used as keys or by specifying them explicitly as comma separated arguments to the option. This is used for the detection of existing records. =head2 B<-m> I, B<--map>=I Maps the names found in the first line of input to the actual column names in the database. The alias and the column name are separated with C<=> signs and the different entries are separated by C<;> signs, e.g. C. =head2 B<--map-filter>=I Applies a filter to the column names read from the input file. Currently there is only the C filter available. =head2 B<--match-sql>=I Updates only records where the value of the column I is in the result set of the SQL statement I, e.g. C. =head2 B<-o>, B<--update-only> Updates existing database entries only, stops if it detects new ones. =head2 B<-r> I, B<--routine>=I Applies I to any data record. I must be a subroutine. dbs_update passes the table name and a hash reference to this subroutine. The keys of the hash are the column names and the values are the corresponding field values. If the return value of I is not a truth value, the data record will be skipped. C =head2 B<--skipbadlines> Lines not matching the assumed format are ignored. Without this option, dbs_update simply stops. =head2 B<-t> I
<TABLE>, B<--table>=I<TABLE>
Uses I<TABLE>
as table name for all records instead of the first field name. =head1 AUTHOR Stefan Hornburg (Racke), racke@linuxia.de =head1 SEE ALSO perl(1), DBIx::Easy(3) =cut dbix-easy-perl-0.21.orig/scripts/dbs_dumptabdata0000755000000000000000000001410212271755224016646 0ustar #! /usr/bin/env perl # dbs_dumptabdata - creates file set with SQL table data # This script queries a SQL database and creates one file for each # table in the database containing the data of the table. # Copyright (C) 1999-2012 Stefan Hornburg # Author: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. use strict; use warnings; use DBIx::Easy; use FindBin; use Getopt::Long; use Term::ReadKey; # process commandline parameters my %opts; my $printtabopts = ''; my $whandler = $SIG{__WARN__}; $SIG{__WARN__} = sub {print STDERR "$0: @_";}; unless (GetOptions (\%opts, 'exclude-matching-tables=s', 'sort=s', 'tables|t=s', 'use-printtab')) { exit 1; } $SIG{__WARN__} = $whandler; # sanity check on commandline parameters if (exists $opts{'exclude-matching-tables'}) { eval {'' =~ /$opts{'exclude-matching-tables'}/;}; if ($@) { $@ =~ s%at /.*$%%; die "$0: $@"; } } my $dbif; my $pwdused = 0; my $pwd; my @tables; my ($driver, $database, $user) = @ARGV; $dbif = new DBIx::Easy ($driver, $database, $user); # handler for DBI error messages and missing password $dbif -> install_handler (\&fatal); # we need to explicitly establish the connection # for the case that a password is needed $dbif -> connect; # customize dump procedure my $dumpproc; if ($opts{'use-printtab'}) { # generic dump procedure $dumpproc = sub { my ($database, $table, $user) = @_; unless (defined $user) {$user = ''}; print $dbif->view ($table); } } else { # dump procedure provided by DBMS if ($driver eq 'mysql') { $dumpproc = sub { my ($database, $table) = @_; my $header; my $optstr = ''; if ($dbif -> {USER}) { $optstr = " -u " . $dbif -> {USER}; } if ($dbif -> {HOST}) { $optstr .= " -h " . $dbif -> {HOST}; } if (defined $pwd) { $optstr .= " -p"; } # -c: Use complete insert statements. open (DUMP, "mysqldump -t -c $optstr $database $table |") || die ("$0: Couldn't launch mysqldump: $!\n"); while () { if ($header) { print; next; } # skip header $header = 1 unless (/\S/); } close (DUMP) || die ("$0: mysqldump execution errors\n"); } } elsif ($driver eq 'Pg') { $dumpproc = sub { my ($database, $table) = @_; # -D => dump data as inserts with attribute names open (DUMP, "pg_dump $database -a -D -t $table |") || die ("$0: Couldn't launch pg_dump: $!\n"); while () { print; } close (DUMP) || die ("$0: pg_dump execution errors\n"); } } } if ($opts{'tables'}) { @tables = split (/,/, $opts{'tables'}); } else { @tables = $dbif->tables (); } foreach my $table (@tables) { my $outfile = lc($table) . 
'.sql'; # skip tables unwanted by user if (exists $opts{'exclude-matching-tables'}) { next if $table =~ /$opts{'exclude-matching-tables'}/; } # call driver dependent dump procedure and fill # output file with the results open (OUT, ">$outfile") || die ("$0: Couldn't open $outfile: $!\n"); select (OUT); &$dumpproc ($database, $table, $user); close (OUT); } # Destroy Database Object as written in manpage undef $dbif; # ----------------------------------- # FUNCTION: fatal # # Error handler called by DBIx::Easy. # ----------------------------------- sub fatal { my ($statement, $err, $msg) = @_; if ($dbif->is_auth_error ($err)) { unless ($pwdused) { print "We need a password.\n"; $pwd = querypwd(); $pwdused = 1; # retry the connection if (length ($pwd)) { $dbif = new DBIx::Easy ($driver, $database, $user, $pwd); $dbif -> install_handler (\&fatal); $dbif -> connect (); return; } else { die ("$statement.\n"); } } } die ("$statement.\n"); } # ---------------------------- # FUNCTION: querypwd # # Queries user for a password. # ---------------------------- sub querypwd () { my $pwd; print "Password: "; ReadMode ('noecho'); # turn echo off $pwd = ReadLine (0); ReadMode ('restore'); # restore terminal print "\n"; chomp ($pwd); $pwd; } =head1 NAME dbs_dumptabdata - Creates file set with SQL table data =head1 DESCRIPTION dbs_dumptabdata is an utility to create a file set with SQL table data. For each table in the database dbs_dumptabdata calls the appropriate dumper utility with the output directed to a file named I
.sql in the current directory. dbs_dumptabdata asks for a password if necessary. =head1 COMMAND LINE PARAMETERS Required command line parameters are the DBI driver (C for Postgres or C for MySQL) and the database name. The third parameter is optionally and specifies the database user and/or the host where the database resides (C, C or C<@linuxia.de>). =head1 COMMAND LINE OPTIONS =head2 B<-t> TABLE[,TABLE,...], B<--tables>=TABLE[,TABLE,...] Comma-separated list of tables to dump. =head2 B<--exclude-matching-tables>=REGEXP Excludes any table matching the regular expression REGEXP from dumping. =head2 B<--use-printtab> Uses generic dump (similar to C) instead of the dumper utility provided by the DBMS. =head1 BUGS msql is not supported. =head1 AUTHOR Stefan Hornburg (Racke), racke@linuxia.de =head1 SEE ALSO perl(1), DBIx::Easy(3) =cut dbix-easy-perl-0.21.orig/scripts/dbs_empty0000755000000000000000000001001612271755224015516 0ustar #! /usr/bin/env perl # dbs_empty - erases any data from a given SQL database # Copyright (C) 1999-2012 Stefan Hornburg # Author: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. use strict; use warnings; use DBIx::Easy; use Getopt::Long; use Term::ReadKey; # process commandline parameters my %opts; my $whandler = $SIG{__WARN__}; $SIG{__WARN__} = sub {print STDERR "$0: @_";}; unless (GetOptions (\%opts, 'exclude-matching-tables=s', 'tables|t')) { exit 1; } $SIG{__WARN__} = $whandler; # sanity check on commandline parameters if (exists $opts{'exclude-matching-tables'}) { eval {'' =~ /$opts{'exclude-matching-tables'}/;}; if ($@) { $@ =~ s%at /.*$%%; die "$0: $@"; } } my $dbif; my $pwdused = 0; my ($driver, $database, $user) = @ARGV; $dbif = new DBIx::Easy ($driver, $database, $user); # handler for DBI error messages and missing password $dbif -> install_handler (\&fatal); # we need to explicitly establish the connection # for the case that a password is needed $dbif -> connect; my %fieldmap; my ($sth, $keyfield, $update); my ($table, $key, $fieldnames, @values, $headline); my (@columns, $routine); for ($dbif->tables) { # skip tables preserved by user if (exists $opts{'exclude-matching-tables'}) { next if $table =~ /$opts{'exclude-matching-tables'}/; } if ($opts{'tables'}) { # remove table from database $dbif -> process ("DROP TABLE $_"); } else { # empty table $dbif -> process ("DELETE FROM $_ WHERE 1 = 1"); } } # ensures commit undef $dbif; # ----------------------------------- # FUNCTION: fatal # # Error handler called by DBIx::Easy. 
# ----------------------------------- sub fatal { my ($statement, $err, $msg) = @_; my $pwd; if ($dbif->is_auth_error ($err)) { unless ($pwdused) { print "We need a password.\n"; $pwd = querypwd(); $pwdused = 1; # retry the connection if (length ($pwd)) { $dbif = new DBIx::Easy ($driver, $database, $user, $pwd); $dbif -> install_handler (\&fatal); $dbif -> connect (); return; } else { die ("$statement: $msg.\n"); } } } die ("$statement: $msg.\n"); } # ---------------------------- # FUNCTION: querypwd # # Queries user for a password. # ---------------------------- sub querypwd () { my $pwd; print "Password: "; ReadMode ('noecho'); # turn echo off $pwd = ReadLine (0); ReadMode ('restore'); # restore terminal print "\n"; chomp ($pwd); $pwd; } # script documentation (POD style) =head1 NAME dbs_empty - Empty SQL Databases =head1 DESCRIPTION dbs_empty is an utility that B any data from a given SQL database. =head1 COMMAND LINE PARAMETERS Required command line parameters are the DBI driver (C for Postgres or C for MySQL) and the database name. The third parameter is optionally and specifies the database user and/or the host where the database resides (C, C or C<@linuxia.de>). =head1 COMMAND LINE OPTIONS =head2 B<--exclude-matching-tables>=REGEXP Excludes any table matching the regular expression REGEXP from dumping. =head2 B<-t, --tables> Remove tables from database too. =head1 AUTHOR Stefan Hornburg (Racke), racke@linuxia.de =head1 SEE ALSO perl(1), DBIx::Easy(3) =cut dbix-easy-perl-0.21.orig/scripts/dbs_printtab0000755000000000000000000000744612271755224016220 0ustar #! /usr/bin/env perl # dbs_printtab - creates file set with SQL table data # This script queries a SQL database and creates one file for each # table in the database containing the data of the table. # Copyright (C) 2000-2012 Stefan Hornburg # Author: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. use strict; use warnings; use DBIx::Easy; use Getopt::Long; use Term::ReadKey; # process commandline parameters my %opts; my $whandler = $SIG{__WARN__}; $SIG{__WARN__} = sub {print STDERR "$0: @_";}; unless (GetOptions (\%opts, 'columns|c=s', 'sort=s')) { exit 1; } $SIG{__WARN__} = $whandler; my $USAGE = < install_handler (\&fatal); # we need to explicitly establish the connection # for the case that a password is needed $dbif -> connect; # query records in table print $dbif -> view ($table, columns => $opts{'columns'}, order => $opts{'sort'}); # Destroy database object as written in manpage undef $dbif; # ----------------------------------- # FUNCTION: fatal # # Error handler called by DBIx::Easy. 
# ----------------------------------- sub fatal { my ($statement, $err, $msg) = @_; my $pwd; if ($dbif->is_auth_error ($err)) { unless ($pwdused) { print "We need a password.\n"; $pwd = querypwd(); $pwdused = 1; # retry the connection if (length ($pwd)) { $dbif = new DBIx::Easy ($driver, $database, $user, $pwd); $dbif -> install_handler (\&fatal); $dbif -> connect (); return; } else { die ("$statement.\n"); } } } die ("$statement.\n"); } # ---------------------------- # FUNCTION: querypwd # # Queries user for a password. # ---------------------------- sub querypwd () { my $pwd; print "Password: "; ReadMode ('noecho'); # turn echo off $pwd = ReadLine (0); ReadMode ('restore'); # restore terminal print "\n"; chomp ($pwd); $pwd; } =head1 NAME dbs_printtab - Prints SQL table data on standard output =head1 DESCRIPTION dbs_printtab is an utility to print SQL table data on standard output. dbs_printtab asks for a password if necessary. =head1 COMMAND LINE PARAMETERS Required command line parameters are the DBI driver (C for Postgres or C for MySQL), the database name and the table name. The fourth parameter is optionally and specifies the database user and/or the host where the database resides (C, C or C<@linuxia.de>). =head1 COMMAND LINE OPTIONS =head2 B<-c>=COLUMN[,COLUMN], B<--columns>=COLUMN[,COLUMN] The output is restricted to the given COLUMN arguments. =head2 B<--sort>=KEY[,KEY] The output is sorted by the KEY arguments. =head1 BUGS msql is not fully supported. =head1 AUTHOR Stefan Hornburg (Racke), racke@linuxia.de =head1 SEE ALSO perl(1), DBIx::Easy(3) =cut dbix-easy-perl-0.21.orig/scripts/dbs_dumptabstruct0000755000000000000000000001505412271755224017270 0ustar #! /usr/bin/env perl # dbs_dumptabstruct - creates file set with SQL table schemas # This script queries a SQL database and creates one file for each # table in the database containing the schema of the table. # Copyright (C) 1999-2012 Stefan Hornburg # Author: Stefan Hornburg (Racke) # Maintainer: Stefan Hornburg (Racke) # Version: 0.19 # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # This file is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this file; see the file COPYING. If not, write to the Free # Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
use strict; use warnings; use DBIx::Easy; use Getopt::Long; use Term::ReadKey; # process commandline parameters my %opts; my $whandler = $SIG{__WARN__}; $SIG{__WARN__} = sub {print STDERR "$0: @_";}; unless (GetOptions (\%opts, 'dump-options|d|o=s', 'exclude-matching-tables=s', 'filter', 'pipe|p', 'tables|t=s')) { exit 1; } $SIG{__WARN__} = $whandler; # sanity check on commandline parameters if (exists $opts{'exclude-matching-tables'}) { eval {'' =~ /$opts{'exclude-matching-tables'}/;}; if ($@) { $@ =~ s%at /.*$%%; die "$0: $@"; } } my $dbif; my $pwdused = 0; my $pwd; my @tables; my ($driver, $database, $user) = @ARGV; $dbif = new DBIx::Easy ($driver, $database, $user); # handler for DBI error messages and missing password $dbif -> install_handler (\&fatal); # we need to explicitly establish the connection # for the case that a password is needed $dbif -> connect; # dump procedures for the different drivers my $dumpproc; if ($driver eq 'mysql') { if ($opts{filter}) { $dumpproc = \&kvfilter; } else { $dumpproc = sub { my ($database, $table) = @_; my $header; my $optstr = ''; if ($dbif -> {USER}) { $optstr = " -u " . $dbif -> {USER}; } if ($dbif -> {HOST}) { $optstr .= " -h " . $dbif -> {HOST}; } if (defined $pwd) { $optstr .= " -p"; } if ($opts{'dump-options'}) { $optstr .= " $opts{'dump-options'}"; } open (DUMP, qq{mysqldump -d $optstr "$database" "$table" |}) || die ("$0: Couldn't launch mysqldump: $!\n"); while () { if ($header) { print; next; } # skip header $header = 1 unless (/\S/); } close (DUMP) || die ("$0: mysqldump execution errors\n"); } } } elsif ($driver eq 'Pg') { if ($opts{filter}) { die "$0: --filter option not supported for PostgreSQL databases\n"; } else { $dumpproc = sub { my ($database, $table) = @_; my $optstr = ''; if ($opts{'dump-options'}) { $optstr = " $opts{'dump-options'}"; } open (DUMP, qq{pg_dump "$database" -x -s $optstr -t "$table" |}) || die ("$0: Couldn't launch pg_dump: $!\n"); while () { print; } close (DUMP) || die ("$0: pg_dump execution errors\n"); } } } if ($opts{'tables'}) { @tables = split (/,/, $opts{'tables'}); } else { @tables = ($dbif->tables(), $dbif->sequences()); } foreach my $table (@tables) { my $outfile = lc($table) . '.sql'; my $regexp; # skip tables unwanted by user if (exists $opts{'exclude-matching-tables'}) { next if $table =~ /$opts{'exclude-matching-tables'}/; } # call driver dependent dump procedure and fill # output file with the results unless ($opts{'pipe'}) { open (OUT, ">$outfile") || die ("$0: Couldn't open $outfile: $!\n"); select (OUT); } &$dumpproc ($database, $table); unless ($opts{'pipe'}) { close (OUT); } select (STDOUT); } # Destroy Database object as written in manpage undef $dbif; # ----------------------------------- # FUNCTION: fatal # # Error handler called by DBIx::Easy. # ----------------------------------- sub fatal { my ($statement, $err, $msg) = @_; if ($dbif->is_auth_error ($err)) { unless ($pwdused) { print "We need a password.\n"; $pwd = querypwd(); $pwdused = 1; # retry the connection if (length ($pwd)) { $dbif = new DBIx::Easy ($driver, $database, $user, $pwd); $dbif -> install_handler (\&fatal); $dbif -> connect (); return; } else { die ("$statement.\n"); } } } die ("$statement.\n"); } # ---------------------------- # FUNCTION: querypwd # # Queries user for a password. 
# ---------------------------- sub querypwd () { my $pwd; print "Password: "; ReadMode ('noecho'); # turn echo off $pwd = ReadLine (0); ReadMode ('restore'); # restore terminal print "\n"; chomp ($pwd); $pwd; } # FUNCTION: kvfilter sub kvfilter { my ($database, $table) = @_; my $dbif = new DBIx::Easy ('mysql', $database); my $sth = $dbif->process("describe $table"); my $row; print join ("\t", @{$sth->{NAME}}), "\n"; while ($row = $sth->fetch()) { print join("\t", map {$_ || ''} @$row), "\n"; } } =head1 NAME dbs_dumptabstruct - Creates file set with SQL table schemas =head1 DESCRIPTION dbs_dumptabstruct is an utility to create a file set with SQL table schemas. For each table in the database dbs_dumptabstruct calls the appropriate dumper utility with the output directed to a file named I
.sql in the current directory. dbs_dumptabstruct asks for a password if necessary. =head1 COMMAND LINE PARAMETERS Required command line parameters are the DBI driver (C for Postgres or C for MySQL) and the database name. The third parameter is optionally and specifies the database user and/or the host where the database resides (C, C or C<@linuxia.de>). =head1 COMMAND LINE OPTIONS =head2 B<-d>=OPTIONS, B<-o>=OPTIONS, B<--dump-options>=OPTIONS Pass options to the dumper utility, e.g. C<--compatible=mysql40>. =head2 B<-p>, B<--pipe> Prints the table dumps to standard output. =head2 B<-t> TABLE[,TABLE,...], B<--tables>=TABLE[,TABLE,...] Comma-separated list of tables to dump. =head2 B<--exclude-matching-tables>=REGEXP Excludes any table matching the regular expression REGEXP from dumping. =head1 BUGS Only mysql and Pg drivers are supported. =head1 AUTHOR Stefan Hornburg (Racke), racke@linuxia.de =head1 VERSION 0.17 =head1 SEE ALSO perl(1), DBIx::Easy(3) =cut
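# The schema information dumped by this script can also be inspected through
# DBIx::Easy itself, the way dbs_update does before importing data. The lines
# below are only a commented-out sketch: the database `test' and the table
# `transaction' are invented for illustration.
#
#   my $schema_dbif = new DBIx::Easy ('Pg', 'test');
#   my $typeref = $schema_dbif->typemap ('transaction');  # column name => DBI type
#   my $sizeref = $schema_dbif->sizemap ('transaction');  # column name => column size
#   for my $col (sort keys %$typeref) {
#       print join ("\t", $col, $typeref->{$col}, $sizeref->{$col} || ''), "\n";
#   }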