SQL-Translator-1.60/0000755000000000000000000000000013473557373014245 5ustar00rootroot00000000000000SQL-Translator-1.60/Makefile.PL0000644000000000000000000000746613461332374016221 0ustar00rootroot00000000000000use strict; use warnings; use ExtUtils::MakeMaker; use File::ShareDir::Install; do './maint/Makefile.PL.include' or die $@ unless -f 'META.yml'; my $eumm_version = eval $ExtUtils::MakeMaker::VERSION; my %eumm_args = ( NAME => 'SQL::Translator', ABSTRACT => 'SQL DDL transformations and more', VERSION_FROM => 'lib/SQL/Translator.pm', LICENSE => 'perl', MIN_PERL_VERSION => '5.008001', CONFIGURE_REQUIRES => { 'ExtUtils::MakeMaker' => '6.54', # to deal with x.y.z versions properly 'File::ShareDir::Install' => '0', }, TEST_REQUIRES => { 'JSON::MaybeXS' => '1.003003', 'YAML' => '0.66', 'XML::Writer' => '0.500', 'Test::More' => '0.88', 'Test::Differences' => '0', 'Test::Exception' => '0.31', 'Text::ParseWords' => '0', }, PREREQ_PM => { 'Digest::SHA' => '0', 'Carp::Clan' => '0', 'Parse::RecDescent' => '1.967009', 'DBI' => '1.54', 'File::ShareDir' => '1.0', 'Moo' => '1.000003', 'Package::Variant' => '1.001001', 'Sub::Quote' => '0', 'Try::Tiny' => '0.04', 'Scalar::Util' => '0', }, realclean => { FILES => 't/data/roundtrip_autogen.yaml' }, EXE_FILES => [ qw| script/sqlt-diagram script/sqlt-diff script/sqlt-diff-old script/sqlt-dumper script/sqlt-graph script/sqlt | ], META_MERGE => { "meta-spec" => { version => 2 }, dynamic_config => 0, resources => { bugtracker => { web => 'https://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Translator', mailto => 'bug-SQL-Translator@rt.cpan.org', }, repository => { type => 'git', url => 'git@github.com/dbsrgits/sql-translator.git', web => 'https://github.com/dbsrgits/sql-translator/', }, license => [ 'http://dev.perl.org/licenses/' ], x_IRC => 'irc://irc.perl.org/#sql-translator', x_Ratings => 'http://cpanratings.perl.org/d/SQL-Translator', }, x_authority => 'cpan:JROBINSON', no_index => { directory => [qw(maint share xt)], }, prereqs => { 
runtime => { recommends => { 'Template' => '2.20', 'GD' => '0', 'GraphViz' => '0', 'Graph::Directed' => '0', 'Spreadsheet::ParseExcel' => '0.41', 'Text::RecordParser' => '0.02', 'XML::LibXML' => '1.69', }, }, develop => { requires => { 'Template' => '2.20', 'GD' => '0', 'GraphViz' => '0', 'Graph::Directed' => '0', 'Spreadsheet::ParseExcel' => '0.41', 'Text::RecordParser' => '0.02', 'XML::LibXML' => '1.69', 'Test::EOL' => '1.1', 'Test::NoTabs' => '1.1', 'Software::LicenseUtils' => '0', # for Distar }, }, }, }, ); install_share 'share'; sub _move_to { my ($hash, $fromkey, $tokey) = @_; $hash->{$tokey} = { %{ $hash->{$tokey} || {} }, %{ delete($hash->{$fromkey}) || {} }, }; } delete $eumm_args{META_MERGE} if $eumm_version < 6.45_01; delete $eumm_args{CONFIGURE_REQUIRES} if $eumm_version < 6.51_03; # too late to use so just delete _move_to(\%eumm_args, 'TEST_REQUIRES', 'BUILD_REQUIRES') if $eumm_version < 6.63_03; _move_to(\%eumm_args, 'BUILD_REQUIRES', 'PREREQ_PM') if $eumm_version < 6.55_01; $eumm_args{NO_MYMETA} = 1 if $eumm_version >= 6.57_02 and $eumm_version < 6.57_07; WriteMakefile(%eumm_args); package MY; use File::ShareDir::Install qw(postamble); SQL-Translator-1.60/script/0000755000000000000000000000000013473557372015550 5ustar00rootroot00000000000000SQL-Translator-1.60/script/sqlt-diff-old0000755000000000000000000004525613473550070020143 0ustar00rootroot00000000000000#!/usr/bin/env perl # vim: set ft=perl: # ------------------------------------------------------------------- # Copyright (C) 2002-2009 The SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. # ------------------------------------------------------------------- =head1 NAME sqlt-diff - find the differences b/w two schemas =head1 SYNOPSIS For help: sqlt-diff -h|--help For a list of all valid parsers: sqlt -l|--list To diff two schemas: sqlt-diff [options] file_name1=parser file_name2=parser Options: -d|--debug Show debugging info =head1 DESCRIPTION sqlt-diff is a utility for creating a file of SQL commands necessary to transform the first schema provided to the second. While not yet exhaustive in its ability to mutate the entire schema, it will report the following =over =item * New tables Using the Producer class of the target (second) schema, any tables missing in the first schema will be generated in their entirety (fields, constraints, indices). =item * Missing/altered fields Any fields missing or altered between the two schemas will be reported as: ALTER TABLE [DROP ] [CHANGE ()] ; =item * Missing/altered indices Any indices missing or of a different type or on different fields will be indicated. Indices that should be dropped will be reported as such: DROP INDEX ON ; An index of a different type or on different fields will be reported as a new index as such: CREATE [] INDEX [] ON ( [,] ) ; =back "ALTER/DROP TABLE" and "CREATE INDEX" statements B generated by the Producer, unfortunately, and may require massaging before being passed to your target database. 
=cut # ------------------------------------------------------------------- use strict; use warnings; use Pod::Usage; use Data::Dumper; use SQL::Translator; use SQL::Translator::Schema::Constants; use vars qw( $VERSION ); $VERSION = '1.60'; my ( @input, $list, $help, $debug ); for my $arg ( @ARGV ) { if ( $arg =~ m/^-?-l(ist)?$/ ) { $list = 1; } elsif ( $arg =~ m/^-?-h(elp)?$/ ) { $help = 1; } elsif ( $arg =~ m/^-?-d(ebug)?$/ ) { $debug = 1; } elsif ( $arg =~ m/^([^=]+)=(.+)$/ ) { push @input, { file => $1, parser => $2 }; } else { pod2usage( msg => "Unknown argument '$arg'" ); } } pod2usage(1) if $help; pod2usage('Please specify only two schemas to diff') if scalar @input > 2; pod2usage('No input') if !@input; if ( my $interactive = -t STDIN && -t STDOUT ) { print STDERR join("\n", "sqlt-diff-old is deprecated. Please sqlt-diff, and tell us ", "about any problems or patch SQL::Translator::Diff", '', ); } my $tr = SQL::Translator->new; my @parsers = $tr->list_parsers; my %valid_parsers = map { $_, 1 } @parsers; if ( $list ) { print "\nParsers:\n", map { "\t$_\n" } sort @parsers; print "\n"; exit(0); } pod2usage( msg => 'Too many file args' ) if @input > 2; my ( $source_schema, $source_db, $target_schema, $target_db ); my $i = 2; for my $in ( @input ) { my $file = $in->{'file'}; my $parser = $in->{'parser'}; die "Unable to read file '$file'\n" unless -r $file; die "'$parser' is an invalid parser\n" unless $valid_parsers{ $parser }; my $t = SQL::Translator->new; $t->debug( $debug ); $t->parser( $parser ) or die $tr->error; my $out = $t->translate( $file ) or die $tr->error; my $schema = $t->schema; unless ( $schema->name ) { $schema->name( $file ); } if ( $i == 1 ) { $source_schema = $schema; $source_db = $parser; } else { $target_schema = $schema; $target_db = $parser; } $i--; } my $case_insensitive = $target_db =~ /SQLServer/; my $s1_name = $source_schema->name; my $s2_name = $target_schema->name; my ( @new_tables, @diffs , @diffs_at_end); for my $t1 ( 
$source_schema->get_tables ) { my $t1_name = $t1->name; my $t2 = $target_schema->get_table( $t1_name, $case_insensitive ); warn "TABLE '$s1_name.$t1_name'\n" if $debug; unless ( $t2 ) { warn "Couldn't find table '$s1_name.$t1_name' in '$s2_name'\n" if $debug; if ( $target_db =~ /(SQLServer|Oracle)/ ) { for my $constraint ( $t1->get_constraints ) { next if $constraint->type ne FOREIGN_KEY; push @diffs_at_end, "ALTER TABLE $t1_name ADD ". constraint_to_string($constraint, $source_schema).";"; $t1->drop_constraint($constraint); } } push @new_tables, $t1; next; } # Go through our options my $options_different = 0; my %checkedOptions; OPTION: for my $t1_option_ref ( $t1->options ) { my($key1, $value1) = %{$t1_option_ref}; for my $t2_option_ref ( $t2->options ) { my($key2, $value2) = %{$t2_option_ref}; if ( $key1 eq $key2 ) { if ( defined $value1 != defined $value2 ) { $options_different = 1; last OPTION; } if ( defined $value1 && $value1 ne $value2 ) { $options_different = 1; last OPTION; } $checkedOptions{$key1} = 1; next OPTION; } } $options_different = 1; last OPTION; } # Go through the other table's options unless ( $options_different ) { for my $t2_option_ref ( $t2->options ) { my($key, $value) = %{$t2_option_ref}; next if $checkedOptions{$key}; $options_different = 1; last; } } # If there's a difference, just re-set all the options my @diffs_table_options; if ( $options_different ) { my @options = (); foreach my $option_ref ( $t1->options ) { my($key, $value) = %{$option_ref}; push(@options, defined $value ? 
"$key=$value" : $key); } my $options = join(' ', @options); @diffs_table_options = ("ALTER TABLE $t1_name $options;"); } my $t2_name = $t2->name; my(@diffs_table_adds, @diffs_table_changes); for my $t1_field ( $t1->get_fields ) { my $f1_type = $t1_field->data_type; my $f1_size = $t1_field->size; my $f1_name = $t1_field->name; my $f1_nullable = $t1_field->is_nullable; my $f1_default = $t1_field->default_value; my $f1_auto_inc = $t1_field->is_auto_increment; my $t2_field = $t2->get_field( $f1_name, $case_insensitive ); my $f1_full_name = "$s1_name.$t1_name.$t1_name"; warn "FIELD '$f1_full_name'\n" if $debug; my $f2_full_name = "$s2_name.$t2_name.$f1_name"; unless ( $t2_field ) { warn "Couldn't find field '$f2_full_name' in '$t2_name'\n" if $debug; my $temp_default_value = 0; if ( $target_db =~ /SQLServer/ && !$f1_nullable && !defined $f1_default ) { # SQL Server doesn't allow adding non-nullable, non-default columns # so we add it with a default value, then remove the default value $temp_default_value = 1; my(@numeric_types) = qw(decimal numeric float real int bigint smallint tinyint); $f1_default = grep($_ eq $f1_type, @numeric_types) ? 0 : ''; } push @diffs_table_adds, sprintf( "ALTER TABLE %s ADD %s%s %s%s%s%s%s%s;", $t1_name, $target_db =~ /Oracle/ ? '(' : '', $f1_name, $f1_type, ($f1_size && $f1_type !~ /(blob|text)$/) ? "($f1_size)" : '', !defined $f1_default ? '' : uc $f1_default eq 'NULL' ? ' DEFAULT NULL' : uc $f1_default eq 'CURRENT_TIMESTAMP' ? ' DEFAULT CURRENT_TIMESTAMP' : " DEFAULT '$f1_default'", $f1_nullable ? '' : ' NOT NULL', $f1_auto_inc ? ' AUTO_INCREMENT' : '', $target_db =~ /Oracle/ ? 
')' : '', ); if ( $temp_default_value ) { undef $f1_default; push @diffs_table_adds, sprintf( <data_type; my $f2_size = $t2_field->size || ''; my $f2_nullable = $t2_field->is_nullable; my $f2_default = $t2_field->default_value; my $f2_auto_inc = $t2_field->is_auto_increment; if ( !$t1_field->equals($t2_field, $case_insensitive) ) { # SQLServer timestamp fields can't be altered, so we drop and add instead if ( $target_db =~ /SQLServer/ && $f2_type eq "timestamp" ) { push @diffs_table_changes, "ALTER TABLE $t1_name DROP COLUMN $f1_name;"; push @diffs_table_changes, sprintf( "ALTER TABLE %s ADD %s%s %s%s%s%s%s%s;", $t1_name, $target_db =~ /Oracle/ ? '(' : '', $f1_name, $f1_type, ($f1_size && $f1_type !~ /(blob|text)$/) ? "($f1_size)" : '', !defined $f1_default ? '' : uc $f1_default eq 'NULL' ? ' DEFAULT NULL' : uc $f1_default eq 'CURRENT_TIMESTAMP' ? ' DEFAULT CURRENT_TIMESTAMP' : " DEFAULT '$f1_default'", $f1_nullable ? '' : ' NOT NULL', $f1_auto_inc ? ' AUTO_INCREMENT' : '', $target_db =~ /Oracle/ ? ')' : '', ); next; } my $changeText = $target_db =~ /SQLServer/ ? 'ALTER COLUMN' : $target_db =~ /Oracle/ ? 'MODIFY (' : 'CHANGE'; my $nullText = $f1_nullable ? '' : ' NOT NULL'; $nullText = '' if $target_db =~ /Oracle/ && $f1_nullable == $f2_nullable; push @diffs_table_changes, sprintf( "ALTER TABLE %s %s %s%s %s%s%s%s%s%s;", $t1_name, $changeText, $f1_name, $target_db =~ /MySQL/ ? " $f1_name" : '', $f1_type, ($f1_size && $f1_type !~ /(blob|text)$/) ? "($f1_size)" : '', $nullText, !defined $f1_default || $target_db =~ /SQLServer/ ? '' : uc $f1_default eq 'NULL' ? ' DEFAULT NULL' : uc $f1_default eq 'CURRENT_TIMESTAMP' ? ' DEFAULT CURRENT_TIMESTAMP' : " DEFAULT '$f1_default'", $f1_auto_inc ? ' AUTO_INCREMENT' : '', $target_db =~ /Oracle/ ? 
')' : '', ); if ( defined $f1_default && $target_db =~ /SQLServer/ ) { # Adding a column with a default value for SQL Server means adding a # constraint and setting existing NULLs to the default value push @diffs_table_changes, sprintf( "ALTER TABLE %s ADD CONSTRAINT DF_%s_%s %s FOR %s;", $t1_name, $t1_name, $f1_name, uc $f1_default eq 'NULL' ? 'DEFAULT NULL' : uc $f1_default eq 'CURRENT_TIMESTAMP' ? 'DEFAULT CURRENT_TIMESTAMP' : "DEFAULT '$f1_default'", $f1_name, ); push @diffs_table_changes, sprintf( "UPDATE %s SET %s = %s WHERE %s IS NULL;", $t1_name, $f1_name, uc $f1_default eq 'NULL' ? 'NULL' : uc $f1_default eq 'CURRENT_TIMESTAMP' ? 'CURRENT_TIMESTAMP' : "'$f1_default'", $f1_name, ); } } } my(%checked_indices, @diffs_index_creates, @diffs_index_drops); INDEX: for my $i1 ( $t1->get_indices ) { for my $i2 ( $t2->get_indices ) { if ( $i1->equals($i2, $case_insensitive) ) { $checked_indices{$i2} = 1; next INDEX; } } push @diffs_index_creates, sprintf( "CREATE %sINDEX%s ON %s (%s);", $i1->type eq NORMAL ? '' : $i1->type." ", $i1->name ? " ".$i1->name : '', $t1_name, join(",", $i1->fields), ); } INDEX2: for my $i2 ( $t2->get_indices ) { next if $checked_indices{$i2}; for my $i1 ( $t1->get_indices ) { next INDEX2 if $i2->equals($i1, $case_insensitive); } $target_db =~ /SQLServer/ ? push @diffs_index_drops, "DROP INDEX $t1_name.".$i2->name.";" : push @diffs_index_drops, "DROP INDEX ".$i2->name." on $t1_name;"; } my(%checked_constraints, @diffs_constraint_drops); CONSTRAINT: for my $c1 ( $t1->get_constraints ) { next if $source_db =~ /Oracle/ && $c1->type eq UNIQUE && $c1->name =~ /^SYS_/i; for my $c2 ( $t2->get_constraints ) { if ( $c1->equals($c2, $case_insensitive) ) { $checked_constraints{$c2} = 1; next CONSTRAINT; } } push @diffs_at_end, "ALTER TABLE $t1_name ADD ". 
constraint_to_string($c1, $source_schema).";"; } CONSTRAINT2: for my $c2 ( $t2->get_constraints ) { next if $checked_constraints{$c2}; for my $c1 ( $t1->get_constraints ) { next CONSTRAINT2 if $c2->equals($c1, $case_insensitive); } if ( $c2->type eq UNIQUE ) { push @diffs_constraint_drops, "ALTER TABLE $t1_name DROP INDEX ". $c2->name.";"; } elsif ( $target_db =~ /SQLServer/ ) { push @diffs_constraint_drops, "ALTER TABLE $t1_name DROP ".$c2->name.";"; } else { push @diffs_constraint_drops, "ALTER TABLE $t1_name DROP ".$c2->type. ($c2->type eq FOREIGN_KEY ? " ".$c2->name : '').";"; } } push @diffs, @diffs_index_drops, @diffs_constraint_drops, @diffs_table_options, @diffs_table_adds, @diffs_table_changes, @diffs_index_creates; } for my $t2 ( $target_schema->get_tables ) { my $t2_name = $t2->name; my $t1 = $source_schema->get_table( $t2_name, $target_db =~ /SQLServer/ ); unless ( $t1 ) { if ( $target_db =~ /SQLServer/ ) { for my $constraint ( $t2->get_constraints ) { next if $constraint->type eq PRIMARY_KEY; push @diffs, "ALTER TABLE $t2_name DROP ".$constraint->name.";"; } } push @diffs_at_end, "DROP TABLE $t2_name;"; next; } for my $t2_field ( $t2->get_fields ) { my $f2_name = $t2_field->name; my $t1_field = $t1->get_field( $f2_name ); unless ( $t1_field ) { my $modifier = $target_db =~ /SQLServer/ ? 
"COLUMN " : ''; push @diffs, "ALTER TABLE $t2_name DROP $modifier$f2_name;"; } } } if ( @new_tables ) { my $dummy_tr = SQL::Translator->new; $dummy_tr->schema->add_table( $_ ) for @new_tables; my $producer = $dummy_tr->producer( $target_db ); unshift @diffs, $producer->( $dummy_tr ); } push(@diffs, @diffs_at_end); if ( @diffs ) { if ( $source_db !~ /^(MySQL|SQLServer|Oracle)$/ ) { unshift(@diffs, "-- Target database $target_db is untested/unsupported!!!"); } } if ( @diffs ) { print join( "\n", "-- Convert schema '$s2_name' to '$s1_name':\n", @diffs, "\n" ); exit(1); } else { print "There were no differences.\n"; } sub constraint_to_string { my $c = shift; my $schema = shift or die "No schema given"; my @fields = $c->field_names or return ''; if ( $c->type eq PRIMARY_KEY ) { if ( $target_db =~ /Oracle/ ) { return (defined $c->name ? 'CONSTRAINT '.$c->name.' ' : '') . 'PRIMARY KEY (' . join(', ', @fields). ')'; } else { return 'PRIMARY KEY (' . join(', ', @fields). ')'; } } elsif ( $c->type eq UNIQUE ) { if ( $target_db =~ /Oracle/ ) { return (defined $c->name ? 'CONSTRAINT '.$c->name.' ' : '') . 'UNIQUE (' . join(', ', @fields). ')'; } else { return 'UNIQUE '. (defined $c->name ? $c->name.' ' : ''). '(' . join(', ', @fields). ')'; } } elsif ( $c->type eq FOREIGN_KEY ) { my $def = join(' ', map { $_ || () } 'CONSTRAINT', $c->name, 'FOREIGN KEY' ); $def .= ' (' . join( ', ', @fields ) . ')'; $def .= ' REFERENCES ' . $c->reference_table; my @rfields = map { $_ || () } $c->reference_fields; unless ( @rfields ) { my $rtable_name = $c->reference_table; if ( my $ref_table = $schema->get_table( $rtable_name ) ) { push @rfields, $ref_table->primary_key; } else { warn "Can't find reference table '$rtable_name' " . "in schema\n"; } } if ( @rfields ) { $def .= ' (' . join( ', ', @rfields ) . ')'; } else { warn "FK constraint on " . 'some table' . '.' . join('', @fields) . " has no reference fields\n"; } if ( $c->match_type ) { $def .= ' MATCH ' . ( $c->match_type =~ /full/i ) ? 
'FULL' : 'PARTIAL'; } if ( $c->on_delete ) { $def .= ' ON DELETE '.join( ' ', $c->on_delete ); } if ( $c->on_update ) { $def .= ' ON UPDATE '.join( ' ', $c->on_update ); } return $def; } } # ------------------------------------------------------------------- # Bring out number weight & measure in a year of dearth. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 SEE ALSO SQL::Translator, L. =cut SQL-Translator-1.60/script/sqlt-diff0000755000000000000000000001503613473550070017360 0ustar00rootroot00000000000000#!/usr/bin/env perl # vim: set ft=perl: # ------------------------------------------------------------------- # Copyright (C) 2002-2009 The SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MAb # 02110-1301 USA. 
# ------------------------------------------------------------------- =head1 NAME sqlt-diff - find the differences b/w two schemas =head1 SYNOPSIS For help: sqlt-diff -h|--help For a list of all valid parsers: sqlt -l|--list To diff two schemas: sqlt-diff [options] file_name1=parser1 file_name2=parser2 Options: -d|--debug Show debugging info -t|--trace Turn on tracing for Parse::RecDescent -c|--case-insensitive Compare tables/columns case-insensitively --ignore-index-names Ignore index name differences --ignore-constraint-names Ignore constraint name differences --mysql_parser_version=<#####> Specify a target MySQL parser version for dealing with /*! comments --output-db= This Producer will be used instead of one corresponding to parser1 to format output for new tables --ignore-view-sql Ignore view SQL differences --ignore-proc-sql Ignore procedure SQL differences --no-batch-alters Do not clump multile alters to the same table into a single ALTER TABLE statement where possible. --quote= Use to quote all table and field names in statements =head1 DESCRIPTION sqlt-diff is a utility for creating a file of SQL commands necessary to transform the first schema provided to the second. While not yet exhaustive in its ability to mutate the entire schema, it will report the following =over =item * New tables Using the Producer class of the target (second) schema, any tables missing in the first schema will be generated in their entirety (fields, constraints, indices). =item * Missing/altered fields Any fields missing or altered between the two schemas will be reported as: ALTER TABLE [DROP ] [CHANGE ()] ; =item * Missing/altered indices Any indices missing or of a different type or on different fields will be indicated. 
Indices that should be dropped will be reported as such: DROP INDEX ON ; An index of a different type or on different fields will be reported as a new index as such: CREATE [] INDEX [] ON ( [,] ) ; =back ALTER, CREATE, DROP statements are created by SQL::Translator::Producer::*, see there for support/problems. Currently (v0.0900), only MySQL is supported by this code. =cut # ------------------------------------------------------------------- use strict; use warnings; use Pod::Usage; use Data::Dumper; use Getopt::Long; use SQL::Translator; use SQL::Translator::Diff; use SQL::Translator::Schema::Constants; use vars qw( $VERSION ); $VERSION = '1.60'; my ( @input, $list, $help, $debug, $trace, $caseopt, $ignore_index_names, $ignore_constraint_names, $output_db, $mysql_parser_version, $ignore_view_sql, $ignore_proc_sql, $no_batch_alters, $quote ); GetOptions( 'l|list' => \$list, 'h|help' => \$help, 'd|debug' => \$debug, 't|trace' => \$trace, 'c|case-insensitive' => \$caseopt, 'ignore-index-names' => \$ignore_index_names, 'ignore-constraint-names' => \$ignore_constraint_names, 'mysql_parser_version:s' => \$mysql_parser_version, 'output-db:s' => \$output_db, 'ignore-view-sql' => \$ignore_view_sql, 'ignore-proc-sql' => \$ignore_proc_sql, 'quote:s' => \$quote, 'no-batch-alters' => \$no_batch_alters, ) or pod2usage(2); for my $arg ( @ARGV ) { if ( $arg =~ m/^([^=]+)=(.+)$/ ) { push @input, { file => $1, parser => $2 }; } } my $tr = SQL::Translator->new; my @parsers = $tr->list_parsers; my %valid_parsers = map { $_, 1 } @parsers; if ( $list ) { print "\nParsers:\n", map { "\t$_\n" } sort @parsers; print "\n"; exit(0); } pod2usage(1) if $help || !@input; pod2usage(msg => 'Please specify two schemas to diff') if scalar @input != 2; my ( $source_schema, $source_db, $target_schema, $target_db ) = map { my $file = $_->{'file'}; my $parser = $_->{'parser'}; die "Unable to read file '$file'\n" unless -r $file; die "'$parser' is an invalid parser\n" unless $valid_parsers{ $parser }; 
my $t = SQL::Translator->new(parser_args => { mysql_parser_version => $mysql_parser_version }); $t->debug( $debug ); $t->trace( $trace ); $t->parser( $parser ) or die $tr->error; my $out = $t->translate( $file ) or die $tr->error; my $schema = $t->schema; unless ( $schema->name ) { $schema->name( $file ); } ($schema, $parser); } @input; my $result = SQL::Translator::Diff::schema_diff( $source_schema, $source_db, $target_schema, $target_db, { caseopt => $caseopt || 0, ignore_index_names => $ignore_index_names || 0, ignore_constraint_names => $ignore_constraint_names || 0, ignore_view_sql => $ignore_view_sql || 0, ignore_proc_sql => $ignore_proc_sql || 0, output_db => $output_db, no_batch_alters => $no_batch_alters || 0, debug => $debug || 0, trace => $trace || 0, producer_args => { quote_table_names => $quote || '', quote_field_names => $quote || '', }, } ); if($result) { print $result; } else { print "No differences found."; } # ------------------------------------------------------------------- # Bring out number weight & measure in a year of dearth. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 SEE ALSO SQL::Translator, L. =cut SQL-Translator-1.60/script/sqlt-graph0000755000000000000000000002006613473550070017550 0ustar00rootroot00000000000000#!/usr/bin/env perl # ------------------------------------------------------------------- # Copyright (C) 2002-2009 SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. # ------------------------------------------------------------------- =head1 NAME sqlt-graph - Automatically create a graph from a database schema =head1 SYNOPSIS ./sqlt-graph -d|--db|-f|--from=db_parser [options] schema.sql Options: -l|--layout Layout schema for GraphViz ("dot," "neato," "twopi"; default "dot") -n|--node-shape Shape of the nodes ("record," "plaintext," "ellipse," "circle," "egg," "triangle," "box," "diamond," "trapezium," "parallelogram," "house," "hexagon," "octagon," default "record") -o|--output Output file name (default STDOUT) -t|--output-type Output file type ("canon", "text," "ps," "hpgl," "pcl," "mif," "pic," "gd," "gd2," "gif," "jpeg," "png," "wbmp," "cmap," "ismap," "imap," "vrml," "vtx," "mp," "fig," "svg," "plain," default "png") -c|--color Add colors --cluster Cluster tables --no-fields Don't show field names --height Image height (in inches, default "11", set to "0" to undefine) --width Image width (in inches, default "8.5", set to "0" to undefine) --fontsize custom font size for node and edge labels --fontname name of custom font (or full path to font file) for node, edge, and graph labels --nodeattr attribute name and value (in key=val syntax) for nodes; this option may be repeated to specify multiple node attributes --edgeattr same as --nodeattr, but for edge attributes --graphattr same as --nodeattr, but for graph attributes --natural-join Perform natural joins --natural-join-pk Perform natural joins from primary keys only --show-datatypes Show datatype of each field --show-sizes Show column sizes for VARCHAR and CHAR fields --show-constraints Show list of constraints for each field -s|--skip Fields to skip in natural joins --skip-tables Comma-separated list of table names to exclude --skip-tables-like Comma-separated 
list of regexen to exclude tables --debug Print debugging information --trace Print parser trace info =head1 DESCRIPTION This script will create a graph of your schema. Only the database driver argument (for SQL::Translator) is required. If no output file name is given, then image will be printed to STDOUT, so you should redirect the output into a file. The default action is to assume the presence of foreign key relationships defined via "REFERNCES" or "FOREIGN KEY" constraints on the tables. If you are parsing the schema of a file that does not have these, you will find the natural join options helpful. With natural joins, like-named fields will be considered foreign keys. This can prove too permissive, however, as you probably don't want a field called "name" to be considered a foreign key, so you could include it in the "skip" option, and all fields called "name" will be excluded from natural joins. A more efficient method, however, might be to simply deduce the foreign keys from primary keys to other fields named the same in other tables. Use the "natural-join-pk" option to achieve this. If the schema defines foreign keys, then the graph produced will be directed showing the direction of the relationship. If the foreign keys are intuited via natural joins, the graph will be undirected. Clustering of tables allows you to group and box tables according to function or domain or whatever criteria you choose. The syntax for clustering tables is: cluster1=table1,table2;cluster2=table3,table4 =cut # ------------------------------------------------------------------- use strict; use warnings; use Data::Dumper; use Getopt::Long; use GraphViz; use Pod::Usage; use SQL::Translator; use vars '$VERSION'; $VERSION = '1.60'; # # Get arguments. 
# my ( $layout, $node_shape, $out_file, $output_type, $db_driver, $add_color, $natural_join, $join_pk_only, $skip_fields, $show_datatypes, $show_sizes, $show_constraints, $debug, $help, $height, $width, $no_fields, $fontsize, $fontname, $skip_tables, $skip_tables_like, $cluster, $trace ); # multi-valued options: my %edgeattrs = (); my %nodeattrs = (); my %graphattrs = (); GetOptions( 'd|db|f|from=s' => \$db_driver, 'o|output:s' => \$out_file, 'l|layout:s' => \$layout, 'n|node-shape:s' => \$node_shape, 't|output-type:s' => \$output_type, 'height:f' => \$height, 'width:f' => \$width, 'fontsize=i' => \$fontsize, 'fontname=s' => \$fontname, 'nodeattr=s' => \%nodeattrs, 'edgeattr=s' => \%edgeattrs, 'graphattr=s' => \%graphattrs, 'c|color' => \$add_color, 'cluster:s' => \$cluster, 'no-fields' => \$no_fields, 'natural-join' => \$natural_join, 'natural-join-pk' => \$join_pk_only, 's|skip:s' => \$skip_fields, 'skip-tables:s' => \$skip_tables, 'skip-tables-like:s' => \$skip_tables_like, 'show-datatypes' => \$show_datatypes, 'show-sizes' => \$show_sizes, 'show-constraints' => \$show_constraints, 'debug' => \$debug, 'trace' => \$trace, 'h|help' => \$help, ) or die pod2usage; my @files = @ARGV; # the create script(s) for the original db pod2usage(1) if $help; pod2usage( -message => "No db driver specified" ) unless $db_driver; pod2usage( -message => 'No input file' ) unless @files; my $translator = SQL::Translator->new( from => $db_driver, to => 'GraphViz', debug => $debug || 0, trace => $trace || 0, producer_args => { out_file => $out_file, layout => $layout, node_shape => $node_shape, output_type => $output_type, add_color => $add_color, natural_join => $natural_join, natural_join_pk => $join_pk_only, skip_fields => $skip_fields, skip_tables => $skip_tables, skip_tables_like => $skip_tables_like, show_datatypes => $show_datatypes, show_sizes => $show_sizes, show_constraints => $show_constraints, cluster => $cluster, height => $height || 0, width => $width || 0, fontsize => 
$fontsize, fontname => $fontname, nodeattrs => \%nodeattrs, edgeattrs => \%edgeattrs, graphattrs => \%graphattrs, show_fields => $no_fields ? 0 : 1, }, ) or die SQL::Translator->error; for my $file (@files) { my $output = $translator->translate( $file ) or die "Error: " . $translator->error; if ( $out_file ) { print "Image written to '$out_file'. Done.\n"; } else { print $output; } } # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 SEE ALSO perl, SQL::Translator. =cut SQL-Translator-1.60/script/sqlt0000755000000000000000000002776413473550070016465 0ustar00rootroot00000000000000#!/usr/bin/env perl # vim: set ft=perl: # ------------------------------------------------------------------- # Copyright (C) 2002-2009 SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. # ------------------------------------------------------------------- =head1 NAME sqlt - convert SQL schema using SQL::Translator =head1 SYNOPSIS For help: sqlt -h|--help For a list of all parsers and producers: sqlt -l|--list To translate a schema: sqlt -f|--from|--parser MySQL -t|--to|--producer Oracle [options] file [file2 ...] 
General Options: -d|--debug Print debug info -v|--validate Validate the schema --version Show the version of SQL::Translator --trace Print parser trace info --show-warnings Print warnings to STDERR General Parser Options: --skip Comma-separated list of tables to skip (only implemented in some parsers) --ignore_opts Comma-separated list of table options to ignore DBI Parser Options: --dsn DSN for connecting to database (see also --use-same-auth below) --db-user Database user --db-password Database password xSV Parser Options: --fs The field separator --rs The record separator --no-trim Don't trim whitespace on fields --no-scan Don't scan fields for data types and sizes MySQL Parser Options: --mysql-parser-version Target MySQL parser version for dealing with /*! comments; default = 30000 MySQL Producer Options: --mysql-version MySQL server version General Producer Options --producer-db-user Database user for producer --producer-db-pass Database password for producer --producer-dsn DSN for producer --use-same-auth Use these DSN, user, password for producer output DB Producer Options: --add-drop-table Add 'DROP TABLE' statements before creates --quote-table-names Quote all table names in statements --quote-field-names Qjuote all field names in statements --no-comments Don't include comments in SQL output PostgreSQL Producer Options: --postgres-version PostgreSQL server version Diagram Producer Options: --imap-file Filename to put image map data --imap-url URL to use for image map Dumper Producer Options: --skip Comma-separated list of tables to skip --skiplike Regex for tables to skip --add-truncate Add "TRUNCATE TABLE" statements for each table HTML/POD Producer Options: --pretty Use CGI::Pretty for the output --title Title of schema TTSchema Producer Options: --template The path to the template --tt-var var=value Pass extra variables to the template --tt-conf option=value Pass extra config options to Template XML-SQLFairy Producer Options: --add-prefix Use an 
explicit namespace prefix of 'sqlf:' --prefix=

Use the namespace prefix given as argument. --no-newlines Write the XML as a single line. --indent= Use characters of whitespace to indent the XML. ClassDBI Producer Options: --package Base package name for Class::DBI modules. =head1 DESCRIPTION This script is part of the SQL Fairy project. It will try to convert any source file for which it has a grammar into any format for which it has a producer. If using "show-warnings," be sure to redirect STDERR to a separate file. In bash, you could do this: $ sql_translator.pl -f MySQL -t PostgreSQL --show-warnings \ file.sql 1>out 2>err You can specify a parser or producer located in any module that Perl knows about, allowing you to easily substitute your own. =cut # ------------------------------------------------------------------- use strict; use warnings; use Getopt::Long; use Pod::Usage; use SQL::Translator; use vars qw( $VERSION ); $VERSION = '1.60'; my $from; # the original database my $to; # the destination database my $help; # show POD and bail my $stdin; # whether to read STDIN for create script my $no_comments; # whether to put comments in out file my $show_warnings; # whether to show warnings from SQL::Translator my $add_drop_table; # whether to add "DROP table" statements my $quote_table_names; # whether to quote table names my $quote_field_names; # whether to quote field names my $debug; # whether to print debug info my $trace; # whether to print parser trace my $list; # list all parsers and producers my $no_trim; # don't trim whitespace on xSV fields my $no_scan; # don't scan xSV fields for data types and sizes my $field_separator; # for xSV files my $record_separator; # for xSV files my $validate; # whether to validate the parsed document my $imap_file; # filename where to place image map coords my $imap_url; # URL to use in making image map my $pretty; # use CGI::Pretty instead of CGI (HTML producer) my $template; # template to pass to TTSchema producer my %tt_vars; # additional template vars to pass the 
TTSchema producer my %tt_conf; # additional template conf to pass the TTSchema producer my $title; # title for HTML/POD producer my $add_prefix; # Use explicit namespace prefix (XML producer) my $prefix; # Set explicit namespace prefix (XML producer) my $newlines; # Add newlines around tags (XML producer) my $indent; # Number of indent chars for XML my $package_name; # Base class name for ClassDBI my $use_same_auth =0; # producer uses same DSN, user, password as parser my $dsn; # DBI parser my $db_user; # DBI parser my $db_password; # DBI parser my $show_version; # Show version and exit script my $skip; my $skiplike; my $ignore_opts; my $producer_db_user; # DSN for producer (e.g. Dumper, ClassDBI) my $producer_db_password; # db_pass " my $producer_dsn; # db_user " my $add_truncate; my $mysql_parser_version; # MySQL parser arg for /*! comments my $postgres_version; # PostgreSQL version my $mysql_version; # MySQL version GetOptions( 'add-drop-table' => \$add_drop_table, 'quote-table-names|quote_table_names' => \$quote_table_names, 'quote-field-names|quote_field_names' => \$quote_field_names, 'd|debug' => \$debug, 'f|from|parser:s' => \$from, 'fs:s' => \$field_separator, 'h|help' => \$help, 'imap-file:s' => \$imap_file, 'imap-url:s' => \$imap_url, 't|to|producer:s' => \$to, 'l|list' => \$list, 'pretty!' 
=> \$pretty, 'no-comments' => \$no_comments, 'no-scan' => \$no_scan, 'no-trim' => \$no_trim, 'rs:s' => \$record_separator, 'show-warnings' => \$show_warnings, 'template:s' => \$template, 'tt-var=s' => \%tt_vars, 'tt-conf=s' => \%tt_conf, 'title:s' => \$title, 'trace' => \$trace, 'v|validate' => \$validate, 'dsn:s' => \$dsn, 'db-user:s' => \$db_user, 'db-password:s' => \$db_password, 'producer-dsn:s' => \$producer_dsn, 'producer-db-user:s'=> \$producer_db_user, 'producer-db-pass:s'=> \$producer_db_password, 'skip:s' => \$skip, 'skiplike:s' => \$skiplike, 'ignore_opts:s' => \$ignore_opts, 'add_truncate' => \$add_truncate, 'add-prefix' => \$add_prefix, 'prefix:s' => \$prefix, 'indent:s' => \$indent, 'newlines!' => \$newlines, 'package=s' => \$package_name, 'use-same-auth' => \$use_same_auth, 'version' => \$show_version, 'mysql-parser-version=i' => \$mysql_parser_version, 'postgres-version=f' => \$postgres_version, 'mysql-version=f' => \$mysql_version, ) or pod2usage(2); if ($use_same_auth) { $producer_dsn = $dsn; $producer_db_user = $db_user; $producer_db_password = $db_password; } if ( ( !defined $from && defined $dsn ) || $from =~ /^DBI.*/ ) { $from = 'DBI'; } my @files = @ARGV; # source files unless ( @files ) { if ( defined($from) && $from eq 'DBI' ) { @files = ('!'); } else { @files = ('-'); } } pod2usage(1) if $help; if ( $show_version ) { print "SQL::Translator v", $SQL::Translator::VERSION, "\n"; exit(0); } my $translator = SQL::Translator->new( debug => $debug || 0, trace => $trace || 0, no_comments => $no_comments || 0, show_warnings => $show_warnings || 0, add_drop_table => $add_drop_table || 0, quote_table_names => defined $quote_table_names ? $quote_table_names : 1, quote_field_names => defined $quote_field_names ? $quote_field_names : 1, validate => $validate || 0, parser_args => { trim_fields => $no_trim ? 0 : 1, scan_fields => $no_scan ? 
0 : 1, field_separator => $field_separator, record_separator => $record_separator, dsn => $dsn, db_user => $db_user, db_password => $db_password, mysql_parser_version => $mysql_parser_version, skip => $skip, ignore_opts => $ignore_opts, }, producer_args => { imap_file => $imap_file, imap_url => $imap_url, pretty => $pretty, ttfile => $template, tt_vars => \%tt_vars, tt_conf => \%tt_conf, title => $title, dsn => $producer_dsn, db_user => $producer_db_user, db_password => $producer_db_password, skip => $skip, skiplike => $skiplike, add_truncate => $add_truncate, add_prefix => $add_prefix, prefix => $prefix, indent => $indent, newlines => $newlines, postgres_version => $postgres_version, mysql_version => $mysql_version, package_name => $package_name, }, ); if ( $list ) { my @parsers = $translator->list_parsers; my @producers = $translator->list_producers; for ( @parsers, @producers ) { if ( $_ =~ m/.+::(\w+)\.pm/ ) { $_ = $1; } } print "\nParsers:\n", map { "\t$_\n" } sort @parsers; print "\nProducers:\n", map { "\t$_\n" } sort @producers; print "\n"; exit(0); } pod2usage( msg => 'Please supply "from" and "to" arguments' ) unless $from && $to; $translator->parser($from); $translator->producer($to); for my $file (@files) { my @args = ($file eq '-') ? (data => \*STDIN) : ($file eq '!') ? (data => '') : (file => $file); my $output = $translator->translate(@args) or die "Error: " . $translator->error; print $output; } # ---------------------------------------------------- # It is not all books that are as dull as their readers. # Henry David Thoreau # ---------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE, darren chamberlain Edarren@cpan.orgE. =head1 SEE ALSO SQL::Translator, L. 
=cut SQL-Translator-1.60/script/sqlt-dumper0000755000000000000000000000633313473550070017744 0ustar00rootroot00000000000000#!/usr/bin/env perl # ------------------------------------------------------------------- # Copyright (C) 2002-2009 SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. # # ------------------------------------------------------------------- =head1 NAME sqlt-dumper - create a dumper script from a schema =head1 SYNOPSIS sqlt-dumper -d Oracle [options] schema.sql > dumper.pl ./dumper.pl > data.sql Options: -h|--help Show help and exit --skip=t1[,t2] Skip tables in comma-separated list --skiplike=regex Skip tables matching the regular expression -u|--user Database username -p|--password Database password --dsn DSN for DBI =head1 DESCRIPTION This script uses SQL::Translator to parse the SQL schema and create a Perl script that can connect to the database and dump the data as INSERT statements (a la mysqldump) or MySQL's LOAD FILE syntax. You may specify tables to "skip" (also using a "skiplike" regular expression) and the generated dumper script will not have those tables. However, these will also be options in the generated dumper, so you can wait to specify these options when you dump your database. The database username, password, and DSN can be hardcoded into the generated script, or part of the DSN can be intuited from the "database" argument. 
=cut # ------------------------------------------------------------------- use strict; use warnings; use Pod::Usage; use Getopt::Long; use SQL::Translator; use File::Basename qw(basename); use vars '$VERSION'; $VERSION = '1.60'; my ( $help, $db, $skip, $skiplike, $db_user, $db_pass, $dsn ); GetOptions( 'h|help' => \$help, 'd|f|from|db=s' => \$db, 'skip:s' => \$skip, 'skiplike:s' => \$skiplike, 'u|user:s' => \$db_user, 'p|password:s' => \$db_pass, 'dsn:s' => \$dsn, ) or pod2usage; pod2usage(0) if $help; pod2usage( 'No database driver specified' ) unless $db; $db_user ||= 'username'; $db_pass ||= 'password'; $dsn ||= "dbi:$db:_"; my $file = shift @ARGV or pod2usage( -msg => 'No input file' ); my $t = SQL::Translator->new( from => $db, to => 'Dumper', producer_args => { skip => $skip, skiplike => $skiplike, db_user => $db_user, db_password => $db_pass, dsn => $dsn, } ); print $t->translate( $file ); exit(0); # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 SEE ALSO perl, SQL::Translator, SQL::Translator::Producer::Dumper. =cut SQL-Translator-1.60/script/sqlt.cgi0000755000000000000000000004431013473550070017210 0ustar00rootroot00000000000000#!/usr/bin/env perl # ------------------------------------------------------------------- # Copyright (C) 2002-2009 SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. # ------------------------------------------------------------------- =head1 NAME sqlt.cgi - CGI front-end for SQL::Translator =head1 DESCRIPTION Place this script in your "cgi-bin" directory and point your browser to it. This script is meant to be a simple graphical interface to all the parsers and producers of SQL::Translator. =cut # ------------------------------------------------------------------- use strict; use warnings; use CGI; use SQL::Translator; use vars '$VERSION'; $VERSION = '1.60'; my $q = CGI->new; eval { if ( $q->param ) { my $data; if ( $q->param('schema') ) { $data = $q->param('schema'); } elsif ( my $fh = $q->upload('schema_file') ) { local $/; $data = <$fh>; } die "No schema provided!\n" unless $data; my $producer = $q->param('producer'); my $output_type = $producer eq 'Diagram' ? $q->param('diagram_output_type') : $producer eq 'GraphViz' ? $q->param('graphviz_output_type') : '' ; my $t = SQL::Translator->new( from => $q->param('parser'), producer_args => { add_drop_table => $q->param('add_drop_table'), output_type => $output_type, title => $q->param('title') || 'Schema', natural_join => $q->param('natural_join') eq 'no' ? 0 : 1, join_pk_only => $q->param('natural_join') eq 'pk_only' ? 
1 : 0, add_color => $q->param('add_color'), skip_fields => $q->param('skip_fields'), show_fk_only => $q->param('show_fk_only'), font_size => $q->param('font_size'), no_columns => $q->param('no_columns'), node_shape => $q->param('node_shape'), layout => $q->param('layout') || '', height => $q->param('height') || 0, width => $q->param('width') || 0, show_fields => $q->param('show_fields') || 0, ttfile => $q->upload('template'), validate => $q->param('validate'), emit_empty_tags => $q->param('emit_empty_tags'), attrib_values => $q->param('attrib_values'), no_comments => !$q->param('comments'), }, parser_args => { trim_fields => $q->param('trim_fields'), scan_fields => $q->param('scan_fields'), field_separator => $q->param('fs'), record_separator => $q->param('rs'), }, ) or die SQL::Translator->error; my $image_type = ''; my $text_type = 'plain'; if ( $output_type =~ /(gif|png|jpeg)/ ) { $image_type = $output_type; } elsif ( $output_type eq 'svg' ) { $image_type = 'svg+xml'; } elsif ( $output_type =~ /gd/ ) { $image_type = 'png'; } elsif ( $output_type eq 'ps' ) { $text_type = 'postscript'; } elsif ( $producer eq 'HTML' ) { $text_type = 'html'; } my $header_type = $image_type ? 
"image/$image_type" : "text/$text_type"; $t->data( $data ); $t->producer( $producer ); my $output = $t->translate or die $t->error; print $q->header( -type => $header_type ), $output; } else { show_form( $q ); } }; if ( my $error = $@ ) { print $q->header, $q->start_html('Error'), $q->h1('Error'), $error, $q->end_html; } # ------------------------------------------------------------------- sub show_form { my $q = shift; my $title = 'SQL::Translator'; print $q->header, $q->start_html( -title => $title ), $q->h1( qq[$title] ), $q->start_form(-enctype => 'multipart/form-data'), $q->table( { -border => 1 }, $q->Tr( $q->td( [ 'Upload your schema file:', $q->filefield( -name => 'schema_file'), ] ), ), $q->Tr( $q->td( [ 'Or paste your schema here:', $q->textarea( -name => 'schema', -rows => 5, -columns => 60, ), ] ), ), $q->Tr( $q->td( [ 'Parser:', $q->radio_group( -name => 'parser', -values => [ qw( MySQL PostgreSQL Oracle Sybase Excel XML-SQLFairy xSV ) ], -default => 'MySQL', -rows => 3, ), ] ), ), $q->Tr( $q->td( [ 'Producer:', $q->radio_group( -name => 'producer', -values => [ qw[ ClassDBI Diagram GraphViz HTML MySQL Oracle POD PostgreSQL SQLite Sybase TTSchema XML-SQLFairy ] ], -default => 'GraphViz', -rows => 3, ), ] ), ), $q->Tr( $q->td( { -colspan => 2, -align => 'center' }, $q->submit( -name => 'submit', -value => 'Submit', ) ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'General Options:' ), ), $q->Tr( $q->td( [ 'Validate Schema:', $q->radio_group( -name => 'validate', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 0, -rows => 2, ), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'DB Producer Options:' ), ), $q->Tr( $q->td( [ 'Add "DROP TABLE" statements:', $q->radio_group( -name => 'add_drop_table', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 0, -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Include comments:', $q->radio_group( -name => 
'comments', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 1, -rows => 2, ), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'HTML/POD/Diagram Producer Options:' ), ), $q->Tr( $q->td( [ 'Title:', $q->textfield('title'), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'TTSchema Producer Options:' ), ), $q->Tr( $q->td( [ 'Template:', $q->filefield( -name => 'template'), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'Graphical Producer Options' ), ), $q->Tr( $q->td( [ 'Perform Natural Joins:', $q->radio_group( -name => 'natural_join', -values => [ 'no', 'yes', 'pk_only' ], -labels => { no => 'No', yes => 'Yes, on all like-named fields', pk_only => 'Yes, but only from primary keys' }, -default => 'no', -rows => 3, ), ] ), ), $q->Tr( $q->td( [ 'Skip These Fields in Natural Joins:', $q->textarea( -name => 'skip_fields', -rows => 3, -columns => 60, ), ] ), ), $q->Tr( $q->td( [ 'Show Only Foreign Keys:', $q->radio_group( -name => 'show_fk_only', -values => [ 1, 0 ], -default => 0, -labels => { 1 => 'Yes', 0 => 'No', }, -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Add Color:', $q->radio_group( -name => 'add_color', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 1, -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Show Field Names:', $q->radio_group( -name => 'show_fields', -values => [ 1, 0 ], -default => 1, -labels => { 1 => 'Yes', 0 => 'No', }, -rows => 2, ), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'Diagram Producer Options' ), ), $q->Tr( $q->td( [ 'Output Type:', $q->radio_group( -name => 'diagram_output_type', -values => [ 'png', 'jpeg' ], -default => 'png', -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Font Size:', $q->radio_group( -name => 'font_size', -values => [ qw( small medium large ) ], -default => 'medium', -rows => 3, ), ] ), ), $q->Tr( $q->td( [ 'Number of Columns:', 
$q->textfield('no_columns'), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'GraphViz Producer Options' ), ), $q->Tr( $q->td( [ 'Output Type:', $q->radio_group( -name => 'graphviz_output_type', -values => [ qw( canon text ps hpgl pcl mif pic gd gd2 gif jpeg png wbmp cmap ismap imap vrml vtx mp fig svg plain ) ], -default => 'png', -rows => 4, ), ] ), ), $q->Tr( $q->td( [ 'Layout:', $q->radio_group( -name => 'layout', -values => [ qw( dot neato twopi ) ], -default => 'dot', -rows => 3, ), ] ), ), $q->Tr( $q->td( [ 'Node Shape:', $q->radio_group( -name => 'node_shape', -values => [ qw( record plaintext ellipse circle egg triangle box diamond trapezium parallelogram house hexagon octagon ) ], -default => 'record', -rows => 4, ), ] ), ), $q->Tr( $q->td( [ 'Height:', $q->textfield( -name => 'height' ), ] ), ), $q->Tr( $q->td( [ 'Width:', $q->textfield( -name => 'width' ), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'XML Producer Options:' ), ), $q->Tr( $q->td( [ 'Use attributes for values:', $q->radio_group( -name => 'attrib-values', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 0, -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Emit Empty Tags:', $q->radio_group( -name => 'emit-empty-tags', -values => [ 1, 0 ], -labels => { 1 => 'Yes', 0 => 'No' }, -default => 0, -rows => 2, ), ] ), ), $q->Tr( $q->th( { align => 'left', bgcolor => 'lightgrey', colspan => 2 }, 'xSV Parser Options' ), ), $q->Tr( $q->td( [ 'Field Separator:', $q->textfield( -name => 'fs' ), ] ), ), $q->Tr( $q->td( [ 'Record Separator:', $q->textfield( -name => 'rs' ), ] ), ), $q->Tr( $q->td( [ 'Trim Whitespace Around Fields:', $q->radio_group( -name => 'trim_fields', -values => [ 1, 0 ], -default => 1, -labels => { 1 => 'Yes', 0 => 'No', }, -rows => 2, ), ] ), ), $q->Tr( $q->td( [ 'Scan Fields for Data Type:', $q->radio_group( -name => 'scan_fields', -values => [ 1, 0 ], -default => 1, -labels => { 1 => 'Yes', 0 
=> 'No', }, -rows => 2, ), ] ), ), $q->Tr( $q->td( { -colspan => 2, -align => 'center' }, $q->submit( -name => 'submit', -value => 'Submit', ) ), ), ), $q->end_form, $q->end_html; } # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head1 SEE ALSO L, L =cut SQL-Translator-1.60/script/sqlt-diagram0000755000000000000000000001266513473550070020061 0ustar00rootroot00000000000000#!/usr/bin/env perl # ------------------------------------------------------------------- # Copyright (C) 2002-2011 SQLFairy Authors # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA. 
# ------------------------------------------------------------------- =head1 NAME sqlt-diagram - Automatically create a diagram from a database schema =head1 SYNOPSIS ./sqlt-diagram -d|-f|--from|--db=db_parser [options] schema.sql Options: -o|--output Output file name (default STDOUT) -i|--image Output image type ("png" or "jpeg," default "png") -t|--title Title to give schema -c|--cols Number of columns -n|--no-lines Don't draw lines --font-size Font size ("small," "medium," "large," or "huge," default "medium") --gutter Gutter size between tables --color Add colors --show-fk-only Only show fields that act as primary or foreign keys --natural-join Perform natural joins --natural-join-pk Perform natural joins from primary keys only -s|--skip Fields to skip in natural joins --skip-tables Comma-separated list of table names to exclude --skip-tables-like Comma-separated list of regexen to exclude tables --debug Print debugging information =head1 DESCRIPTION This script will create a picture of your schema. Only the database driver argument (for SQL::Translator) is required. If no output file name is given, then image will be printed to STDOUT, so you should redirect the output into a file. The default action is to assume the presence of foreign key relationships defined via "REFERENCES" or "FOREIGN KEY" constraints on the tables. If you are parsing the schema of a file that does not have these, you will find the natural join options helpful. With natural joins, like-named fields will be considered foreign keys. This can prove too permissive, however, as you probably don't want a field called "name" to be considered a foreign key, so you could include it in the "skip" option, and all fields called "name" will be excluded from natural joins. A more efficient method, however, might be to simply deduce the foreign keys from primary keys to other fields named the same in other tables. Use the "natural-join-pk" option to achieve this. 
=cut use strict; use warnings; use Data::Dumper; use Getopt::Long; use Pod::Usage; use SQL::Translator; use vars '$VERSION'; $VERSION = '1.60'; # # Get arguments. # my ( $out_file, $output_type, $db_driver, $title, $num_columns, $no_lines, $font_size, $add_color, $debug, $show_fk_only, $gutter, $natural_join, $join_pk_only, $skip_fields, $skip_tables, $skip_tables_like, $help ); GetOptions( 'd|db|f|from=s' => \$db_driver, 'o|output:s' => \$out_file, 'i|image:s' => \$output_type, 't|title:s' => \$title, 'c|columns:i' => \$num_columns, 'n|no-lines' => \$no_lines, 'font-size:s' => \$font_size, 'gutter:i' => \$gutter, 'color' => \$add_color, 'show-fk-only' => \$show_fk_only, 'natural-join' => \$natural_join, 'natural-join-pk' => \$join_pk_only, 's|skip:s' => \$skip_fields, 'skip-tables:s' => \$skip_tables, 'skip-tables-like:s' => \$skip_tables_like, 'debug' => \$debug, 'h|help' => \$help, ) or die pod2usage; my @files = @ARGV; # the create script(s) for the original db pod2usage(1) if $help; pod2usage( -message => "No db driver specified" ) unless $db_driver; pod2usage( -message => 'No input file' ) unless @files; my $translator = SQL::Translator->new( from => $db_driver, to => 'Diagram', debug => $debug || 0, producer_args => { out_file => $out_file, output_type => $output_type, gutter => $gutter || 0, title => $title, num_columns => $num_columns, no_lines => $no_lines, font_size => $font_size, add_color => $add_color, show_fk_only => $show_fk_only, natural_join => $natural_join, join_pk_only => $join_pk_only, skip_fields => $skip_fields, skip_tables => $skip_tables, skip_tables_like => $skip_tables_like, }, ) or die SQL::Translator->error; binmode STDOUT unless $out_file; for my $file (@files) { my $output = $translator->translate( $file ) or die "Error: " . $translator->error; if ( $out_file ) { print "Image written to '$out_file'. 
Done.\n"; } else { print $output; } } # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.60/MANIFEST0000644000000000000000000001432313473557373015401 0ustar00rootroot00000000000000AUTHORS Changes lib/SQL/Translator.pm lib/SQL/Translator/Diff.pm lib/SQL/Translator/Filter/DefaultExtra.pm lib/SQL/Translator/Filter/Globals.pm lib/SQL/Translator/Filter/Names.pm lib/SQL/Translator/Generator/DDL/MySQL.pm lib/SQL/Translator/Generator/DDL/PostgreSQL.pm lib/SQL/Translator/Generator/DDL/SQLite.pm lib/SQL/Translator/Generator/DDL/SQLServer.pm lib/SQL/Translator/Generator/Role/DDL.pm lib/SQL/Translator/Generator/Role/Quote.pm lib/SQL/Translator/Manual.pod lib/SQL/Translator/Parser.pm lib/SQL/Translator/Parser/Access.pm lib/SQL/Translator/Parser/DB2.pm lib/SQL/Translator/Parser/DB2/Grammar.pm lib/SQL/Translator/Parser/DBI.pm lib/SQL/Translator/Parser/DBI/DB2.pm lib/SQL/Translator/Parser/DBI/MySQL.pm lib/SQL/Translator/Parser/DBI/Oracle.pm lib/SQL/Translator/Parser/DBI/PostgreSQL.pm lib/SQL/Translator/Parser/DBI/SQLite.pm lib/SQL/Translator/Parser/DBI/SQLServer.pm lib/SQL/Translator/Parser/DBI/Sybase.pm lib/SQL/Translator/Parser/Excel.pm lib/SQL/Translator/Parser/JSON.pm lib/SQL/Translator/Parser/MySQL.pm lib/SQL/Translator/Parser/Oracle.pm lib/SQL/Translator/Parser/PostgreSQL.pm lib/SQL/Translator/Parser/SQLite.pm lib/SQL/Translator/Parser/SQLServer.pm lib/SQL/Translator/Parser/Storable.pm lib/SQL/Translator/Parser/Sybase.pm lib/SQL/Translator/Parser/XML.pm lib/SQL/Translator/Parser/XML/SQLFairy.pm lib/SQL/Translator/Parser/xSV.pm lib/SQL/Translator/Parser/YAML.pm lib/SQL/Translator/Producer.pm lib/SQL/Translator/Producer/ClassDBI.pm lib/SQL/Translator/Producer/DB2.pm lib/SQL/Translator/Producer/Diagram.pm lib/SQL/Translator/Producer/DiaUml.pm lib/SQL/Translator/Producer/Dumper.pm lib/SQL/Translator/Producer/GraphViz.pm lib/SQL/Translator/Producer/HTML.pm 
lib/SQL/Translator/Producer/JSON.pm lib/SQL/Translator/Producer/Latex.pm lib/SQL/Translator/Producer/MySQL.pm lib/SQL/Translator/Producer/Oracle.pm lib/SQL/Translator/Producer/POD.pm lib/SQL/Translator/Producer/PostgreSQL.pm lib/SQL/Translator/Producer/SQLite.pm lib/SQL/Translator/Producer/SQLServer.pm lib/SQL/Translator/Producer/Storable.pm lib/SQL/Translator/Producer/Sybase.pm lib/SQL/Translator/Producer/TT/Base.pm lib/SQL/Translator/Producer/TT/Table.pm lib/SQL/Translator/Producer/TTSchema.pm lib/SQL/Translator/Producer/XML.pm lib/SQL/Translator/Producer/XML/SQLFairy.pm lib/SQL/Translator/Producer/YAML.pm lib/SQL/Translator/Role/BuildArgs.pm lib/SQL/Translator/Role/Debug.pm lib/SQL/Translator/Role/Error.pm lib/SQL/Translator/Role/ListAttr.pm lib/SQL/Translator/Schema.pm lib/SQL/Translator/Schema/Constants.pm lib/SQL/Translator/Schema/Constraint.pm lib/SQL/Translator/Schema/Field.pm lib/SQL/Translator/Schema/Index.pm lib/SQL/Translator/Schema/Object.pm lib/SQL/Translator/Schema/Procedure.pm lib/SQL/Translator/Schema/Role/Compare.pm lib/SQL/Translator/Schema/Role/Extra.pm lib/SQL/Translator/Schema/Table.pm lib/SQL/Translator/Schema/Trigger.pm lib/SQL/Translator/Schema/View.pm lib/SQL/Translator/Types.pm lib/SQL/Translator/Utils.pm lib/Test/SQL/Translator.pm maint/Makefile.PL.include Makefile.PL MANIFEST This list of files script/sqlt script/sqlt-diagram script/sqlt-diff script/sqlt-diff-old script/sqlt-dumper script/sqlt-graph script/sqlt.cgi share/DiaUml/diagram.tt2 share/DiaUml/layer.tt2 share/DiaUml/schema.tt2 share/DiaUml/uml-attribute.tt2 share/DiaUml/uml-class-all.tt2 share/DiaUml/uml-class-end.tt2 share/DiaUml/uml-class-start.tt2 share/DiaUml/uml-class.tt2 share/PrecompiledParsers/Parse/RecDescent/DDL/SQLT/README t/02mysql-parser.t t/03mysql-to-oracle.t t/04file,fh,string.t t/05bgep-re.t t/06xsv.t t/07p_args.t t/08postgres-to-mysql.t t/09sqlt-diagram.t t/10excel.t t/11normalize.t t/12header_comment.t t/13schema.t t/14postgres-parser.t t/15oracle-parser.t 
t/16xml-parser.t t/17sqlfxml-producer.t t/18ttschema-producer.t t/19sybase-parser.t t/20format_X_name.t t/23json.t t/24yaml.t t/25storable.t t/26sybase.t t/27sqlite-parser.t t/29html.t t/30sqlt-new-diff-mysql.t t/30sqlt-new-diff-pgsql.t t/30sqlt-new-diff-sqlite.t t/30sqlt-new-diff.t t/31dumper.t t/32schema-lookups.t t/33tt-table-producer.t t/34tt-base.t t/35-access-parser.t t/36-filters.t t/38-filter-names.t t/38-mysql-producer.t t/39-filter-globals.t t/40oracle-parser-dbi.t t/43xml-to-db2.t t/44-xml-to-db2-array.t t/45db2-producer.t t/46xml-to-pg.t t/47postgres-producer.t t/48xml-to-sqlite.t t/49xml-to-pg-samefield.t t/50-sqlserver-parser.t t/51-xml-to-oracle.t t/51-xml-to-oracle_quoted.t t/52-oracle-options.t t/53-oracle-delay-constraints.t t/53-oracle-delay-constraints_quoted.t t/54-oracle-alter-field.t t/55-oracle-add-field.t t/55-oracle-producer.t t/56-sqlite-producer.t t/57-class-dbi.t t/60roundtrip.t t/61translator_agnostic.t t/62roundtrip_datacheck.t t/63-spacial-pgsql.t t/64xml-to-mysql.t t/66-postgres-dbi-parser.t t/70sqlt-diff_script.t t/70sqlt-diff_script_old.t t/71-generator-sql_server.t t/72-sqlite-add-drop-fields.t t/73-sqlite-respects-quote.t t/74-filename-arrayref.t t/data/access/gdpdm.ddl t/data/diff/create1.yml t/data/diff/create2.yml t/data/diff/pgsql/create1.yml t/data/diff/pgsql/create2.yml t/data/Excel/t.xls t/data/mysql/Apache-Session-MySQL.sql t/data/mysql/BGEP-RE-create.sql t/data/mysql/cashmusic_db.sql t/data/mysql/create.sql t/data/mysql/create2.sql t/data/mysql/entire_syntax.sql t/data/mysql/sqlfxml-producer-basic.sql t/data/oracle/create.sql t/data/oracle/create2.sql t/data/oracle/schema_diff_a.yaml t/data/oracle/schema_diff_b.yaml t/data/oracle/schema_diff_c.yaml t/data/oracle/schema_with_options.yaml t/data/pgsql/Chado-CV-PostGreSQL.sql t/data/pgsql/entire_syntax.sql t/data/pgsql/turnkey.sql t/data/roundtrip.xml t/data/roundtrip_autogen.yaml t/data/sqlite/create.sql t/data/sqlite/create2.sql t/data/sqlite/named.sql 
t/data/sqlserver/create.sql t/data/sybase/create.sql t/data/template/basic.tt t/data/template/table.tt t/data/template/testresult_basic.txt t/data/template/testresult_table.txt t/data/xml/samefield.xml t/data/xml/schema.xml t/lib/Producer/BaseTest.pm t/mysql-sqlite-translate.t t/postgresql-rename-table-and-field.t t/sqlite-rename-field.t xt/eol.t xt/notabs.t xt/pod.t META.yml Module YAML meta-data (added by MakeMaker) META.json Module JSON meta-data (added by MakeMaker) README README file (added by Distar) LICENSE LICENSE file (added by Distar) SQL-Translator-1.60/README0000644000000000000000000003447113473557373015136 0ustar00rootroot00000000000000NAME SQL::Translator - manipulate structured data definitions (SQL and more) SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( # Print debug info debug => 1, # Print Parse::RecDescent trace trace => 0, # Don't include comments in output no_comments => 0, # Print name mutations, conflicts show_warnings => 0, # Add "drop table" statements add_drop_table => 1, # to quote or not to quote, thats the question quote_identifiers => 1, # Validate schema object validate => 1, # Make all table names CAPS in producers which support this option format_table_name => sub {my $tablename = shift; return uc($tablename)}, # Null-op formatting, only here for documentation's sake format_package_name => sub {return shift}, format_fk_name => sub {return shift}, format_pk_name => sub {return shift}, ); my $output = $translator->translate( from => 'MySQL', to => 'Oracle', # Or an arrayref of filenames, i.e. [ $file1, $file2, $file3 ] filename => $file, ) or die $translator->error; print $output; DESCRIPTION This documentation covers the API for SQL::Translator. For a more general discussion of how to use the modules and scripts, please see SQL::Translator::Manual. 
SQL::Translator is a group of Perl modules that converts vendor-specific SQL table definitions into other formats, such as other vendor-specific SQL, ER diagrams, documentation (POD and HTML), XML, and Class::DBI classes. The main focus of SQL::Translator is SQL, but parsers exist for other structured data formats, including Excel spreadsheets and arbitrarily delimited text files. Through the separation of the code into parsers and producers with an object model in between, it's possible to combine any parser with any producer, to plug in custom parsers or producers, or to manipulate the parsed data via the built-in object model. Presently only the definition parts of SQL are handled (CREATE, ALTER), not the manipulation of data (INSERT, UPDATE, DELETE). CONSTRUCTOR new The constructor is called "new", and accepts an optional hash of options. Valid options are: * parser / from * parser_args * producer / to * producer_args * filters * filename / file * data * debug * add_drop_table * quote_identifiers * quote_table_names (DEPRECATED) * quote_field_names (DEPRECATED) * no_comments * trace * validate All options are, well, optional; these attributes can be set via instance methods. Internally, they are; no (non-syntactical) advantage is gained by passing options to the constructor. METHODS add_drop_table Toggles whether or not to add "DROP TABLE" statements just before the create definitions. quote_identifiers Toggles whether or not to quote identifiers (table, column, constraint, etc.) with a quoting mechanism suitable for the chosen Producer. The default (true) is to quote them. quote_table_names DEPRECATED - A legacy proxy to "quote_identifiers" quote_field_names DEPRECATED - A legacy proxy to "quote_identifiers" no_comments Toggles whether to print comments in the output. Accepts a true or false value, returns the current value. producer The "producer" method is an accessor/mutator, used to retrieve or define what subroutine is called to produce the output. 
A subroutine defined as a producer will be invoked as a function (*not a method*) and passed its container "SQL::Translator" instance, which it should call the "schema" method on, to get the "SQL::Translator::Schema" generated by the parser. It is expected that the function transform the schema structure to a string. The "SQL::Translator" instance is also useful for informational purposes; for example, the type of the parser can be retrieved using the "parser_type" method, and the "error" and "debug" methods can be called when needed. When defining a producer, one of several things can be passed in: A module name (e.g., "My::Groovy::Producer"), a module name relative to the "SQL::Translator::Producer" namespace (e.g., "MySQL"), a module name and function combination ("My::Groovy::Producer::transmogrify"), or a reference to an anonymous subroutine. If a full module name is passed in (for the purposes of this method, a string containing "::" is considered to be a module name), it is treated as a package, and a function called "produce" will be invoked: $modulename::produce. If $modulename cannot be loaded, the final portion is stripped off and treated as a function. In other words, if there is no file named My/Groovy/Producer/transmogrify.pm, "SQL::Translator" will attempt to load My/Groovy/Producer.pm and use "transmogrify" as the name of the function, instead of the default "produce". my $tr = SQL::Translator->new; # This will invoke My::Groovy::Producer::produce($tr, $data) $tr->producer("My::Groovy::Producer"); # This will invoke SQL::Translator::Producer::Sybase::produce($tr, $data) $tr->producer("Sybase"); # This will invoke My::Groovy::Producer::transmogrify($tr, $data), # assuming that My::Groovy::Producer::transmogrify is not a module # on disk. 
$tr->producer("My::Groovy::Producer::transmogrify"); # This will invoke the referenced subroutine directly, as # $subref->($tr, $data); $tr->producer(\&my_producer); There is also a method named "producer_type", which is a string containing the classname to which the above "produce" function belongs. In the case of anonymous subroutines, this method returns the string "CODE". Finally, there is a method named "producer_args", which is both an accessor and a mutator. Arbitrary data may be stored in name => value pairs for the producer subroutine to access: sub My::Random::producer { my ($tr, $data) = @_; my $pr_args = $tr->producer_args(); # $pr_args is a hashref. Extra data passed to the "producer" method is passed to "producer_args": $tr->producer("xSV", delimiter => ',\s*'); # In SQL::Translator::Producer::xSV: my $args = $tr->producer_args; my $delimiter = $args->{'delimiter'}; # value is ,\s* parser The "parser" method defines or retrieves a subroutine that will be called to perform the parsing. The basic idea is the same as that of "producer" (see above), except the default subroutine name is "parse", and will be invoked as "$module_name::parse($tr, $data)". Also, the parser subroutine will be passed a string containing the entirety of the data to be parsed. # Invokes SQL::Translator::Parser::MySQL::parse() $tr->parser("MySQL"); # Invokes My::Groovy::Parser::parse() $tr->parser("My::Groovy::Parser"); # Invoke an anonymous subroutine directly $tr->parser(sub { my $dumper = Data::Dumper->new([ $_[1] ], [ "SQL" ]); $dumper->Purity(1)->Terse(1)->Deepcopy(1); return $dumper->Dump; }); There is also "parser_type" and "parser_args", which perform analogously to "producer_type" and "producer_args" filters Set or retrieve the filters to run over the schema during the translation, before the producer creates its output. 
Filters are subroutines called, in order, with the schema object to filter as the 1st arg and a hash of options (passed as a list) for the rest of the args. They are free to do whatever they want to the schema object, which will be handed to any following filters, then used by the producer. Filters are set as an array, which gives the order they run in. Like parsers and producers, they can be defined by a module name, a module name relative to the SQL::Translator::Filter namespace, a module name and function name together or a reference to an anonymous subroutine. When using a module name a function called "filter" will be invoked in that package to do the work. To pass args to the filter set it as an array ref with the 1st value giving the filter (name or sub) and the rest its args. e.g. $tr->filters( sub { my $schema = shift; # Do stuff to schema here! }, DropFKeys, [ "Names", table => 'lc' ], [ "Foo", foo => "bar", hello => "world" ], [ "Filter5" ], ); Although you normally set them in the constructor, which calls through to filters. i.e. my $translator = SQL::Translator->new( ... filters => [ sub { ... }, [ "Names", table => 'lc' ], ], ... ); See t/36-filters.t for more examples. Multiple set calls to filters are cumulative with new filters added to the end of the current list. Returns the filters as a list of array refs, the 1st value being a reference to the filter sub and the rest its args. show_warnings Toggles whether to print warnings of name conflicts, identifier mutations, etc. Probably only generated by producers to let the user know when something won't translate very smoothly (e.g., MySQL "enum" fields into Oracle). Accepts a true or false value, returns the current value. translate The "translate" method calls the subroutine referenced by the "parser" data member, then calls any "filters" and finally calls the "producer" subroutine (these members are described above). 
It accepts as arguments a number of things, in key => value format, including (potentially) a parser and a producer (they are passed directly to the "parser" and "producer" methods). Here is how the parameter list to "translate" is parsed: * 1 argument means it's the data to be parsed; which could be a string (filename) or a reference to a scalar (a string stored in memory), or a reference to a hash, which is parsed as being more than one argument (see next section). # Parse the file /path/to/datafile my $output = $tr->translate("/path/to/datafile"); # Parse the data contained in the string $data my $output = $tr->translate(\$data); * More than 1 argument means it's a hash of things, and it might be setting a parser, producer, or datasource (this key is named "filename" or "file" if it's a file, or "data" for a SCALAR reference). # As above, parse /path/to/datafile, but with different producers for my $prod ("MySQL", "XML", "Sybase") { print $tr->translate( producer => $prod, filename => "/path/to/datafile", ); } # The filename hash key could also be: datasource => \$data, You get the idea. filename, data Using the "filename" method, the filename of the data to be parsed can be set. This method can be used in conjunction with the "data" method, below. If both the "filename" and "data" methods are invoked as mutators, the data set in the "data" method is used. $tr->filename("/my/data/files/create.sql"); or: my $create_script = do { local $/; open CREATE, "/my/data/files/create.sql" or die $!; <CREATE>; }; $tr->data(\$create_script); "filename" takes a string, which is interpreted as a filename. "data" takes a reference to a string, which is used as the data to be parsed. If a filename is set, then that file is opened and read when the "translate" method is called, as long as the data instance variable is not set. schema Returns the SQL::Translator::Schema object. trace Turns on/off the tracing option of Parse::RecDescent. 
validate Whether or not to validate the schema object after parsing and before producing. version Returns the version of the SQL::Translator release. AUTHORS See the included AUTHORS file: GETTING HELP/SUPPORT If you are stuck with a problem or have doubts about a particular approach do not hesitate to contact us via any of the following options (the list is sorted by "fastest response time"): * IRC: irc.perl.org#sql-translator * Mailing list: * RT Bug Tracker: <https://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Translator> HOW TO CONTRIBUTE Contributions are always welcome, in all usable forms (we especially welcome documentation improvements). The delivery methods include git- or unified-diff formatted patches, GitHub pull requests, or plain bug reports either via RT or the Mailing list. Contributors are generally granted access to the official repository after their first several patches pass successful review. Don't hesitate to contact us with any further questions you may have. This project is maintained in a git repository. The code and related tools are accessible at the following locations: * Official repo: * Official gitweb: * GitHub mirror: <https://github.com/dbsrgits/sql-translator/> * Authorized committers: * Travis-CI log: COPYRIGHT Copyright 2012 the SQL::Translator authors, as listed in "AUTHORS". LICENSE This library is free software and may be distributed under the same terms as Perl 5 itself. PRAISE If you find this module useful, please use <http://cpanratings.perl.org/d/SQL-Translator> to rate it. SEE ALSO perl, SQL::Translator::Parser, SQL::Translator::Producer, Parse::RecDescent, GD, GraphViz, Text::RecordParser, Class::DBI, XML::Writer. 
SQL-Translator-1.60/LICENSE0000644000000000000000000000000013473557373015240 0ustar00rootroot00000000000000SQL-Translator-1.60/Changes0000644000000000000000000006346613473550362015546 0ustar00rootroot00000000000000Changes for SQL::Translator 1.60 - 2019-05-29 * No changes since 1.59_01 1.59_01 - 2019-04-28 * Add support for parsing PostgreSQL dollar-quoted strings * Add support for materialized views in Oracle producer * switched JSON backend from JSON.pm to JSON::MaybeXS * Port Makefile.PL from Module::Install to Distar * Synchronise the version number across all modules 0.11024 2018-01-09 * Remove temporary diagnostics added in 0.11023_01 0.11023_02 2017-12-08 * Make maybe_plan skip on unrecognised failures to load modules 0.11023_01 2017-12-07 * Add diagnostics to t/postgresql-rename-table-and-field.t that's failing mysteriously on some smokers 0.11023 2017-12-05 * Fix error handling for Test::PostgreSQL 1.20 0.11022 2017-12-04 * Add support for monotonically increasing SQLite autoincs (GH#47) * Add support for CHECK constraint in SQLite producer (GH#57) * Add support for CHECK constraint in POD producer (GH#63) * Fix forgotten quoting in the MySQL DROP TABLE diff producer (GH#50) * Fix Pg grammar parsing of UUID, time, timetz columns (RT#100196, GH#52) * Add support for USING and WHERE on indexes in PostgreSQL producer and parser (RT#63814, GH#52) * Improve add_trigger consistency between producers (GH#48) * Add trigger 'scope' attribute support to JSON, YAML and XML producers, and XML and SQLite parsers (RT#119997) * Declare dependencies in deterministic order (RT#102859) * Multiple speedups of naive internal debugging mechanism (GH#54) * Remove dependency on List::MoreUtils ( http://is.gd/lmu_cac_debacle ) * Fix parsing of strings with leading whitespace for MySQL, Oracle, PostgreSQL, SQLServer and SQLite * Fix parsing of MySQL column comments (RT#83380) * Fix multi-line comments in YAML, JSON and PostgreSQL producers * Fix identifier quoting in PostgreSQL 
diff producer * Fix incorrect type conversion from various BigInt AutoIncrement to the PostgreSQL-specific bigserial (GH#72) * Fix missing semicolons between PostGIS statements * Fix string and identifier quoting in PostGIS statements * Fix intermittent test failures (RT#108460) * Fix relying on exact serialisation for JSON/YAML tests (RT#121901) 0.11021 2015-01-29 * Fix Oracle producer generating an unnecessary / at the end in case there are no triggers * Skip HTML tests if CGI is not installed (RT#98027) * Fix JSON and YAML tests if the defaults have been tweaked (RT#98824) * Fixes for parsing and producing identifiers and values that need quoting and escaping for SQLite, MySQL, PostgreSQL, SQLServer and Oracle (RT#90700, RT#31034) * Add support for ALTER TABLE ... ADD CONSTRAINT to Oracle parser * Add trigger support to Oracle parser (RT#62927) * Fix erroneous PostgreSQL floating point type translations (RT#99725) * Remove executable bit from Parser/JSON.pm (RT#100532) * Update the Free Software Foundation's address (RT#100531) * Provide default index names for SQLite (GH#45) * Fix SQLite diffing on perl 5.8.1 * Fix multi-column indexes in Parser::DBI::PostgreSQL * Fix array data types in Parser::PostgreSQL (GH#49) * Fix multidimensional sizes in Parser::PostgreSQL 0.11020 2014-09-02 * Fix test failure if Test::PostgreSQL is installed but not working 0.11019 2014-09-02 * Add Json and hstore types to Pg Parser (cloudinstustrie) * Fix DROP TABLE in SQL Server Producer * Fix Pg DBI parser test (Dagfinn Ilmari Mannsåker) * Remove spurious warnings (Matt Phillips, Wallace Reis) * Fix MySQL producer for columns with scalar ref in 'ON UPDATE' (Wallace Reis) * Fix handling of views in MySQL DBI parser * Fix handling of renamed fields in SQLite diff (Peter Mottram) * Check numeric equality of default values in numeric-type fields (Wallace Reis) * Fix handling of renamed fields in renamed tables in Pg diff (Peter Mottram) 0.11018 2013-10-31 🎃 * Revert "Fix AUTOINCREMENT 
in SQLite" 0.11017 2013-10-30 * Apply quotes to fix tables that are reserved words, DBI::SQLServer (Jonathan C. Otsuka) * Add DECIMAL_DIGITS to field size for scale info, DBI::SQLServer (Jonathan C. Otsuka) * De-linkify XML namespace in docs (RT#81838) * Allow both single and double quotes for values in MySQL parser * Fix diff for altering two things per column - add ; at the end * Call all diff methods in list context (it can be merged later) * Fix Pg diff issue with drop constraint on primary keys * SQLite support for SET NULL, SET DEFAULT and NO ACTION in FK clauses * Clean up properly after Parser::DBI::PostgreSQL tests * Fix typos in error messages * Add SQL_TINYINT and SQL_BIGINT to the type map in SQL::Translator::Schema::Field * Add JSON parser and producer (Jon Jensen) * Clean up TTSchema doc some (Gavin Shelley) * Fix AUTOINCREMENT in SQLite (Rafael Porres Molina) 0.11016 2012-10-09 * Allow passing an arrayref to SQLT->filename (lost in Mooification) 0.11015 2012-10-05 * Fix stupid missing version number in SQL::Translator::Schema::Object 0.11014 2012-10-05 * Relicense under Perl 5 terms 0.11013_03 2012-09-25 * Remove SQL::Translator::Schema::Graph as announced in 0.11011 * Remove a number of no longer needed deps 0.11013_02 2012-09-23 * Fix missing dep (List::MoreUtils) 0.11013_01 2012-09-22 * Convert SQL::Translator, ::Schema and ::Schema::* to Moo * Fix leaks by weakening circular references between schema objects 0.11013 2012-09-22 * Make MySQL producer add NULL after every nullable field, conforming to SQL standard, and avoiding MySQL bugs 0.11012 2012-07-02 * Fix/update quoting in PostgreSQL producer * Add missing quote function to SQLServer producer * Fix incorrect Parser::DBI documentation (RT#60878) 0.11011 2012-05-09 [ INCOMPATIBLE CHANGES ] * SQLT no longer supports setting separate conflicting values for the now deprecated 'quote_table_names' and 'quote_field_names'. Instead their values are proxied to the new 'quote_identifiers'. 
If 'quote_identifiers' is supplied, the legacy settings are ignored (with a warning). If nothing is specified the default is TRUE as before. If only one is specified - default to its value for everything, and if both are specified with a conflicting value an exception is thrown. * Partial quoting support has been added in SQLite. It is currently disabled by default, you need to request is explicitly with quote_identifiers => 1. In a future version of SQL::Translator *THIS DEFAULT BEHAVIOR WILL CHANGE*. If you do NOT WANT quoting, set quote_identifiers to a false value to protect yourself from changes in a future release. * Bump the default MySQL parser version to MySQL 4.0 [ OTHER CHANGES ] * script/sqlt-graph now accepts a --trace option * Fixes to SQLite foreign keys production (patch from Johan Viklund) closes RT#16412, RT#44769 * ON DELETE/UPDATE actions for SQLite (patch from Lukas Thiemeier) closes RT#70734, RT#71283, RT#70378 * Fix data preservation on SQLite diffs involving adding/dropping columns * Support for triggers in PostgreSQL producer and parser * Correct Data Type in SQLT::Parser::DBI::PostgreSQL (patch from Andrew Pam) * Fix index issue in SQLT::Parser::DBI::PostgreSQL * Add column and table comments in SQLT::Parser::DBI::PostgreSQL(patch from Andrew Pam) * Stop the DBI parser from disconnecting externally supplied DBI handles (RT#35868) * Fixed alter_drop_constraint for foreign keys and applying multiple changes via alter_field to a column in Postgres Producer * Added a working mechanism for naming foreign keys in the PostgreSQL producer * Fix PostgreSQL ignoring default values with specified data type * Fix PostgreSQL parser support for (N)::int defaults (patch by Tina Müller) * Fix possible name duplication in SQLlite producer * Oracle does not accept ON DELETE/UPDATE RESTRICT (though it is the actual default) fix by not adding the ON DELETE/UPDATE clause at all * Changed dependency on Digest::SHA1 to the core-bundled Digest::SHA (RT#67989) * 
Support for double quoted and bit strings as default values in MySQL parser * Improved VIEW support in MySQL parser * Proper handling of CURRENT_TIMESTAMP default values in MySQL parser (RT#65844) * Check in MySQL parser to avoid trying to parse a table defined twice in the same file as indices (and probably other things) get messed up * Workaround for some MySQL quirks on primary key definitions * Fix dropping primary keys in MySQL diffs (RT#62250, patch from Nick Bertrand) * MySQL producer does not attempt to write out non-existent unique constraint names * MySQL parser correctly differentiates between signed and unsigned integer column display sizes * Replace Class::Accessor::Fast dependency with already-included Moo * Entire codebase is now free of tabs and trailing whitespace * Spellfixes (RT#68912) * Fix Diagram Producer POD (RT#71397, RT#71398) * Fix Diagram Producer to use correct binmode on output (RT#71399) * Fix ignored option to script/sqlt-diagram (RT#5992) * Fix t/17sqlfxml-producer.t failures due to whitespace differences introduced by environment config snippets (RT#70786) * Fix assembly of Table objects with numbered columns being added out of order (RT#74771) (based on patch from Jonathan Otsuka) * Fix syntax error in SQL::Translator::Producer::Latex (RT#74953) * Deprecate SQL::Translator::Schema::Graph and the as_graph() schema method * Bump minimum supported perl version to 5.8.1 (mostly due to Moo) 0.11010 2011-10-05 * Add "if exists" to drop view statements in Pg. 
0.11009 2011-09-02 * Fix MySQL producer to properly quote all table names on output (patch from geistteufel) 0.11008 2011-05-04 * Correctly create and parse FK constraints in SQLite * Correct postgis geography type insertion and linebreak fix for multiple geometry/geography columns * made PostgreSQL producer consistent with other producers in terms of quoting and allowing functions in constraints and indices * Add distinction of autoinc and regular primary keys to the GraphViz producer * Fix odd invocation of Test::More::pass() in t/36-filters.t (RT#64728) * Quote everything in SQL Server * Turn off constraints before dropping tables in SQL Server * Make true unique constraints if needed in SQL Server * Fixed Producer::PostgreSQL to output array type after type size, i.e. varchar(64)[] rather than varchar[](64) 0.11007 2010-11-30 * Fix POD typo in SQL/Translator/Schema/Trigger.pm * Add explicit Scalar::Util to the deplist for really old perls * Add support for PostGIS Geometry and Geography data types in the Pg producer * Some minor fixes to squash warnings on new perls * Support a custom_type_name hint when creating enum types in PostgreSQL * Fix sqlt options/pod mismatch (RT#58318) * Oracle Producer multicolumn constraint support * Add support for triggers in the MySQL producer * Fix unstable order of View's in MySQL parser 0.11006 2010-06-03 * Fix Producer::Oracle varchar2 without size def (ORA-00906: missing right parenthesis) * Fix Producer::Oracle translate float and double to float instead of number * Fix Producer::Oracle generation of too long unique constraint names * Fix Producer::SQLite when generating VIEWs with add_drop_table => 1 * Fix Producer::MySQL not quoting index names when requested (RT#56173) * Fix Producer::MySQL wrapping extra ()s around VIEW SELECT-ors (RT#56419) * Fix Field::default_value to behave like a real accessor (allow undef as an unsetting argument) * Fix Mysql/Pg/SQLite/MSSQL producers to properly *not* quote numeric default 
values (RT#57059) * Producer::Oracle tests now use Test::Differences * Prettify output of SQLite producer (less bogus newlines) * Augment SQLite and Pg producers to accept a perl-formatted (%d.%03d%03d) and regular no-leading-zero (X.X.X) *_version producer args 0.11005 2010-02-24 * Fix Parser::DBI::Oracle reading too many tables (RT#49413) * Fix Parser::MySQL tripping up on PRIMARY KEY ... USING (currently value is ignored) (RT#50468) * Fix runaway debug trace (RT#52276) * Fix Parser::PostgreSQL choking on commit; statements in DDL (#RT52277) * Producer::Oracle now respects quote_field|table_names, and no longer does name munging of reserved table names * Producer::Oracle now correctly outputs databse-unique index names 0.11004 2010-02-14 * Fix PG producer numeric precision regression (RT#49301) * Add DB2 producer numeric/decimal precision output * Fix Oracle producer creating numeric precision statements that the test case expects (no whitespace) * Add Oracle producer creating drop view statements like PG producer does * Fix SQL::Translator::Diff to use producer_args instead of producer_options 0.11003 2009-09-28 * Pg parser fixes to deal properly with MATCH * Pg parser fixes to parse timestamp attributes correctly * Fix broken default detection in script/sqlt (RT#27438) * Fix dependency issues with LibXML and TT 0.11002 2009-08-30 * Depend on fixed Parse::RecDescent * Added skip-tables and skip-tables-like options to Diagram 0.11001 2009-08-18 * Removed last use of Readonly * Adjusted YAML dependency 0.11000 2009-08-18 * Re-add version numbers to files, else cpan's "upgrade" gets very confused * Replaced code using Readonly, since most of the rest uses constant, and thats already a dep * Moved YAML and XML::LibXML back to recommends, the tests for both now skip if not installed * Bumped to 0.11000 to supercede 0.10 which has incorrect numbering scheme 0.10 2009-08-13 * Resolved the following RT bugs (thanks to everyone for reporting!): - 25791 does not recognize 
PostgreSQL ON_ERROR_STOP - 29265 sqlt-diagram: --natural-join needs Graph::Directed - 37814 SQLite translator failing to parse schema - 42548 Producer::PostgreSQL incorrectly inserts the size in 'time(stamp)? with(out) time zone' fields - 43173 SQL::Translator::Parser without versionnumber - will install old 0.09002 - 46805 (No subject) - 47026 META.yml is not packaged due to MANIFEST.SKIP (easyfix) - 32130 Move from XML::XPath to XML::LibXML::XPathContext - 22261 MySQL parse - 13915 missing optional prerequisite cause make test to fail - 8847 Diagram.pm: BINMODE missing in printing of graphic file. - 21065 GraphViz producer fails on tables named 'node' - 35448 Producer::PostgreSQL types without size - 22026 sqlt-diagram uses -f arg twice - 47897 [PATCH] Fix uninitialized value within @_ in (uc|lc) - 47668 Mysql Parser doesn't recognize key types - 46448 sqlt-graph errors out on MySQL DDL with btree keys - 47176 Add Foreign Key support to Parser::DBI::PostgreSQL.pm - 48025 MySQL Producer: Case inconsistency between elements in @no_length_attr and $data_type - 48569 sqlt-diagram fails to load SQLite schema - 48596 SQL::Translator::Diff::schema_diff should produce a list in list context - 44907 SQL::Translator::Producer::PostgreSQL produce() in list context should return a list of statements 0.09007 2009-06-25 * Fixed Pg parser - caching the compiled P::RD schema is a *very* bad idea * Fix MSSQL handling of ON UPDATE/DELETE RESTRICT * Delay MSSQL FK constraint deployment until after all CREATE TABLE statements * Coerce other engine's bytea/blob/clob datatypes to VarBinary 0.09006 2009-06-10 * Multiple test and dependency adhustments to make smokers happy * Fix YAML producer wrt extra attribute * Added support for "time(stamp) (p) with time zone" for Pg producer (mo) 0.09005 2009-06-08 * Add parser support for MySQL default values with a single quote * Properly quote absolute table names in the MySQL producer * Added CREATE VIEW subrules for mysql parser (wreis) * 
Many fixes to code and tests for trigger's "database_events" * Added semi-colon for (DROP|CREATE) TYPE statements in the Pg producer (wreis) * ALTER TABLE/ALTER COLUMN/DROP DEFAULT support in Pg producer (mo) * XML parser support for multi-event triggers * SQLite producer support for multi-event triggers * XML parser switched from XML::XPath to XML::LibXML * Pg producer ALTER TABLE/COLUMN and DROP DEFAULT support * MySQL producer skips length attribute for columns which do not support that attribute. Currently following column types are added to that list: date time timestamp datetime year * Switch to Module::Install (mandates minimum perl 5.005) * Major cleanup of GraphViz producer * Massive amount of fixes to SQLite/Pg/Mysql/MSSQL parsers/producers Fix most of the problems uncovered by the roundtrip test framework Some highlights: - Rewind exhausted globs before attempting a read - Do not add xml comment header if no_comments is set - table/field counts are held per schema object, not globally - no more variable table and column names in SQLite and MSSQL - VIEW support for Pg parser, also some cleanups - The way we generate Pg create view statements was not standards compliant (per RhodiumToad in #postgresql) - Disable MSSQL view/procedure production - they never worked in the first place - SQLite/MSSQL improvements: - Support parsing of all DROP clauses - Support parsing of field-level comments - When producing do not append table names to constraint/index names 0.09004 2009-02-13 * Add support for temporary tables in Pg (nachos) * Create Trigger support for SQLite * GraphViz producer improvements 0.09003 2009-02-07 * 0.09002 2008-12-05 * parsing MySQL CURRENT_TIMESTAMP as scalar ref so it can be produced without quotes (jgoulah) * Add ignore_opts parser arg (to ignore table options) in Parser::MySQL (jgoulah) * Skip tests for buggy Spreadsheet::ParseExcel versions (rbo) * Add support for skip tables parser arg in Parser::DBI::MySQL (jgoulah) * Changed behaviour 
of ::Producer::Oracle when returning an array of statements to make it compatible to DBI->do() * Fixed a few bugs in ::Producer::Oracle * Applied patch from jgoulah to support mysql's MERGE option * Applied patch from rbo to add support of multiple database events on a trigger * Applied patch from lukes to allow drop if exists in sqlite producer, with version >= 3.3 * Applied patch from rjbs with minor changes, now we support scalar refs in default values! * Fixed SQLite producer to end index statements in newlines, in scalar context * Decreed that all list context statements shall not end in ; or ;\n * Fixed SQLite, Diff and MySQL producers to agree with Decree. * Added support for CREATE VIEW + tests in the Pg producer (wreis) * Added support for CREATE VIEW + tests in the sqlite producer (groditi) * Added proper argument parsing and documentation to MySQL Parser and Producer (ribasushi) * Using DROP VIEW instead of OR REPLACE clause in the Pg producer, as replace only allows replacement with identical set of columns (wreis) * Added support for DROP VIEW and fixed CREATE VIEW statement in the sqlite producer (wreis) * Removed source_db and target_db accessors from Diff (throwback to old version, only output_db is used) * Support for longer varchar fields in MySQL 0.09001 2008-08-19 * Added support for CREATE VIEW + tests in the mysql producer (groditi) * Added support for SET fields in the mysql producer + test (groditi) * Added support for proper booleans in the mysql producer, when a mysql version of at least 4.x is supplied * Added support for proper enums under pg (as of 8.3), with pg version check, and deferrable constraints * Added support to truncate long constraint and index names in the mysql producer, because of a change to DBIx::Class to produce such long names in some cases. 
0.09000 2008-02-25 * Fix Pg produces idea of which field types need a size param (wreis) * Add support for COLLATE table option to MySQL parser * Allow DEFAULT CHARACTER SET without '=' (as produced by mysqldump) 0.0899_02 2008-01-29 * Major refactoring of SQL::Translator::Diff again: * Diff is no longer one huge monolithic function. * Added more tests for diff * When producing diffs for MySQL you will (by default) get single alter statements per table * SQLite can also do remove columns (by creating a temp table as shown in http://sqlite.org/faq.html#q11 * Columns can be renamed if the new schema is from a form that can have metadata (which is pretty much anything but an SQL file.) It does this by looking at renamed_from in the $field->extra * Updated Oracle and Postgres producers * More tests! 0.0899_01 2007-10-21 * SQL::Translator::Diff now uses the ::Producer modules to create diffs This *will* break back-compatibility Use sqlt-diff-old for the previous one, and fix producers! 0.08001 2007-09-26 * Patched to ignore all TT versions >= 2.15 until TT is fixed :( 0.08 2006-12-07 * Patched 18ttschema-producer.t and 33tt-table-producter.t to skip on TT 2.15, thanks Ash! 
0.08_04 2006-11-10 * Patched MySQL producer to name constraints sanely, thanks Ash * Added patch to Producer::DB2 to avoid dependency issues with foreign keys * Added patch to remove single quotes for numeric default values in Producer::DB2 * Fixed Parser::SQLite to require a semicolon after a create trigger statement * Added patch from avinash to add CASCADE to pg table drops 0.08_03 * Added patch to use default values for Pg timestamp fields 0.08_02 2006-11-03 * Added patch from Ash to separate DROP statements in mysql producer in list-context * Fixed up SQLites usage of no-comments 0.08_01 2006-07-23 * Made Trigger check that a give table exists in on_table - castaway * Split some producers (DB2, MySQL, SQLite, PostgreSQL) into sub methods (others to follow) - castaway * Add alter_* methods to some Producers and docs to Producer.pm (for use by Diff later) - castaway * Made changes to allow producers to return a list of statements - castaway * Split sqlt-diff into script and module - castaway * Added quote_table_names and quote_field_names patch (omega, zamolxes) - castaway * Added DB2 Producer - castaway * Added mysql_character_set for 4.1+ -mda * New filters, Names and Globals. -mda * Added the initial work on a template based Dia UML producer. -mda 0.07 2005-06-10 * YAML parser supports extra attributes on tables. * All schema objects now support the extra attribute, so can have arbitary name/value data attached to them. * Refactoring: Added SQL::Translator::Schema::Object - base class for all Schema objects. * Changes to MySQL Parser (Dave Howorth) - ignore INSERT statements - permit ALTER TABLE ADD FOREIGN KEY - allow trailing comma on last field in CREATE statements - collect the database name * TTSchema Producer - Can pass extra variables using tt_vars producer arg. - Can pass extra config using tt_conf producer arg. - Variables and config can be passed on the command line with --tt-var and --tt-conf options to sqlt. * Added schema filters. 
* MySQL Producer - Added 'mysql_table_type' extra attribute on tables. - Works out InnoDB tables from constraints. - mysql_charset and mysql_collate extra attributes for tables and fiels. 0.06 2004-05-13 * Added SQL::Translator::Manual * Installation process now uses Module::Build * Added new "Dumper" producer * Changed the native SQL Fairy XML format to a fixed mapping. *NB:* You should convert your existing XML schema. See the SQL::Translator::Parser::XML::SQLFairy docs. * Added producers: TT::Base and TT::Table. 0.05 2004-02-27 * Added "COMMENT ON *" syntax to PostgreSQL parser * Some fixes to Oracle parser as reported by Gail Binkley * Added support in PostgreSQL parser for all "ALTER TABLE" statements * Now distributing sqlt-diff script as it's pretty usable * Added new options to sqlt-graph and GraphViz producer (Dave Cash) 0.04 2003-11-07 * Increased version of Constants module to 1.41 to avoid a problem where 0.02 has 1.4 of that file and 0.03 had 1.06 which confused CPAN * Hard-coded all the PREREQ_PM modules in Makefile.PL (rather than setting them dynamically) so that automated tests would pass 0.03 2003-11-06 * Added parsers: XML::SQLFairy, Sybase, SQLite, DBI-MySQL, DBI-PostgreSQL, DBI-SQLite, DBI-Sybase, Storable, YAML * Added producers: XML::SQLFairy, TTSchema, Storable, YAML * HTML producer now uses stylesheets to allow easy customization of colors * Many bug fixes to most every module * Added "sqlt-dumper" script to help create a script for dumping a database a la "mysqldump" * Reversed the arrowheads on the graphical producers to show the relationships in a more standard way * Changes all included script names to start with "sqlt" * Added capturing and printing most embedded table and field comments 0.02 2003-06-17 * Added parsers for Excel and Oracle * Removed Sybase parser because it didn't actually work * Added ClassDBI, Diagram, GraphViz, HTML, POD, SQLite, Sybase producers * Added Schema classes to represent schema as objects * Removed "Raw" 
producer in favor of the Schema classes * Removed "Validator" class as the Schema classes validate themselves * Improved all existing parsers and producers, expanding them to handle foreign keys much better, produce better output, etc. * Added sqlt-diagram.pl and sqlt-graphviz.pl as CLI frontends to the graphical producers * Added sql_translator.cgi as a web-form frontend to graphical producers * Expanded test suite 0.01 2003-02-27 * Added parsers: XML::SQLFairy, Sybase, SQLite, DBI-MySQL, DBI-PostgreSQL, DBI-SQLite, DBI-Sybase, Storable, YAML * Added producers: XML::SQLFairy, TTSchema, Storable, YAML * HTML producer now uses stylesheets to allow easy customization of colors * Many bug fixes to most every module * Added "sqlt-dumper" script to help create a script for dumping a database a la "mysqldump" * Reversed the arrowheads on the graphical producers to show the relationships in a more standard way * Changes all included script names to start with "sqlt" * Added capturing and printing most embedded table and field comments SQL-Translator-1.60/META.json0000644000000000000000000000562213473557373015673 0ustar00rootroot00000000000000{ "abstract" : "SQL DDL transformations and more", "author" : [ "Ken Youens-Clark " ], "dynamic_config" : 0, "generated_by" : "ExtUtils::MakeMaker version 7.36, CPAN::Meta::Converter version 2.150010", "license" : [ "perl_5" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : 2 }, "name" : "SQL-Translator", "no_index" : { "directory" : [ "t", "inc", "maint", "share", "xt" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : "6.54", "File::ShareDir::Install" : "0" } }, "develop" : { "requires" : { "GD" : "0", "Graph::Directed" : "0", "GraphViz" : "0", "Software::LicenseUtils" : "0", "Spreadsheet::ParseExcel" : "0.41", "Template" : "2.20", "Test::EOL" : "1.1", "Test::NoTabs" : "1.1", "Text::RecordParser" : "0.02", 
"XML::LibXML" : "1.69" } }, "runtime" : { "recommends" : { "GD" : "0", "Graph::Directed" : "0", "GraphViz" : "0", "Spreadsheet::ParseExcel" : "0.41", "Template" : "2.20", "Text::RecordParser" : "0.02", "XML::LibXML" : "1.69" }, "requires" : { "Carp::Clan" : "0", "DBI" : "1.54", "Digest::SHA" : "0", "File::ShareDir" : "1.0", "Moo" : "1.000003", "Package::Variant" : "1.001001", "Parse::RecDescent" : "1.967009", "Scalar::Util" : "0", "Sub::Quote" : "0", "Try::Tiny" : "0.04", "perl" : "5.008001" } }, "test" : { "requires" : { "JSON::MaybeXS" : "1.003003", "Test::Differences" : "0", "Test::Exception" : "0.31", "Test::More" : "0.88", "Text::ParseWords" : "0", "XML::Writer" : "0.500", "YAML" : "0.66" } } }, "release_status" : "stable", "resources" : { "bugtracker" : { "mailto" : "bug-SQL-Translator@rt.cpan.org", "web" : "https://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Translator" }, "license" : [ "http://dev.perl.org/licenses/" ], "repository" : { "type" : "git", "web" : "https://github.com/dbsrgits/sql-translator/" }, "x_IRC" : "irc://irc.perl.org/#sql-translator", "x_Ratings" : "http://cpanratings.perl.org/d/SQL-Translator" }, "version" : "1.60", "x_authority" : "cpan:JROBINSON", "x_serialization_backend" : "JSON::PP version 4.02" } SQL-Translator-1.60/lib/0000755000000000000000000000000013473557372015012 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/0000755000000000000000000000000013473557372015451 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/Translator.pm0000644000000000000000000010326313473550070020131 0ustar00rootroot00000000000000package SQL::Translator; use Moo; our ( $DEFAULT_SUB, $DEBUG, $ERROR ); our $VERSION = '1.60'; $VERSION =~ tr/_//d; $DEBUG = 0 unless defined $DEBUG; $ERROR = ""; use Carp qw(carp croak); use Data::Dumper; use File::Find; use File::Spec::Functions qw(catfile); use File::Basename qw(dirname); use IO::Dir; use Sub::Quote qw(quote_sub); use SQL::Translator::Producer; use SQL::Translator::Schema; use 
SQL::Translator::Utils qw(throw ex2err carp_ro normalize_quote_options);

$DEFAULT_SUB = sub { $_[0]->schema } unless defined $DEFAULT_SUB;

with qw(
    SQL::Translator::Role::Debug
    SQL::Translator::Role::Error
    SQL::Translator::Role::BuildArgs
);

# Normalize constructor arguments before Moo builds the object:
# map the documented aliases (from/to/file) onto the canonical
# attribute names, and collapse the legacy quote_* options into
# the single quote_identifiers flag.
around BUILDARGS => sub {
    my $orig   = shift;
    my $self   = shift;
    my $config = $self->$orig(@_);

    # If a 'parser' or 'from' parameter is passed in, use that as the
    # parser; if a 'producer' or 'to' parameter is passed in, use that
    # as the producer; both default to $DEFAULT_SUB.  Likewise 'file'
    # is accepted as an alias for 'filename'.
    $config->{parser}   ||= $config->{from} if defined $config->{from};
    $config->{producer} ||= $config->{to}   if defined $config->{to};
    $config->{filename} ||= $config->{file} if defined $config->{file};

    # Fold quote_table_names / quote_field_names (deprecated) into
    # quote_identifiers; normalize_quote_options returns undef when
    # no quoting option was supplied at all.
    my $quote = normalize_quote_options($config);
    $config->{quote_identifiers} = $quote if defined $quote;

    return $config;
};

sub BUILD {
    my ($self) = @_;
    # Make sure all the tool-related stuff is set up: running each
    # accessor through its own writer triggers the _tool() resolution
    # (module loading, sub lookup) at construction time.
    foreach my $tool (qw(producer parser)) {
        $self->$tool($self->$tool);
    }
}

# Simple boolean switches: anything true coerces to 1, else 0.
has $_ => (
    is      => 'rw',
    default => quote_sub(q{ 0 }),
    coerce  => quote_sub(q{ $_[0] ? 1 : 0 }),
) foreach qw(add_drop_table no_comments show_warnings trace validate);

# quote_identifiers is on by default, use a 0-but-true as indicator
# so we can allow individual producers to change the default
has quote_identifiers => (
    is      => 'rw',
    default => quote_sub(q{ '0E0' }),
    coerce  => quote_sub(q{ $_[0] || 0 }),
);

# Deprecated read-only proxies for quote_identifiers; attempting to
# *change* the value through them is an error.
sub quote_table_names {
    (@_ > 1 and ($_[1] xor $_[0]->quote_identifiers) )
        ? croak 'Using quote_table_names as a setter is no longer supported'
        : $_[0]->quote_identifiers;
}

sub quote_field_names {
    (@_ > 1 and ($_[1] xor $_[0]->quote_identifiers) )
        ? croak 'Using quote_field_names as a setter is no longer supported'
        : $_[0]->quote_identifiers;
}

after quote_identifiers => sub {
    if (@_ > 1) {
        # synchronize for old code reaching directly into guts
        $_[0]->{quote_table_names} = $_[0]->{quote_field_names} = $_[1] ?
1 : 0; } };

# --- producer -----------------------------------------------------------
# Code ref that turns the parsed schema into output; resolution of
# module names / sub names is delegated to _tool().
has producer => ( is => 'rw', default => sub { $DEFAULT_SUB } );

around producer => sub {
    my $orig = shift;
    shift->_tool({
        orig        => $orig,
        name        => 'producer',
        path        => "SQL::Translator::Producer",
        default_sub => "produce",
    }, @_);
};

has producer_type => ( is => 'rwp', init_arg => undef );
around producer_type => carp_ro('producer_type');

has producer_args => ( is => 'rw', default => quote_sub(q{ +{} }) );
around producer_args => sub { my $orig = shift; shift->_args($orig, @_); };

# --- parser -------------------------------------------------------------
# Mirror image of the producer machinery, defaulting to the "parse" sub.
has parser => ( is => 'rw', default => sub { $DEFAULT_SUB } );

around parser => sub {
    my $orig = shift;
    shift->_tool({
        orig        => $orig,
        name        => 'parser',
        path        => "SQL::Translator::Parser",
        default_sub => "parse",
    }, @_);
};

has parser_type => ( is => 'rwp', init_arg => undef );
around parser_type => carp_ro('parser_type');

has parser_args => ( is => 'rw', default => quote_sub(q{ +{} }) );
around parser_args => sub { my $orig = shift; shift->_args($orig, @_); };

# --- filters ------------------------------------------------------------
# Stored internally as [ \&code, @args ] tuples; bare code refs and
# module names (resolved under SQL::Translator::Filter) are both accepted.
has filters => (
    is      => 'rw',
    default => quote_sub(q{ [] }),
    coerce  => sub {
        my @filters;
        # Set. Convert args to list of [\&code,@args]
        foreach (@{$_[0]||[]}) {
            my ($filt,@args) = ref($_) eq "ARRAY" ? @$_ : $_;
            if ( isa($filt,"CODE") ) {
                push @filters, [$filt,@args];
                next;
            }
            else {
                __PACKAGE__->debug("Adding $filt filter. Args:".Dumper(\@args)."\n")
                    if __PACKAGE__->debugging;
                $filt = _load_sub("$filt\::filter", "SQL::Translator::Filter")
                    || throw(__PACKAGE__->error);
                push @filters, [$filt,@args];
            }
        }
        return \@filters;
    },
);

# As a setter, filters are cumulative: new entries append to the list.
# As a getter, returns the list of [ \&code, @args ] tuples.
around filters => sub {
    my $orig = shift;
    my $self = shift;
    return @{$self->$orig([@{$self->$orig}, @_])} if @_;
    return @{$self->$orig};
};

# --- filename -----------------------------------------------------------
# Input file (or arrayref of files); validated for existence/readability.
has filename => (
    is  => 'rw',
    isa => sub {
        foreach my $filename (ref($_[0]) eq 'ARRAY' ? @{$_[0]} : $_[0]) {
            if (-d $filename) {
                throw("Cannot use directory '$filename' as input source");
            }
            elsif (not -f _ && -r _) {
                throw("Cannot use '$filename' as input source: ".
"file does not exist or is not readable."); } } }, ); around filename => \&ex2err; has data => ( is => 'rw', builder => 1, lazy => 1, coerce => sub { # Set $self->data based on what was passed in. We will # accept a number of things; do our best to get it right. my $data = shift; if (isa($data, 'ARRAY')) { $data = join '', @$data; } elsif (isa($data, 'GLOB')) { seek ($data, 0, 0) if eof ($data); local $/; $data = <$data>; } return isa($data, 'SCALAR') ? $data : \$data; }, ); around data => sub { my $orig = shift; my $self = shift; if (@_ > 1 && !ref $_[0]) { return $self->$orig(\join('', @_)); } elsif (@_) { return $self->$orig(@_); } return ex2err($orig, $self); }; sub _build_data { my $self = shift; # If we have a filename but no data yet, populate. if (my $filename = $self->filename) { $self->debug("Opening '$filename' to get contents.\n"); local $/; my $data; my @files = ref($filename) eq 'ARRAY' ? @$filename : ($filename); foreach my $file (@files) { open my $fh, '<', $file or throw("Can't read file '$file': $!"); $data .= <$fh>; close $fh or throw("Can't close file '$file': $!"); } return \$data; } } has schema => ( is => 'lazy', init_arg => undef, clearer => 'reset', predicate => '_has_schema', ); around schema => carp_ro('schema'); around reset => sub { my $orig = shift; my $self = shift; $self->$orig(@_); return 1 }; sub _build_schema { SQL::Translator::Schema->new(translator => shift) } sub translate { my $self = shift; my ($args, $parser, $parser_type, $producer, $producer_type); my ($parser_output, $producer_output, @producer_output); # Parse arguments if (@_ == 1) { # Passed a reference to a hash? 
if (isa($_[0], 'HASH')) { # yep, a hashref $self->debug("translate: Got a hashref\n"); $args = $_[0]; } # Passed a GLOB reference, i.e., filehandle elsif (isa($_[0], 'GLOB')) { $self->debug("translate: Got a GLOB reference\n"); $self->data($_[0]); } # Passed a reference to a string containing the data elsif (isa($_[0], 'SCALAR')) { # passed a ref to a string $self->debug("translate: Got a SCALAR reference (string)\n"); $self->data($_[0]); } # Not a reference; treat it as a filename elsif (! ref $_[0]) { # Not a ref, it's a filename $self->debug("translate: Got a filename\n"); $self->filename($_[0]); } # Passed something else entirely. else { # We're not impressed. Take your empty string and leave. # return ""; # Actually, if data, parser, and producer are set, then we # can continue. Too bad, because I like my comment # (above)... return "" unless ($self->data && $self->producer && $self->parser); } } else { # You must pass in a hash, or you get nothing. return "" if @_ % 2; $args = { @_ }; } # ---------------------------------------------------------------------- # Can specify the data to be transformed using "filename", "file", # "data", or "datasource". # ---------------------------------------------------------------------- if (my $filename = ($args->{'filename'} || $args->{'file'})) { $self->filename($filename); } if (my $data = ($args->{'data'} || $args->{'datasource'})) { $self->data($data); } # ---------------------------------------------------------------- # Get the data. 
# ---------------------------------------------------------------- my $data = $self->data; # ---------------------------------------------------------------- # Local reference to the parser subroutine # ---------------------------------------------------------------- if ($parser = ($args->{'parser'} || $args->{'from'})) { $self->parser($parser); } $parser = $self->parser; $parser_type = $self->parser_type; # ---------------------------------------------------------------- # Local reference to the producer subroutine # ---------------------------------------------------------------- if ($producer = ($args->{'producer'} || $args->{'to'})) { $self->producer($producer); } $producer = $self->producer; $producer_type = $self->producer_type; # ---------------------------------------------------------------- # Execute the parser, the filters and then execute the producer. # Allowances are made for each piece to die, or fail to compile, # since the referenced subroutines could be almost anything. In # the future, each of these might happen in a Safe environment, # depending on how paranoid we want to be. # ---------------------------------------------------------------- # Run parser unless ( $self->_has_schema ) { eval { $parser_output = $parser->($self, $$data) }; if ($@ || ! $parser_output) { my $msg = sprintf "translate: Error with parser '%s': %s", $parser_type, ($@) ? $@ : " no results"; return $self->error($msg); } } $self->debug("Schema =\n", Dumper($self->schema), "\n") if $self->debugging;; # Validate the schema if asked to. if ($self->validate) { my $schema = $self->schema; return $self->error('Invalid schema') unless $schema->is_valid; } # Run filters my $filt_num = 0; foreach ($self->filters) { $filt_num++; my ($code,@args) = @$_; eval { $code->($self->schema, @args) }; my $err = $@ || $self->error || 0; return $self->error("Error with filter $filt_num : $err") if $err; } # Run producer # Calling wantarray in the eval no work, wrong scope. 
my $wantarray = wantarray ? 1 : 0; eval { if ($wantarray) { @producer_output = $producer->($self); } else { $producer_output = $producer->($self); } }; if ($@ || !( $producer_output || @producer_output)) { my $err = $@ || $self->error || "no results"; my $msg = "translate: Error with producer '$producer_type': $err"; return $self->error($msg); } return wantarray ? @producer_output : $producer_output; } sub list_parsers { return shift->_list("parser"); } sub list_producers { return shift->_list("producer"); } # ====================================================================== # Private Methods # ====================================================================== # ---------------------------------------------------------------------- # _args($type, \%args); # # Gets or sets ${type}_args. Called by parser_args and producer_args. # ---------------------------------------------------------------------- sub _args { my $self = shift; my $orig = shift; if (@_) { # If the first argument is an explicit undef (remember, we # don't get here unless there is stuff in @_), then we clear # out the producer_args hash. if (! defined $_[0]) { shift @_; $self->$orig({}); } my $args = isa($_[0], 'HASH') ? shift : { @_ }; return $self->$orig({ %{$self->$orig}, %$args }); } return $self->$orig; } # ---------------------------------------------------------------------- # Does the get/set work for parser and producer. e.g. 
# return $self->_tool({ # name => 'producer', # path => "SQL::Translator::Producer", # default_sub => "produce", # }, @_); # ---------------------------------------------------------------------- sub _tool { my ($self,$args) = (shift, shift); my $name = $args->{name}; my $orig = $args->{orig}; return $self->{$name} unless @_; # get accessor my $path = $args->{path}; my $default_sub = $args->{default_sub}; my $tool = shift; # passed an anonymous subroutine reference if (isa($tool, 'CODE')) { $self->$orig($tool); $self->${\"_set_${name}_type"}("CODE"); $self->debug("Got $name: code ref\n"); } # Module name was passed directly # We try to load the name; if it doesn't load, there's a # possibility that it has a function name attached to it, # so we give it a go. else { $tool =~ s/-/::/g if $tool !~ /::/; my ($code,$sub); ($code,$sub) = _load_sub("$tool\::$default_sub", $path); unless ($code) { if ( __PACKAGE__->error =~ m/Can't find module/ ) { # Mod not found so try sub ($code,$sub) = _load_sub("$tool", $path) unless $code; die "Can't load $name subroutine '$tool' : ".__PACKAGE__->error unless $code; } else { die "Can't load $name '$tool' : ".__PACKAGE__->error; } } # get code reference and assign my (undef,$module,undef) = $sub =~ m/((.*)::)?(\w+)$/; $self->$orig($code); $self->${\"_set_$name\_type"}($sub eq "CODE" ? "CODE" : $module); $self->debug("Got $name: $sub\n"); } # At this point, $self->{$name} contains a subroutine # reference that is ready to run # Anything left? If so, it's args my $meth = "$name\_args"; $self->$meth(@_) if (@_); return $self->{$name}; } # ---------------------------------------------------------------------- # _list($type) # ---------------------------------------------------------------------- sub _list { my $self = shift; my $type = shift || return (); my $uctype = ucfirst lc $type; # # First find all the directories where SQL::Translator # parsers or producers (the "type") appear to live. 
# load("SQL::Translator::$uctype") or return (); my $path = catfile "SQL", "Translator", $uctype; my @dirs; for (@INC) { my $dir = catfile $_, $path; $self->debug("_list_${type}s searching $dir\n"); next unless -d $dir; push @dirs, $dir; } # # Now use File::File::find to look recursively in those # directories for all the *.pm files, then present them # with the slashes turned into dashes. # my %found; find( sub { if ( -f && m/\.pm$/ ) { my $mod = $_; $mod =~ s/\.pm$//; my $cur_dir = $File::Find::dir; my $base_dir = quotemeta catfile 'SQL', 'Translator', $uctype; # # See if the current directory is below the base directory. # if ( $cur_dir =~ m/$base_dir(.*)/ ) { $cur_dir = $1; $cur_dir =~ s!^/!!; # kill leading slash $cur_dir =~ s!/!-!g; # turn other slashes into dashes } else { $cur_dir = ''; } $found{ join '-', map { $_ || () } $cur_dir, $mod } = 1; } }, @dirs ); return sort { lc $a cmp lc $b } keys %found; } # ---------------------------------------------------------------------- # load(MODULE [,PATH[,PATH]...]) # # Loads a Perl module. Short circuits if a module is already loaded. # # MODULE - is the name of the module to load. # # PATH - optional list of 'package paths' to look for the module in. e.g # If you called load('Super::Foo' => 'My', 'Other') it will # try to load the mod Super::Foo then My::Super::Foo then Other::Super::Foo. # # Returns package name of the module actually loaded or false and sets error. # # Note, you can't load a name from the root namespace (ie one without '::' in # it), therefore a single word name without a path fails. # ---------------------------------------------------------------------- sub load { my $name = shift; my @path; push @path, "" if $name =~ /::/; # Empty path to check name on its own first push @path, @_ if @_; foreach (@path) { my $module = $_ ? 
"$_\::$name" : $name; my $file = $module; $file =~ s[::][/]g; $file .= ".pm"; __PACKAGE__->debug("Loading $name as $file\n"); return $module if $INC{$file}; # Already loaded eval { require $file }; next if $@ =~ /Can't locate $file in \@INC/; eval { $module->import() } unless $@; return __PACKAGE__->error("Error loading $name as $module : $@") if $@ && $@ !~ /"SQL::Translator::Producer" is not exported/; return $module; # Module loaded ok } return __PACKAGE__->error("Can't find module $name. Path:".join(",",@path)); } # ---------------------------------------------------------------------- # Load the sub name given (including package), optionally using a base package # path. Returns code ref and name of sub loaded, including its package. # (\&code, $sub) = load_sub( 'MySQL::produce', "SQL::Translator::Producer" ); # (\&code, $sub) = load_sub( 'MySQL::produce', @path ); # ---------------------------------------------------------------------- sub _load_sub { my ($tool, @path) = @_; my (undef,$module,$func_name) = $tool =~ m/((.*)::)?(\w+)$/; if ( my $module = load($module => @path) ) { my $sub = "$module\::$func_name"; return wantarray ? ( \&{ $sub }, $sub ) : \&$sub; } return undef; } sub format_table_name { return shift->_format_name('_format_table_name', @_); } sub format_package_name { return shift->_format_name('_format_package_name', @_); } sub format_fk_name { return shift->_format_name('_format_fk_name', @_); } sub format_pk_name { return shift->_format_name('_format_pk_name', @_); } # ---------------------------------------------------------------------- # The other format_*_name methods rely on this one. It optionally # accepts a subroutine ref as the first argument (or uses an identity # sub if one isn't provided or it doesn't already exist), and applies # it to the rest of the arguments (if any). 
# ---------------------------------------------------------------------- sub _format_name { my $self = shift; my $field = shift; my @args = @_; if (ref($args[0]) eq 'CODE') { $self->{$field} = shift @args; } elsif (! exists $self->{$field}) { $self->{$field} = sub { return shift }; } return @args ? $self->{$field}->(@args) : $self->{$field}; } sub isa($$) { my ($ref, $type) = @_; return UNIVERSAL::isa($ref, $type); } sub version { my $self = shift; return $VERSION; } # Must come after all 'has' declarations around new => \&ex2err; 1; # ---------------------------------------------------------------------- # Who killed the pork chops? # What price bananas? # Are you my Angel? # Allen Ginsberg # ---------------------------------------------------------------------- =pod =head1 NAME SQL::Translator - manipulate structured data definitions (SQL and more) =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( # Print debug info debug => 1, # Print Parse::RecDescent trace trace => 0, # Don't include comments in output no_comments => 0, # Print name mutations, conflicts show_warnings => 0, # Add "drop table" statements add_drop_table => 1, # to quote or not to quote, thats the question quote_identifiers => 1, # Validate schema object validate => 1, # Make all table names CAPS in producers which support this option format_table_name => sub {my $tablename = shift; return uc($tablename)}, # Null-op formatting, only here for documentation's sake format_package_name => sub {return shift}, format_fk_name => sub {return shift}, format_pk_name => sub {return shift}, ); my $output = $translator->translate( from => 'MySQL', to => 'Oracle', # Or an arrayref of filenames, i.e. [ $file1, $file2, $file3 ] filename => $file, ) or die $translator->error; print $output; =head1 DESCRIPTION This documentation covers the API for SQL::Translator. For a more general discussion of how to use the modules and scripts, please see L. 
SQL::Translator is a group of Perl modules that converts vendor-specific SQL table definitions into other formats, such as other vendor-specific SQL, ER diagrams, documentation (POD and HTML), XML, and Class::DBI classes. The main focus of SQL::Translator is SQL, but parsers exist for other structured data formats, including Excel spreadsheets and arbitrarily delimited text files. Through the separation of the code into parsers and producers with an object model in between, it's possible to combine any parser with any producer, to plug in custom parsers or producers, or to manipulate the parsed data via the built-in object model. Presently only the definition parts of SQL are handled (CREATE, ALTER), not the manipulation of data (INSERT, UPDATE, DELETE). =head1 CONSTRUCTOR =head2 new The constructor is called C, and accepts a optional hash of options. Valid options are: =over 4 =item * parser / from =item * parser_args =item * producer / to =item * producer_args =item * filters =item * filename / file =item * data =item * debug =item * add_drop_table =item * quote_identifiers =item * quote_table_names (DEPRECATED) =item * quote_field_names (DEPRECATED) =item * no_comments =item * trace =item * validate =back All options are, well, optional; these attributes can be set via instance methods. Internally, they are; no (non-syntactical) advantage is gained by passing options to the constructor. =head1 METHODS =head2 add_drop_table Toggles whether or not to add "DROP TABLE" statements just before the create definitions. =head2 quote_identifiers Toggles whether or not to quote identifiers (table, column, constraint, etc.) with a quoting mechanism suitable for the chosen Producer. The default (true) is to quote them. =head2 quote_table_names DEPRECATED - A legacy proxy to L =head2 quote_field_names DEPRECATED - A legacy proxy to L =head2 no_comments Toggles whether to print comments in the output. Accepts a true or false value, returns the current value. 
=head2 producer The C method is an accessor/mutator, used to retrieve or define what subroutine is called to produce the output. A subroutine defined as a producer will be invoked as a function (I) and passed its container C instance, which it should call the C method on, to get the C generated by the parser. It is expected that the function transform the schema structure to a string. The C instance is also useful for informational purposes; for example, the type of the parser can be retrieved using the C method, and the C and C methods can be called when needed. When defining a producer, one of several things can be passed in: A module name (e.g., C), a module name relative to the C namespace (e.g., C), a module name and function combination (C), or a reference to an anonymous subroutine. If a full module name is passed in (for the purposes of this method, a string containing "::" is considered to be a module name), it is treated as a package, and a function called "produce" will be invoked: C<$modulename::produce>. If $modulename cannot be loaded, the final portion is stripped off and treated as a function. In other words, if there is no file named F, C will attempt to load F and use C as the name of the function, instead of the default C. my $tr = SQL::Translator->new; # This will invoke My::Groovy::Producer::produce($tr, $data) $tr->producer("My::Groovy::Producer"); # This will invoke SQL::Translator::Producer::Sybase::produce($tr, $data) $tr->producer("Sybase"); # This will invoke My::Groovy::Producer::transmogrify($tr, $data), # assuming that My::Groovy::Producer::transmogrify is not a module # on disk. $tr->producer("My::Groovy::Producer::transmogrify"); # This will invoke the referenced subroutine directly, as # $subref->($tr, $data); $tr->producer(\&my_producer); There is also a method named C, which is a string containing the classname to which the above C function belongs. In the case of anonymous subroutines, this method returns the string "CODE". 
Finally, there is a method named C, which is both an accessor and a mutator. Arbitrary data may be stored in name => value pairs for the producer subroutine to access: sub My::Random::producer { my ($tr, $data) = @_; my $pr_args = $tr->producer_args(); # $pr_args is a hashref. Extra data passed to the C method is passed to C: $tr->producer("xSV", delimiter => ',\s*'); # In SQL::Translator::Producer::xSV: my $args = $tr->producer_args; my $delimiter = $args->{'delimiter'}; # value is ,\s* =head2 parser The C method defines or retrieves a subroutine that will be called to perform the parsing. The basic idea is the same as that of C (see above), except the default subroutine name is "parse", and will be invoked as C<$module_name::parse($tr, $data)>. Also, the parser subroutine will be passed a string containing the entirety of the data to be parsed. # Invokes SQL::Translator::Parser::MySQL::parse() $tr->parser("MySQL"); # Invokes My::Groovy::Parser::parse() $tr->parser("My::Groovy::Parser"); # Invoke an anonymous subroutine directly $tr->parser(sub { my $dumper = Data::Dumper->new([ $_[1] ], [ "SQL" ]); $dumper->Purity(1)->Terse(1)->Deepcopy(1); return $dumper->Dump; }); There is also C and C, which perform analogously to C and C =head2 filters Set or retrieve the filters to run over the schema during the translation, before the producer creates its output. Filters are sub routines called, in order, with the schema object to filter as the 1st arg and a hash of options (passed as a list) for the rest of the args. They are free to do whatever they want to the schema object, which will be handed to any following filters, then used by the producer. Filters are set as an array, which gives the order they run in. Like parsers and producers, they can be defined by a module name, a module name relative to the SQL::Translator::Filter namespace, a module name and function name together or a reference to an anonymous subroutine. 
When using a module name a function called C<filter> will be invoked in
that package to do the work.

To pass args to the filter set it as an array ref with the 1st value giving
the filter (name or sub) and the rest its args. e.g.

 $tr->filters(
     sub {
        my $schema = shift;
        # Do stuff to schema here!
     },
     DropFKeys,
     [ "Names", table => 'lc' ],
     [ "Foo",   foo => "bar", hello => "world" ],
     [ "Filter5" ],
 );

Although you normally set them in the constructor, which calls
through to filters. i.e.

 my $translator = SQL::Translator->new(
     ...
     filters => [
         sub { ... },
         [ "Names", table => 'lc' ],
     ],
     ...
 );

See F<t/36-filters.t> for more examples.

Multiple set calls to filters are cumulative with new filters added to
the end of the current list.

Returns the filters as a list of array refs, the 1st value being a
reference to the filter sub and the rest its args.

=head2 show_warnings

Toggles whether to print warnings of name conflicts, identifier
mutations, etc. Probably only generated by producers to let the user
know when something won't translate very smoothly (e.g., MySQL "enum"
fields into Oracle). Accepts a true or false value, returns the current
value.

=head2 translate

The C<translate> method calls the subroutine referenced by the
C<parser> data member, then calls any C<filters> and finally calls
the C<producer> subroutine (these members are described above). It
accepts as arguments a number of things, in key => value format,
including (potentially) a parser and a producer (they are passed
directly to the C<parser> and C<producer> methods).

Here is how the parameter list to C<translate> is parsed:

=over

=item *

1 argument means it's the data to be parsed; which could be a string
(filename) or a reference to a scalar (a string stored in memory), or a
reference to a hash, which is parsed as being more than one argument
(see next section).
# Parse the file /path/to/datafile my $output = $tr->translate("/path/to/datafile"); # Parse the data contained in the string $data my $output = $tr->translate(\$data); =item * More than 1 argument means its a hash of things, and it might be setting a parser, producer, or datasource (this key is named "filename" or "file" if it's a file, or "data" for a SCALAR reference. # As above, parse /path/to/datafile, but with different producers for my $prod ("MySQL", "XML", "Sybase") { print $tr->translate( producer => $prod, filename => "/path/to/datafile", ); } # The filename hash key could also be: datasource => \$data, You get the idea. =back =head2 filename, data Using the C method, the filename of the data to be parsed can be set. This method can be used in conjunction with the C method, below. If both the C and C methods are invoked as mutators, the data set in the C method is used. $tr->filename("/my/data/files/create.sql"); or: my $create_script = do { local $/; open CREATE, "/my/data/files/create.sql" or die $!; ; }; $tr->data(\$create_script); C takes a string, which is interpreted as a filename. C takes a reference to a string, which is used as the data to be parsed. If a filename is set, then that file is opened and read when the C method is called, as long as the data instance variable is not set. =head2 schema Returns the SQL::Translator::Schema object. =head2 trace Turns on/off the tracing option of Parse::RecDescent. =head2 validate Whether or not to validate the schema object after parsing and before producing. =head2 version Returns the version of the SQL::Translator release. 
=head1 AUTHORS See the included AUTHORS file: L =head1 GETTING HELP/SUPPORT If you are stuck with a problem or have doubts about a particular approach do not hesitate to contact us via any of the following options (the list is sorted by "fastest response time"): =over =item * IRC: irc.perl.org#sql-translator =for html (click for instant chatroom login) =item * Mailing list: L =item * RT Bug Tracker: L =back =head1 HOW TO CONTRIBUTE Contributions are always welcome, in all usable forms (we especially welcome documentation improvements). The delivery methods include git- or unified-diff formatted patches, GitHub pull requests, or plain bug reports either via RT or the Mailing list. Contributors are generally granted access to the official repository after their first several patches pass successful review. Don't hesitate to L us with any further questions you may have. This project is maintained in a git repository. The code and related tools are accessible at the following locations: =over =item * Official repo: L =item * Official gitweb: L =item * GitHub mirror: L =item * Authorized committers: L =item * Travis-CI log: L =for html ↪ Stable branch CI status: =back =head1 COPYRIGHT Copyright 2012 the SQL::Translator authors, as listed in L. =head1 LICENSE This library is free software and may be distributed under the same terms as Perl 5 itself. =head1 PRAISE If you find this module useful, please use L to rate it. =head1 SEE ALSO L, L, L, L, L, L, L, L, L. 
SQL-Translator-1.60/lib/SQL/Translator/0000755000000000000000000000000013473557372017602 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/Translator/Producer/0000755000000000000000000000000013473557372021365 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/Translator/Producer/PostgreSQL.pm0000644000000000000000000007662213473550070023727 0ustar00rootroot00000000000000package SQL::Translator::Producer::PostgreSQL; =head1 NAME SQL::Translator::Producer::PostgreSQL - PostgreSQL producer for SQL::Translator =head1 SYNOPSIS my $t = SQL::Translator->new( parser => '...', producer => 'PostgreSQL' ); $t->translate; =head1 DESCRIPTION Creates a DDL suitable for PostgreSQL. Very heavily based on the Oracle producer. Now handles PostGIS Geometry and Geography data types on table definitions. Does not yet support PostGIS Views. =cut use strict; use warnings; our ( $DEBUG, $WARN ); our $VERSION = '1.60'; $DEBUG = 0 unless defined $DEBUG; use base qw(SQL::Translator::Producer); use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment parse_dbms_version batch_alter_table_statements normalize_quote_options); use SQL::Translator::Generator::DDL::PostgreSQL; use Data::Dumper; use constant MAX_ID_LENGTH => 62; { my ($quoting_generator, $nonquoting_generator); sub _generator { my $options = shift; return $options->{generator} if exists $options->{generator}; return normalize_quote_options($options) ? 
$quoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new : $nonquoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new( quote_chars => [], ); } } my ( %translate ); BEGIN { %translate = ( # # MySQL types # double => 'double precision', decimal => 'numeric', int => 'integer', mediumint => 'integer', tinyint => 'smallint', char => 'character', varchar => 'character varying', longtext => 'text', mediumtext => 'text', tinytext => 'text', tinyblob => 'bytea', blob => 'bytea', mediumblob => 'bytea', longblob => 'bytea', enum => 'character varying', set => 'character varying', datetime => 'timestamp', year => 'date', # # Oracle types # number => 'integer', varchar2 => 'character varying', long => 'text', clob => 'text', # # Sybase types # comment => 'text', # # MS Access types # memo => 'text', ); } my %truncated; =pod =head1 PostgreSQL Create Table Syntax CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name ( { column_name data_type [ DEFAULT default_expr ] [ column_constraint [, ... ] ] | table_constraint } [, ... ] ) [ INHERITS ( parent_table [, ... ] ) ] [ WITH OIDS | WITHOUT OIDS ] where column_constraint is: [ CONSTRAINT constraint_name ] { NOT NULL | NULL | UNIQUE | PRIMARY KEY | CHECK (expression) | REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] and table_constraint is: [ CONSTRAINT constraint_name ] { UNIQUE ( column_name [, ... ] ) | PRIMARY KEY ( column_name [, ... ] ) | CHECK ( expression ) | FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] =head1 Create Index Syntax CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( column [ ops_name ] [, ...] 
) [ WHERE predicate ] CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( func_name( column [, ... ]) [ ops_name ] ) [ WHERE predicate ] =cut sub produce { my $translator = shift; local $DEBUG = $translator->debug; local $WARN = $translator->show_warnings; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $pargs = $translator->producer_args; my $postgres_version = parse_dbms_version( $pargs->{postgres_version}, 'perl' ); my $generator = _generator({ quote_identifiers => $translator->quote_identifiers }); my @output; push @output, header_comment unless ($no_comments); my (@table_defs, @fks); my %type_defs; for my $table ( $schema->get_tables ) { my ($table_def, $fks) = create_table($table, { generator => $generator, no_comments => $no_comments, postgres_version => $postgres_version, add_drop_table => $add_drop_table, type_defs => \%type_defs, }); push @table_defs, $table_def; push @fks, @$fks; } for my $view ( $schema->get_views ) { push @table_defs, create_view($view, { postgres_version => $postgres_version, add_drop_view => $add_drop_table, generator => $generator, no_comments => $no_comments, }); } for my $trigger ( $schema->get_triggers ) { push @table_defs, create_trigger( $trigger, { add_drop_trigger => $add_drop_table, generator => $generator, no_comments => $no_comments, }); } push @output, map { "$_;\n\n" } values %type_defs; push @output, map { "$_;\n\n" } @table_defs; if ( @fks ) { push @output, "--\n-- Foreign Key Definitions\n--\n\n" unless $no_comments; push @output, map { "$_;\n\n" } @fks; } if ( $WARN ) { if ( %truncated ) { warn "Truncated " . keys( %truncated ) . " names:\n"; warn "\t" . join( "\n\t", sort keys %truncated ) . "\n"; } } return wantarray ? 
@output : join ('', @output); } { my %global_names; sub mk_name { my $basename = shift || ''; my $type = shift || ''; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? MAX_ID_LENGTH - (length($type) + 1) : MAX_ID_LENGTH; $basename = substr( $basename, 0, $max_name ) if length( $basename ) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ( $basename ne $basename_orig and $critical ) { my $show_type = $type ? "+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to ", MAX_ID_LENGTH, " character limit to make '$name'\n" if $WARN; $truncated{ $basename_orig } = $name; } $scope ||= \%global_names; if ( my $prev = $scope->{ $name } ) { my $name_orig = $name; $name .= sprintf( "%02d", ++$prev ); substr($name, MAX_ID_LENGTH - 3) = "00" if length( $name ) > MAX_ID_LENGTH; warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{ $name_orig }++; } $scope->{ $name }++; return $name; } } sub is_geometry { my $field = shift; return 1 if $field->data_type eq 'geometry'; } sub is_geography { my $field = shift; return 1 if $field->data_type eq 'geography'; } sub create_table { my ($table, $options) = @_; my $generator = _generator($options); my $no_comments = $options->{no_comments} || 0; my $add_drop_table = $options->{add_drop_table} || 0; my $postgres_version = $options->{postgres_version} || 0; my $type_defs = $options->{type_defs} || {}; my $table_name = $table->name or next; my $table_name_qt = $generator->quote($table_name); my ( @comments, @field_defs, @index_defs, @constraint_defs, @fks ); push @comments, "--\n-- Table: $table_name\n--\n" unless $no_comments; if ( !$no_comments and my $comments = $table->comments ) { $comments =~ s/^/-- /mg; push @comments, "-- Comments:\n$comments\n--\n"; } # # Fields # for my $field ( $table->get_fields ) { push @field_defs, create_field($field, { generator => $generator, postgres_version => $postgres_version, 
type_defs => $type_defs, constraint_defs => \@constraint_defs, }); } # # Index Declarations # for my $index ( $table->get_indices ) { my ($idef, $constraints) = create_index($index, { generator => $generator, }); $idef and push @index_defs, $idef; push @constraint_defs, @$constraints; } # # Table constraints # for my $c ( $table->get_constraints ) { my ($cdefs, $fks) = create_constraint($c, { generator => $generator, }); push @constraint_defs, @$cdefs; push @fks, @$fks; } my $create_statement = join("\n", @comments); if ($add_drop_table) { if ($postgres_version >= 8.002) { $create_statement .= "DROP TABLE IF EXISTS $table_name_qt CASCADE;\n"; } else { $create_statement .= "DROP TABLE $table_name_qt CASCADE;\n"; } } my $temporary = $table->extra->{temporary} ? "TEMPORARY " : ""; $create_statement .= "CREATE ${temporary}TABLE $table_name_qt (\n" . join( ",\n", map { " $_" } @field_defs, @constraint_defs ). "\n)" ; $create_statement .= @index_defs ? ';' : q{}; $create_statement .= ( $create_statement =~ /;$/ ? "\n" : q{} ) . join(";\n", @index_defs); # # Geometry # if (my @geometry_columns = grep { is_geometry($_) } $table->get_fields) { $create_statement .= join(";\n", '', map{ drop_geometry_column($_, $options) } @geometry_columns) if $options->{add_drop_table}; $create_statement .= join(";\n", '', map{ add_geometry_column($_, $options) } @geometry_columns); } return $create_statement, \@fks; } sub create_view { my ($view, $options) = @_; my $generator = _generator($options); my $postgres_version = $options->{postgres_version} || 0; my $add_drop_view = $options->{add_drop_view}; my $view_name = $view->name; debug("PKG: Looking at view '${view_name}'\n"); my $create = ''; $create .= "--\n-- View: " . $generator->quote($view_name) . "\n--\n" unless $options->{no_comments}; if ($add_drop_view) { if ($postgres_version >= 8.002) { $create .= "DROP VIEW IF EXISTS " . $generator->quote($view_name) . ";\n"; } else { $create .= "DROP VIEW " . $generator->quote($view_name) . 
";\n"; } } $create .= 'CREATE'; my $extra = $view->extra; $create .= " TEMPORARY" if exists($extra->{temporary}) && $extra->{temporary}; $create .= " VIEW " . $generator->quote($view_name); if ( my @fields = $view->fields ) { my $field_list = join ', ', map { $generator->quote($_) } @fields; $create .= " ( ${field_list} )"; } if ( my $sql = $view->sql ) { $create .= " AS\n ${sql}\n"; } if ( $extra->{check_option} ) { $create .= ' WITH ' . uc $extra->{check_option} . ' CHECK OPTION'; } return $create; } { my %field_name_scope; sub create_field { my ($field, $options) = @_; my $generator = _generator($options); my $table_name = $field->table->name; my $constraint_defs = $options->{constraint_defs} || []; my $postgres_version = $options->{postgres_version} || 0; my $type_defs = $options->{type_defs} || {}; $field_name_scope{$table_name} ||= {}; my $field_name = $field->name; my $field_comments = ''; if (my $comments = $field->comments) { $comments =~ s/(?quote($field_name); # # Datatype # my $data_type = lc $field->data_type; my %extra = $field->extra; my $list = $extra{'list'} || []; my $commalist = join( ', ', map { __PACKAGE__->_quote_string($_) } @$list ); if ($postgres_version >= 8.003 && $data_type eq 'enum') { my $type_name = $extra{'custom_type_name'} || $field->table->name . '_' . $field->name . '_type'; $field_def .= ' '. $type_name; my $new_type_def = "DROP TYPE IF EXISTS $type_name CASCADE;\n" . "CREATE TYPE $type_name AS ENUM ($commalist)"; if (! exists $type_defs->{$type_name} ) { $type_defs->{$type_name} = $new_type_def; } elsif ( $type_defs->{$type_name} ne $new_type_def ) { die "Attempted to redefine type name '$type_name' as a different type.\n"; } } else { $field_def .= ' '. 
convert_datatype($field);
    }

    #
    # Default value
    #
    __PACKAGE__->_apply_default_value(
      $field,
      \$field_def,
      [
        'NULL'              => \'NULL',
        'now()'             => 'now()',
        'CURRENT_TIMESTAMP' => 'CURRENT_TIMESTAMP',
      ],
    );

    #
    # Not null constraint
    #
    $field_def .= ' NOT NULL' unless $field->is_nullable;

    #
    # Geometry constraints
    #
    if ( is_geometry($field) ) {
      foreach ( create_geometry_constraints($field, $options) ) {
        my ($cdefs, $fks) = create_constraint($_, $options);
        push @$constraint_defs, @$cdefs;
        # NOTE(review): the original code also did "push @$fks, @$fks"
        # here, which only appended the freshly returned fk list onto
        # itself and then discarded it when $fks went out of scope.
        # Geometry CHECK constraints never yield foreign keys, so the
        # bogus push has been removed.
      }
    }

    return $field_def;
  }
}

# Build the standard PostGIS CHECK constraints for a geometry column:
#   enforce_dims_*    - ST_NDims() must match the declared dimension count
#   enforce_srid_*    - ST_SRID() must match the declared SRID
#   enforce_geotype_* - GeometryType() must match the declared type (or NULL)
# Returns a list of SQL::Translator::Schema::Constraint objects.
sub create_geometry_constraints {
  my ($field, $options) = @_;

  my $fname = _generator($options)->quote($field);
  my @constraints;
  push @constraints, SQL::Translator::Schema::Constraint->new(
    name       => "enforce_dims_" . $field->name,
    expression => "(ST_NDims($fname) = " . $field->extra->{dimensions} . ")",
    table      => $field->table,
    type       => CHECK_C,
  );

  push @constraints, SQL::Translator::Schema::Constraint->new(
    name       => "enforce_srid_" . $field->name,
    expression => "(ST_SRID($fname) = " . $field->extra->{srid} . ")",
    table      => $field->table,
    type       => CHECK_C,
  );
  push @constraints, SQL::Translator::Schema::Constraint->new(
    name       => "enforce_geotype_" . $field->name,
    expression => "(GeometryType($fname) = "
      . __PACKAGE__->_quote_string( $field->extra->{geometry_type} )
      . "::text OR $fname IS NULL)",
    table => $field->table,
    type  => CHECK_C,
  );

  return @constraints;
}

{
  # Per-table counter used to name anonymous indices (<table>_idx_<n>).
  my %index_name;

  # Generate DDL for a single index.  PRIMARY KEY and UNIQUE indices
  # become inline table-constraint clauses; NORMAL indices become a
  # standalone CREATE INDEX statement (honouring USING/WHERE options).
  # Returns ($create_index_sql_or_undef, \@constraint_defs).
  sub create_index {
    my ( $index, $options ) = @_;

    my $generator  = _generator($options);
    my $table_name = $index->table->name;

    my ( $index_def, @constraint_defs );

    my $name = $index->name
      || join( '_', $table_name, 'idx', ++$index_name{$table_name} );

    my $type   = $index->type || NORMAL;
    my @fields = $index->fields;
    return unless @fields;

    my $index_using;
    my $index_where;
    for my $opt ( $index->options ) {
      if ( ref $opt eq 'HASH' ) {
        foreach my $key ( keys %$opt ) {
          my $value = $opt->{$key};
          next unless defined $value;
          if ( uc($key) eq 'USING' ) {
            $index_using = "USING $value";
          }
          elsif ( uc($key) eq 'WHERE' ) {
            $index_where = "WHERE $value";
          }
        }
      }
    }

    my $def_start   = 'CONSTRAINT ' . $generator->quote($name) . ' ';
    # Fields that look like expressions (contain parens) are emitted
    # verbatim; plain column names are quoted.
    my $field_names = '('
      . join( ", ",
        ( map { $_ =~ /\(.*\)/ ? $_ : ( $generator->quote($_) ) } @fields ) )
      . ')';
    if ( $type eq PRIMARY_KEY ) {
      push @constraint_defs, "${def_start}PRIMARY KEY " . $field_names;
    }
    elsif ( $type eq UNIQUE ) {
      push @constraint_defs, "${def_start}UNIQUE " . $field_names;
    }
    elsif ( $type eq NORMAL ) {
      $index_def =
          'CREATE INDEX '
        . $generator->quote($name) . ' on '
        . $generator->quote($table_name) . ' '
        . join ' ', grep { defined } $index_using, $field_names, $index_where;
    }
    else {
      warn "Unknown index type ($type) on table $table_name.\n"
        if $WARN;
    }

    return $index_def, \@constraint_defs;
  }
}

# Generate the DDL fragments for a table constraint.  Returns
# (\@constraint_defs, \@fks): inline constraint clauses, plus any
# "ALTER TABLE ... ADD ... FOREIGN KEY ..." statements that must be
# emitted after all tables exist.
sub create_constraint {
  my ( $c, $options ) = @_;

  my $generator  = _generator($options);
  my $table_name = $c->table->name;
  my ( @constraint_defs, @fks );

  my $name = $c->name || '';

  my @fields  = grep { defined } $c->fields;
  my @rfields = grep { defined } $c->reference_fields;

  # A constraint with no fields is only meaningful for CHECK
  # constraints.  The original code used "next" here, which is an
  # "Exiting subroutine via next" and a fatal runtime error whenever
  # this sub is called outside a loop (e.g. from
  # alter_create_constraint); return empty results instead.
  return \@constraint_defs, \@fks
    if !@fields && $c->type ne CHECK_C;

  my $def_start   = $name ? 'CONSTRAINT ' . $generator->quote($name) . ' ' : '';
  my $field_names = '('
    . join( ", ",
      ( map { $_ =~ /\(.*\)/ ? $_ : ( $generator->quote($_) ) } @fields ) )
    . ')';
  if ( $c->type eq PRIMARY_KEY ) {
    push @constraint_defs, "${def_start}PRIMARY KEY " . $field_names;
  }
  elsif ( $c->type eq UNIQUE ) {
    push @constraint_defs, "${def_start}UNIQUE " . $field_names;
  }
  elsif ( $c->type eq CHECK_C ) {
    my $expression = $c->expression;
    push @constraint_defs, "${def_start}CHECK ($expression)";
  }
  elsif ( $c->type eq FOREIGN_KEY ) {
    # Was "my $def .= ..." — appending to a freshly declared lexical;
    # plain assignment is equivalent and clearer.
    my $def =
        "ALTER TABLE "
      . $generator->quote($table_name)
      . " ADD ${def_start}FOREIGN KEY $field_names"
      . "\n REFERENCES "
      . $generator->quote( $c->reference_table );

    if (@rfields) {
      $def .= ' (' . join( ', ', map { $generator->quote($_) } @rfields ) . ')';
    }

    if ( $c->match_type ) {
      # Precedence fix.  The original expression was
      #   $def .= ' MATCH ' . ( $c->match_type =~ /full/i ) ? 'FULL' : 'PARTIAL';
      # "." binds tighter than "?:", so the ".=" became the ternary's
      # condition: the DDL got " MATCH 1" (or " MATCH ") appended and
      # the 'FULL'/'PARTIAL' result was discarded.  Parenthesising the
      # ternary emits the intended MATCH FULL / MATCH PARTIAL clause.
      $def .= ' MATCH ' . ( $c->match_type =~ /full/i ? 'FULL' : 'PARTIAL' );
    }

    if ( $c->on_delete ) {
      $def .= ' ON DELETE ' . $c->on_delete;
    }

    if ( $c->on_update ) {
      $def .= ' ON UPDATE ' . $c->on_update;
    }

    if ( $c->deferrable ) {
      $def .= ' DEFERRABLE';
    }

    push @fks, "$def";
  }

  return \@constraint_defs, \@fks;
}

# Generate DROP/CREATE TRIGGER statements for a trigger object.
sub create_trigger {
  my ( $trigger, $options ) = @_;

  my $generator = _generator($options);

  my @statements;

  push @statements,
    sprintf( 'DROP TRIGGER IF EXISTS %s', $generator->quote( $trigger->name ) )
    if $options->{add_drop_trigger};

  my $scope = $trigger->scope || '';
  $scope = " FOR EACH $scope" if $scope;

  push @statements, sprintf(
    'CREATE TRIGGER %s %s %s ON %s%s %s',
    $generator->quote( $trigger->name ),
    $trigger->perform_action_when,
    join( ' OR ', @{ $trigger->database_events } ),
    $generator->quote( $trigger->on_table ),
    $scope,
    $trigger->action,
  );

  return @statements;
}

# Map a schema field's declared type to the corresponding PostgreSQL
# type, handling enum/set fallbacks, serial columns, size limits and
# array suffixes.
sub convert_datatype {
  my ($field) = @_;

  my @size      = $field->size;
  my $data_type = lc $field->data_type;
  my $array     = $data_type =~ s/\[\]$//;

  if ( $data_type eq 'enum' ) {
    #        my $len = 0;
    #        $len = ($len < length($_)) ? length($_) : $len for (@$list);
    #        my $chk_name = mk_name( $table_name.'_'.$field_name, 'chk' );
    #        push @$constraint_defs,
    #        'CONSTRAINT "$chk_name" CHECK (' . $generator->quote(field_name) .
# qq[IN ($commalist))]; $data_type = 'character varying'; } elsif ( $data_type eq 'set' ) { $data_type = 'character varying'; } elsif ( $field->is_auto_increment ) { if ( (defined $size[0] && $size[0] > 11) or $data_type eq 'bigint' ) { $data_type = 'bigserial'; } else { $data_type = 'serial'; } undef @size; } else { $data_type = defined $translate{ lc $data_type } ? $translate{ lc $data_type } : $data_type; } if ( $data_type =~ /^time/i || $data_type =~ /^interval/i ) { if ( defined $size[0] && $size[0] > 6 ) { $size[0] = 6; } } if ( $data_type eq 'integer' ) { if ( defined $size[0] && $size[0] > 0) { if ( $size[0] > 10 ) { $data_type = 'bigint'; } elsif ( $size[0] < 5 ) { $data_type = 'smallint'; } else { $data_type = 'integer'; } } else { $data_type = 'integer'; } } my $type_with_size = join('|', 'bit', 'varbit', 'character', 'bit varying', 'character varying', 'time', 'timestamp', 'interval', 'numeric', 'float' ); if ( $data_type !~ /$type_with_size/ ) { @size = (); } if (defined $size[0] && $size[0] > 0 && $data_type =~ /^time/i ) { $data_type =~ s/^(time.*?)( with.*)?$/$1($size[0])/; $data_type .= $2 if(defined $2); } elsif ( defined $size[0] && $size[0] > 0 ) { $data_type .= '(' . join( ',', @size ) . ')'; } if($array) { $data_type .= '[]'; } # # Geography # if($data_type eq 'geography'){ $data_type .= '('.$field->extra->{geography_type}.','. 
$field->extra->{srid} .')' } return $data_type; } sub alter_field { my ($from_field, $to_field, $options) = @_; die "Can't alter field in another table" if($from_field->table->name ne $to_field->table->name); my $generator = _generator($options); my @out; # drop geometry column and constraints push @out, drop_geometry_column($from_field, $options), drop_geometry_constraints($from_field, $options), if is_geometry($from_field); # it's necessary to start with rename column cause this would affect # all of the following statements which would be broken if do the # rename later # BUT: drop geometry is done before the rename, cause it work's on the # $from_field directly push @out, sprintf('ALTER TABLE %s RENAME COLUMN %s TO %s', map($generator->quote($_), $to_field->table->name, $from_field->name, $to_field->name, ), ) if($from_field->name ne $to_field->name); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s SET NOT NULL', map($generator->quote($_), $to_field->table->name, $to_field->name ), ) if(!$to_field->is_nullable and $from_field->is_nullable); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s DROP NOT NULL', map($generator->quote($_), $to_field->table->name, $to_field->name ), ) if (!$from_field->is_nullable and $to_field->is_nullable); my $from_dt = convert_datatype($from_field); my $to_dt = convert_datatype($to_field); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s TYPE %s', map($generator->quote($_), $to_field->table->name, $to_field->name ), $to_dt, ) if($to_dt ne $from_dt); my $old_default = $from_field->default_value; my $new_default = $to_field->default_value; my $default_value = $to_field->default_value; # fixes bug where output like this was created: # ALTER TABLE users ALTER COLUMN column SET DEFAULT ThisIsUnescaped; if(ref $default_value eq "SCALAR" ) { $default_value = $$default_value; } elsif( defined $default_value && $to_dt =~ /^(character|text)/xsmi ) { $default_value = __PACKAGE__->_quote_string($default_value); } push @out, sprintf('ALTER 
TABLE %s ALTER COLUMN %s SET DEFAULT %s', map($generator->quote($_), $to_field->table->name, $to_field->name, ), $default_value, ) if ( defined $new_default && (!defined $old_default || $old_default ne $new_default) ); # fixes bug where removing the DEFAULT statement of a column # would result in no change push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT', map($generator->quote($_), $to_field->table->name, $to_field->name, ), ) if ( !defined $new_default && defined $old_default ); # add geometry column and constraints push @out, add_geometry_column($to_field, $options), add_geometry_constraints($to_field, $options), if is_geometry($to_field); return wantarray ? @out : join(";\n", @out); } sub rename_field { alter_field(@_) } sub add_field { my ($new_field,$options) = @_; my $out = sprintf('ALTER TABLE %s ADD COLUMN %s', _generator($options)->quote($new_field->table->name), create_field($new_field, $options)); $out .= ";\n".add_geometry_column($new_field, $options) . ";\n".add_geometry_constraints($new_field, $options) if is_geometry($new_field); return $out; } sub drop_field { my ($old_field, $options) = @_; my $generator = _generator($options); my $out = sprintf('ALTER TABLE %s DROP COLUMN %s', $generator->quote($old_field->table->name), $generator->quote($old_field->name)); $out .= ";\n".drop_geometry_column($old_field, $options) if is_geometry($old_field); return $out; } sub add_geometry_column { my ($field, $options) = @_; return sprintf( "INSERT INTO geometry_columns VALUES (%s,%s,%s,%s,%s,%s,%s)", map(__PACKAGE__->_quote_string($_), '', $field->table->schema->name, $options->{table} ? 
$options->{table} : $field->table->name, $field->name, $field->extra->{dimensions}, $field->extra->{srid}, $field->extra->{geometry_type}, ), ); } sub drop_geometry_column { my ($field) = @_; return sprintf( "DELETE FROM geometry_columns WHERE f_table_schema = %s AND f_table_name = %s AND f_geometry_column = %s", map(__PACKAGE__->_quote_string($_), $field->table->schema->name, $field->table->name, $field->name, ), ); } sub add_geometry_constraints { my ($field, $options) = @_; return join(";\n", map { alter_create_constraint($_, $options) } create_geometry_constraints($field, $options)); } sub drop_geometry_constraints { my ($field, $options) = @_; return join(";\n", map { alter_drop_constraint($_, $options) } create_geometry_constraints($field, $options)); } sub alter_table { my ($to_table, $options) = @_; my $generator = _generator($options); my $out = sprintf('ALTER TABLE %s %s', $generator->quote($to_table->name), $options->{alter_table_action}); $out .= ";\n".$options->{geometry_changes} if $options->{geometry_changes}; return $out; } sub rename_table { my ($old_table, $new_table, $options) = @_; my $generator = _generator($options); $options->{alter_table_action} = "RENAME TO " . $generator->quote($new_table); my @geometry_changes = map { drop_geometry_column($_, $options), add_geometry_column($_, { %{$options}, table => $new_table }), } grep { is_geometry($_) } $old_table->get_fields; $options->{geometry_changes} = join (";\n",@geometry_changes) if @geometry_changes; return alter_table($old_table, $options); } sub alter_create_index { my ($index, $options) = @_; my $generator = _generator($options); my ($idef, $constraints) = create_index($index, $options); return $index->type eq NORMAL ? $idef : sprintf('ALTER TABLE %s ADD %s', $generator->quote($index->table->name), join(q{}, @$constraints) ); } sub alter_drop_index { my ($index, $options) = @_; return 'DROP INDEX '. 
_generator($options)->quote($index->name); } sub alter_drop_constraint { my ($c, $options) = @_; my $generator = _generator($options); # attention: Postgres has a very special naming structure for naming # foreign keys and primary keys. It names them using the name of the # table as prefix and fkey or pkey as suffix, concatenated by an underscore my $c_name; if( $c->name ) { # Already has a name, just use it $c_name = $c->name; } elsif ( $c->type eq FOREIGN_KEY ) { # Doesn't have a name, and is foreign key, append '_fkey' $c_name = $c->table->name . '_' . ($c->fields)[0] . '_fkey'; } elsif ( $c->type eq PRIMARY_KEY ) { # Doesn't have a name, and is primary key, append '_pkey' $c_name = $c->table->name . '_pkey'; } return sprintf( 'ALTER TABLE %s DROP CONSTRAINT %s', map { $generator->quote($_) } $c->table->name, $c_name, ); } sub alter_create_constraint { my ($index, $options) = @_; my $generator = _generator($options); my ($defs, $fks) = create_constraint(@_); # return if there are no constraint definitions so we don't run # into output like this: # ALTER TABLE users ADD ; return unless(@{$defs} || @{$fks}); return $index->type eq FOREIGN_KEY ? join(q{}, @{$fks}) : join( ' ', 'ALTER TABLE', $generator->quote($index->table->name), 'ADD', join(q{}, @{$defs}, @{$fks}) ); } sub drop_table { my ($table, $options) = @_; my $generator = _generator($options); my $out = "DROP TABLE " . $generator->quote($table) . 
" CASCADE"; my @geometry_drops = map { drop_geometry_column($_); } grep { is_geometry($_) } $table->get_fields; $out .= join(";\n", '', @geometry_drops) if @geometry_drops; return $out; } sub batch_alter_table { my ( $table, $diff_hash, $options ) = @_; # as long as we're not renaming the table we don't need to be here if ( @{$diff_hash->{rename_table}} == 0 ) { return batch_alter_table_statements($diff_hash, $options); } # first we need to perform drops which are on old table my @sql = batch_alter_table_statements($diff_hash, $options, qw( alter_drop_constraint alter_drop_index drop_field )); # next comes the rename_table my $old_table = $diff_hash->{rename_table}[0][0]; push @sql, rename_table( $old_table, $table, $options ); # for alter_field (and so also rename_field) we need to make sure old # field has table name set to new table otherwise calling alter_field dies $diff_hash->{alter_field} = [map { $_->[0]->table($table) && $_ } @{$diff_hash->{alter_field}}]; $diff_hash->{rename_field} = [map { $_->[0]->table($table) && $_ } @{$diff_hash->{rename_field}}]; # now add everything else push @sql, batch_alter_table_statements($diff_hash, $options, qw( add_field alter_field rename_field alter_create_index alter_create_constraint alter_table )); return @sql; } 1; # ------------------------------------------------------------------- # Life is full of misery, loneliness, and suffering -- # and it's all over much too soon. # Woody Allen # ------------------------------------------------------------------- =pod =head1 SEE ALSO SQL::Translator, SQL::Translator::Producer::Oracle. =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. 
=cut SQL-Translator-1.60/lib/SQL/Translator/Producer/POD.pm0000644000000000000000000001010213473550070022323 0ustar00rootroot00000000000000package SQL::Translator::Producer::POD; =head1 NAME SQL::Translator::Producer::POD - POD producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'POD', '...' ); print $t->translate; =head1 DESCRIPTION Creates a POD description of each table, field, index, and constraint. A good starting point for text documentation of a schema. You can easily convert the output to HTML or text using "perldoc" or other interesting formats using Pod::POM or Template::Toolkit's POD plugin. =cut use strict; use warnings; our $VERSION = '1.60'; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(header_comment); sub produce { my $t = shift; my $schema = $t->schema; my $schema_name = $schema->name || 'Schema'; my $args = $t->producer_args; my $title = $args->{'title'} || $schema_name; my $pod = "=pod\n\n=head1 DESCRIPTION\n\n$title\n\n=head1 TABLES\n\n"; for my $table ( $schema->get_tables ) { my $table_name = $table->name or next; my @fields = $table->get_fields or next; $pod .= "=head2 $table_name\n\n=head3 FIELDS\n\n"; # # Fields # for my $field ( @fields ) { $pod .= "=head4 " . $field->name . "\n\n=over 4\n\n"; my $data_type = $field->data_type; my $size = $field->size; $data_type .= "($size)" if $size; $pod .= "=item * $data_type\n\n"; $pod .= "=item * PRIMARY KEY\n\n" if $field->is_primary_key; my $default = $field->default_value; $pod .= "=item * Default '$default' \n\n" if defined $default; $pod .= sprintf( "=item * Nullable '%s' \n\n", $field->is_nullable ? 'Yes' : 'No' ); $pod .= "=back\n\n"; } # # Indices # if ( my @indices = $table->get_indices ) { $pod .= "=head3 INDICES\n\n"; for my $index ( @indices ) { $pod .= "=head4 " . $index->type . "\n\n=over 4\n\n"; $pod .= "=item * Fields = " . join(', ', $index->fields ) . 
"\n\n"; $pod .= "=back\n\n"; } } # # Constraints # if ( my @constraints = $table->get_constraints ) { $pod .= "=head3 CONSTRAINTS\n\n"; for my $c ( @constraints ) { $pod .= "=head4 " . $c->type . "\n\n=over 4\n\n"; if($c->type eq CHECK_C) { $pod .= "=item * Expression = " . $c->expression . "\n\n"; } else { $pod .= "=item * Fields = " . join(', ', $c->fields ) . "\n\n"; if ( $c->type eq FOREIGN_KEY ) { $pod .= "=item * Reference Table = Lreference_table . ">\n\n"; $pod .= "=item * Reference Fields = " . join(', ', map {"L"} $c->reference_fields ) . "\n\n"; } if ( my $update = $c->on_update ) { $pod .= "=item * On update = $update\n\n"; } if ( my $delete = $c->on_delete ) { $pod .= "=item * On delete = $delete\n\n"; } } $pod .= "=back\n\n"; } } } my $header = ( map { $_ || () } split( /\n/, header_comment('', '') ) )[0]; $header =~ s/^Created by //; $pod .= "=head1 PRODUCED BY\n\n$header\n\n=cut"; return $pod; } 1; # ------------------------------------------------------------------- # Expect poison from the standing water. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head2 CONTRIBUTORS Jonathan Yu Efrequency@cpan.orgE =head1 SEE ALSO perldoc, perlpod, Pod::POM, Template::Manual::Plugins. 
=cut SQL-Translator-1.60/lib/SQL/Translator/Producer/GraphViz.pm0000644000000000000000000004541413473550070023451 0ustar00rootroot00000000000000package SQL::Translator::Producer::GraphViz; =pod =head1 NAME SQL::Translator::Producer::GraphViz - GraphViz producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $trans = SQL::Translator->new( from => 'MySQL', # or your db of choice to => 'GraphViz', producer_args => { out_file => 'schema.png', bgcolor => 'lightgoldenrodyellow', show_constraints => 1, show_datatypes => 1, show_sizes => 1 } ) or die SQL::Translator->error; $trans->translate or die $trans->error; =head1 DESCRIPTION Creates a graph of a schema using the amazing graphviz (see http://www.graphviz.org/) application (via the L module). It's nifty--you should try it! =head1 PRODUCER ARGS All L constructor attributes are accepted and passed through to L. The following defaults are assumed for some attributes: layout => 'dot', overlap => 'false', node => { shape => 'record', style => 'filled', fillcolor => 'white', }, # in inches width => 8.5, height => 11, See the documentation of L for more info on these and other attributes. In addition this producer accepts the following arguments: =over 4 =item * skip_tables An arrayref or a comma-separated list of table names that should be skipped. Note that a skipped table node may still appear if another table has foreign key constraints pointing to the skipped table. If this happens no table field/index information will be included. =item * skip_tables_like An arrayref or a comma-separated list of regular expressions matching table names that should be skipped. =item * cluster Clustering of tables allows you to group and box tables according to function or domain or whatever criteria you choose. 
The syntax for clustering tables is: cluster => 'cluster1=table1,table2;cluster2=table3,table4' Or pass it as an arrayref like so: cluster => [ 'cluster1=table1,table2', 'cluster2=table3,table4' ] Or like so: cluster => [ { name => 'cluster1', tables => [ 'table1', 'table2' ] }, { name => 'cluster2', tables => [ 'table3', 'table4' ] }, ] =item * out_file The name of the file where the resulting GraphViz output will be written. Alternatively an open filehandle can be supplied. If undefined (the default) - the result is returned as a string. =item * output_type (DEFAULT: 'png') This determines which L will be invoked to generate the graph: C translates to C, C to C and so on. =item * fontname This sets the global font name (or full path to font file) for node, edge, and graph labels =item * fontsize This sets the global font size for node and edge labels (note that arbitrarily large sizes may be ignored due to page size or graph size constraints) =item * show_fields (DEFAULT: true) If set to a true value, the names of the columns in a table will be displayed in each table's node =item * show_fk_only If set to a true value, only columns which are foreign keys will be displayed in each table's node =item * show_datatypes If set to a true value, the datatype of each column will be displayed next to each column's name; this option will have no effect if the value of C is set to false =item * friendly_ints If set to a true value, each integer type field will be displayed as a tinyint, smallint, integer or bigint depending on the field's associated size parameter. 
This only applies for the C type (and not the C type, which is always assumed to be a 32-bit integer); this option will have no effect if the value of C is set to false =item * friendly_ints_extended If set to a true value, the friendly ints displayed will take into account the non-standard types, 'tinyint' and 'mediumint' (which, as far as I am aware, is only implemented in MySQL) =item * show_sizes If set to a true value, the size (in bytes) of each CHAR and VARCHAR column will be displayed in parentheses next to the column's name; this option will have no effect if the value of C is set to false =item * show_constraints If set to a true value, a field's constraints (i.e., its primary-key-ness, its foreign-key-ness and/or its uniqueness) will appear as a comma-separated list in brackets next to the field's name; this option will have no effect if the value of C is set to false =item * show_indexes If set to a true value, each record will also show the indexes set on each table. It describes the index types along with which columns are included in the index. =item * show_index_names (DEFAULT: true) If C is set to a true value, then the value of this parameter determines whether or not to print names of indexes. if C is false, then a list of indexed columns will appear below the field list. Otherwise, it will be a list prefixed with the name of each index. =item * natural_join If set to a true value, L will be called before generating the graph. =item * join_pk_only The value of this option will be passed as the value of the like-named argument to L; implies C<< natural_join => 1 >> =item * skip_fields The value of this option will be passed as the value of the like-named argument to L; implies C<< natural_join => 1 >> =back =head2 DEPRECATED ARGS =over 4 =item * node_shape Deprecated, use node => { shape => ... 
} instead =item * add_color Deprecated, use bgcolor => 'lightgoldenrodyellow' instead If set to a true value, the graphic will have a background color of 'lightgoldenrodyellow'; otherwise the default white background will be used =item * nodeattrs Deprecated, use node => { ... } instead =item * edgeattrs Deprecated, use edge => { ... } instead =item * graphattrs Deprecated, use graph => { ... } instead =back =cut use warnings; use strict; use GraphViz; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug); use Scalar::Util qw/openhandle/; our $DEBUG; our $VERSION = '1.60'; $DEBUG = 0 unless defined $DEBUG; sub produce { my $t = shift; my $schema = $t->schema; my $args = $t->producer_args; local $DEBUG = $t->debug; # translate legacy {node|edge|graph}attrs to just {node|edge|graph} for my $argtype (qw/node edge graph/) { my $old_arg = $argtype . 'attrs'; my %arglist = (map { %{ $_ || {} } } ( delete $args->{$old_arg}, delete $args->{$argtype} ) ); $args->{$argtype} = \%arglist if keys %arglist; } # explode font settings for (qw/fontsize fontname/) { if (defined $args->{$_}) { $args->{node}{$_} ||= $args->{$_}; $args->{edge}{$_} ||= $args->{$_}; $args->{graph}{$_} ||= $args->{$_}; } } # legacy add_color setting, trumped by bgcolor if set $args->{bgcolor} ||= 'lightgoldenrodyellow' if $args->{add_color}; # legacy node_shape setting, defaults to 'record', trumped by {node}{shape} $args->{node}{shape} ||= ( $args->{node_shape} || 'record' ); # maintain defaults $args->{layout} ||= 'dot'; $args->{output_type} ||= 'png'; $args->{overlap} ||= 'false'; $args->{node}{style} ||= 'filled'; $args->{node}{fillcolor} ||= 'white'; $args->{show_fields} = 1 if not exists $args->{show_fields}; $args->{show_index_names} = 1 if not exists $args->{show_index_names}; $args->{width} = 8.5 if not defined $args->{width}; $args->{height} = 11 if not defined $args->{height}; for ( $args->{height}, $args->{width} ) { $_ = 0 unless $_ =~ /^\d+(?:.\d+)?$/; $_ = 0 if $_ < 
0; } # so split won't warn $args->{$_} ||= '' for qw/skip_fields skip_tables skip_tables_like cluster/; my %skip_fields = map { s/^\s+|\s+$//g; length $_ ? ($_, 1) : () } split ( /,/, $args->{skip_fields} ); my %skip_tables = map { $_, 1 } ( ref $args->{skip_tables} eq 'ARRAY' ? @{$args->{skip_tables}} : split (/\s*,\s*/, $args->{skip_tables}) ); my @skip_tables_like = map { qr/$_/ } ( ref $args->{skip_tables_like} eq 'ARRAY' ? @{$args->{skip_tables_like}} : split (/\s*,\s*/, $args->{skip_tables_like}) ); # join_pk_only/skip_fields implies natural_join $args->{natural_join} = 1 if ($args->{join_pk_only} or scalar keys %skip_fields); # usually we do not want direction when using natural join $args->{directed} = ($args->{natural_join} ? 0 : 1) if not exists $args->{directed}; $schema->make_natural_joins( join_pk_only => $args->{join_pk_only}, skip_fields => $args->{skip_fields}, ) if $args->{natural_join}; my %cluster; if ( defined $args->{'cluster'} ) { my @clusters; if ( ref $args->{'cluster'} eq 'ARRAY' ) { @clusters = @{ $args->{'cluster'} }; } else { @clusters = split /\s*;\s*/, $args->{'cluster'}; } for my $c ( @clusters ) { my ( $cluster_name, @cluster_tables ); if ( ref $c eq 'HASH' ) { $cluster_name = $c->{'name'} || $c->{'cluster_name'}; @cluster_tables = @{ $c->{'tables'} || [] }; } else { my ( $name, $tables ) = split /\s*=\s*/, $c; $cluster_name = $name; @cluster_tables = split /\s*,\s*/, $tables; } for my $table ( @cluster_tables ) { $cluster{ $table } = $cluster_name; } } } # # Create a blank GraphViz object and see if we can produce the output type. 
# my $gv = GraphViz->new( %$args ) or die sprintf ("Can't create GraphViz object: %s\n", $@ || 'reason unknown' ); my $output_method = "as_$args->{output_type}"; # the generators are AUTOLOADed so can't use ->can ($output_method) eval { $gv->$output_method }; die "Invalid output type: '$args->{output_type}'" if $@; # # Process tables definitions, create nodes # my %nj_registry; # for locations of fields for natural joins my @fk_registry; # for locations of fields for foreign keys TABLE: for my $table ( $schema->get_tables ) { my $table_name = $table->name; if ( @skip_tables_like or keys %skip_tables ) { next TABLE if $skip_tables{ $table_name }; for my $regex ( @skip_tables_like ) { next TABLE if $table_name =~ $regex; } } my @fields = $table->get_fields; if ( $args->{show_fk_only} ) { @fields = grep { $_->is_foreign_key } @fields; } my $field_str = ''; if ($args->{show_fields}) { my @fmt_fields; for my $field (@fields) { my $field_info; if ($args->{show_datatypes}) { my $field_type = $field->data_type; my $size = $field->size; if ( $args->{friendly_ints} && $size && (lc ($field_type) eq 'integer') ) { # Automatically translate to int2, int4, int8 # Type (Bits) Max. Signed/Unsigned Length # tinyint* (8) 128 3 # 255 3 # smallint (16) 32767 5 # 65535 5 # mediumint* (24) 8388607 7 # 16777215 8 # int (32) 2147483647 10 # 4294967295 11 # bigint (64) 9223372036854775807 19 # 18446744073709551615 20 # # * tinyint and mediumint are nonstandard extensions which are # only available under MySQL (to my knowledge) if ($size <= 3 and $args->{friendly_ints_extended}) { $field_type = 'tinyint'; } elsif ($size <= 5) { $field_type = 'smallint'; } elsif ($size <= 8 and $args->{friendly_ints_extended}) { $field_type = 'mediumint'; } elsif ($size <= 11) { $field_type = 'integer'; } else { $field_type = 'bigint'; } } $field_info = $field_type; if ($args->{show_sizes} && $size && ($field_type =~ /^ (?: NUMERIC | DECIMAL | (VAR)?CHAR2? ) $/ix ) ) { $field_info .= '(' . $size . 
')'; } } my $constraints; if ($args->{show_constraints}) { my @constraints; push(@constraints, $field->is_auto_increment ? 'PA' : 'PK') if $field->is_primary_key; push(@constraints, 'FK') if $field->is_foreign_key; push(@constraints, 'U') if $field->is_unique; push(@constraints, 'N') if $field->is_nullable; $constraints = join (',', @constraints); } # construct the field line from all info gathered so far push @fmt_fields, join (' ', '-', $field->name, $field_info || (), $constraints ? "[$constraints]" : (), ); } # join field lines with graphviz formatting $field_str = join ('\l', @fmt_fields) . '\l'; } my $index_str = ''; if ($args->{show_indexes}) { my @fmt_indexes; for my $index ($table->get_indices) { next unless $index->is_valid; push @fmt_indexes, join (' ', '*', $args->{show_index_names} ? $index->name . ':' : () , join (', ', $index->fields), ($index->type eq 'UNIQUE') ? '[U]' : (), ); } # join index lines with graphviz formatting (if any indexes at all) $index_str = join ('\l', @fmt_indexes) . '\l' if @fmt_indexes; } my $name_str = $table_name . 
'\n'; # escape spaces for ($name_str, $field_str, $index_str) { $_ =~ s/ /\\ /g; } my $node_args; # only the 'record' type supports nice formatting if ($args->{node}{shape} eq 'record') { # the necessity to supply shape => 'record' is a graphviz bug $node_args = { shape => 'record', label => sprintf ('{%s}', join ('|', $name_str, $field_str || (), $index_str || (), ), ), }; } else { my $sep = sprintf ('%s\n', '-' x ( (length $table_name) + 2) ); $node_args = { label => join ($sep, $name_str, $field_str || (), $index_str || (), ), }; } if (my $cluster_name = $cluster{$table_name} ) { $node_args->{cluster} = $cluster_name; } $gv->add_node(qq["$table_name"], %$node_args); debug("Processing table '$table_name'"); debug("Fields = ", join(', ', map { $_->name } @fields)) if $DEBUG; for my $f ( @fields ) { my $name = $f->name or next; my $is_pk = $f->is_primary_key; my $is_unique = $f->is_unique; # # Decide if we should skip this field. # if ( $args->{natural_join} ) { next unless $is_pk || $f->is_foreign_key; } my $constraints = $f->{'constraints'}; if ( $args->{natural_join} && !$skip_fields{ $name } ) { push @{ $nj_registry{ $name } }, $table_name; } } unless ( $args->{natural_join} ) { for my $c ( $table->get_constraints ) { next unless $c->type eq FOREIGN_KEY; my $fk_table = $c->reference_table or next; for my $field_name ( $c->fields ) { for my $fk_field ( $c->reference_fields ) { next unless defined $schema->get_table( $fk_table ); # a condition is optional if at least one fk is nullable push @fk_registry, [ $table_name, $fk_table, scalar (grep { $_->is_nullable } ($c->fields)) ]; } } } } } # # Process relationships, create edges # my (@table_bunches, %optional_constraints); if ( $args->{natural_join} ) { for my $field_name ( keys %nj_registry ) { my @table_names = @{ $nj_registry{ $field_name } || [] } or next; next if scalar @table_names == 1; push @table_bunches, [ @table_names ]; } } else { for my $i (0 .. 
$#fk_registry) { my $fk = $fk_registry[$i]; push @table_bunches, [$fk->[0], $fk->[1]]; $optional_constraints{$i} = $fk->[2]; } } my %done; for my $bi (0 .. $#table_bunches) { my @tables = @{$table_bunches[$bi]}; for my $i ( 0 .. $#tables ) { my $table1 = $tables[ $i ]; for my $j ( 1 .. $#tables ) { next if $i == $j; my $table2 = $tables[ $j ]; next if $done{ $table1 }{ $table2 }; debug("Adding edge '$table2' -> '$table1'"); $gv->add_edge( qq["$table2"], qq["$table1"], arrowhead => $optional_constraints{$bi} ? 'empty' : 'normal', ); $done{ $table1 }{ $table2 } = 1; } } } # # Print the image # if ( my $out = $args->{out_file} ) { if (openhandle ($out)) { print $out $gv->$output_method; } else { open my $fh, '>', $out or die "Can't write '$out': $!\n"; binmode $fh; print $fh $gv->$output_method; close $fh; } } else { return $gv->$output_method; } } 1; =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE Jonathan Yu Efrequency@cpan.orgE =head1 SEE ALSO SQL::Translator, GraphViz =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/Latex.pm0000644000000000000000000000357113473550070022772 0ustar00rootroot00000000000000package SQL::Translator::Producer::Latex; =pod =head1 NAME SQL::Translator::Producer::Latex - Produces latex formatted tables ready for import from schema. =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( from => 'MySQL', filename => 'foo_schema.sql', to => 'Latex', ); print $translator->translate; =head1 DESCRIPTION Currently you will get one class (with the a table stereotype) generated per table in the schema. The fields are added as attributes of the classes and their datatypes set. It doesn't currently set any of the relationships. It doesn't do any layout, all the classes are in one big stack. However it is still useful as you can use the layout tools in Dia to automatically arrange them horizontally or vertically. 
=head2 Producer Args

=cut

use strict;
use warnings;
our @EXPORT_OK;
our $VERSION = '1.60';

use SQL::Translator::Utils 'debug';

# Emit one LaTeX \subsubsection plus a three-column table (Column,
# Datatype, Description) per schema table.  Returns the whole fragment as a
# single string; no document preamble is produced, the caller supplies it.
sub produce {
    my $translator = shift;
    my $schema = $translator->schema;
    my $o = '';

    for my $table ( $schema->get_tables ) {
        # Skip anonymous tables.
        my $table_name = $table->name or next;
        my $n = latex($table_name);

        # Table header; the sprintf template deliberately spans multiple
        # source lines so the generated LaTeX is line-broken as written.
        # ('\\\\' in a single-quoted string is a literal '\\', the LaTeX
        # row terminator.)
        $o .= sprintf '
\subsubsection{%s}
%s
\begin{table}[htb]
\caption{%s}
\label{tab:%s}
\center
{
\small
\begin{tabular}{l l p{8cm}}
Column & Datatype & Description \\\\
\hline
', $n, latex($table->comments), $n, $table_name;

        # One tabular row per field, every cell LaTeX-escaped.
        foreach my $f ($table->get_fields) {
            $o .= sprintf '%s & %s & %s \\\\', map {latex($_)} ($f->name, $f->data_type, $f->comments || '');
            $o .= "\n";
        }

        $o .= sprintf '
\end{tabular}
}
\end{table}
\clearpage
';
    }

    return $o;
}

# Backslash-escape the characters special to LaTeX that this producer can
# emit (& _ $ { #).  Undef becomes the empty string.
sub latex {
    my $s = shift;
    return '' unless defined $s;
    $s =~ s/([\&\_\$\{\#])/\\$1/g;
    return $s;
}

1;

=pod

=head1 AUTHOR

Chris Mungall

=head1 SEE ALSO

SQL::Translator.

=cut
SQL-Translator-1.60/lib/SQL/Translator/Producer/XML/0000755000000000000000000000000013473557372022025 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/Translator/Producer/XML/SQLFairy.pm0000644000000000000000000002605713473550070024013 0ustar00rootroot00000000000000package SQL::Translator::Producer::XML::SQLFairy;

=pod

=head1 NAME

SQL::Translator::Producer::XML::SQLFairy - SQLFairy's default XML format

=head1 SYNOPSIS

  use SQL::Translator;

  my $t = SQL::Translator->new(
      from     => 'MySQL',
      to       => 'XML-SQLFairy',
      filename => 'schema.sql',
      show_warnings => 1,
  );

  print $t->translate;

=head1 DESCRIPTION

Creates XML output of a schema, in the flavor of XML used natively by the
SQLFairy project (L). This format is detailed here.

The XML lives in the C namespace. With a root element of .

Objects in the schema are mapped to tags of the same name as the objects
class (all lowercase).

The attributes of the objects (e.g. $field->name) are mapped to attributes
of the tag, except for sql, comments and action, which get mapped to child
data elements.
List valued attributes (such as the list of fields in an index) get mapped to comma separated lists of values in the attribute. Child objects, such as a tables fields, get mapped to child tags wrapped in a set of container tags using the plural of their contained classes name. An objects' extra attribute (a hash of arbitrary data) is mapped to a tag called extra, with the hash of data as attributes, sorted into alphabetical order. e.g. ...
SELECT email FROM Basic WHERE email IS NOT NULL
To see a complete example of the XML translate one of your schema :) $ sqlt -f MySQL -t XML-SQLFairy schema.sql =head1 ARGS =over 4 =item add_prefix Set to true to use the default namespace prefix of 'sqlf', instead of using the default namespace for C e.g. =item prefix Set to the namespace prefix you want to use for the C e.g. =item newlines If true (the default) inserts newlines around the XML, otherwise the schema is written on one line. =item indent When using newlines the number of whitespace characters to use as the indent. Default is 2, set to 0 to turn off indenting. =back =head1 LEGACY FORMAT The previous version of the SQLFairy XML allowed the attributes of the schema objects to be written as either xml attributes or as data elements, in any combination. The old producer could produce attribute only or data element only versions. While this allowed for lots of flexibility in writing the XML the result is a great many possible XML formats, not so good for DTD writing, XPathing etc! So we have moved to a fixed version described above. This version of the producer will now only produce the new style XML. To convert your old format files simply pass them through the translator :) $ sqlt -f XML-SQLFairy -t XML-SQLFairy schema-old.xml > schema-new.xml =cut use strict; use warnings; our @EXPORT_OK; our $VERSION = '1.60'; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); use SQL::Translator::Utils qw(header_comment debug); BEGIN { # Will someone fix XML::Writer already? local $^W = 0; require XML::Writer; import XML::Writer; } # Which schema object attributes (methods) to write as xml elements rather than # as attributes. e.g. blah, blah... 
my @MAP_AS_ELEMENTS = qw/sql comments action extra/; my $Namespace = 'http://sqlfairy.sourceforge.net/sqlfairy.xml'; my $Name = 'sqlf'; my $PArgs = {}; my $no_comments; sub produce { my $translator = shift; my $schema = $translator->schema; $no_comments = $translator->no_comments; $PArgs = $translator->producer_args; my $newlines = defined $PArgs->{newlines} ? $PArgs->{newlines} : 1; my $indent = defined $PArgs->{indent} ? $PArgs->{indent} : 2; # Setup the XML::Writer and set the namespace my $io; my $prefix = ""; $prefix = $Name if $PArgs->{add_prefix}; $prefix = $PArgs->{prefix} if $PArgs->{prefix}; my $xml = XML::Writer->new( OUTPUT => \$io, NAMESPACES => 1, PREFIX_MAP => { $Namespace => $prefix }, DATA_MODE => $newlines, DATA_INDENT => $indent, ); # Start the document $xml->xmlDecl('UTF-8'); $xml->comment(header_comment('', '')) unless $no_comments; xml_obj($xml, $schema, tag => "schema", methods => [qw/name database extra/], end_tag => 0 ); # # Table # $xml->startTag( [ $Namespace => "tables" ] ); for my $table ( $schema->get_tables ) { debug "Table:",$table->name; xml_obj($xml, $table, tag => "table", methods => [qw/name order extra/], end_tag => 0 ); # # Fields # xml_obj_children( $xml, $table, tag => 'field', methods =>[qw/ name data_type size is_nullable default_value is_auto_increment is_primary_key is_foreign_key extra comments order /], ); # # Indices # xml_obj_children( $xml, $table, tag => 'index', collection_tag => "indices", methods => [qw/name type fields options extra/], ); # # Constraints # xml_obj_children( $xml, $table, tag => 'constraint', methods => [qw/ name type fields reference_table reference_fields on_delete on_update match_type expression options deferrable extra /], ); # # Comments # xml_obj_children( $xml, $table, tag => 'comment', collection_tag => "comments", methods => [qw/ comments /], ); $xml->endTag( [ $Namespace => 'table' ] ); } $xml->endTag( [ $Namespace => 'tables' ] ); # # Views # xml_obj_children( $xml, $schema, tag => 
'view', methods => [qw/name sql fields order extra/], ); # # Tiggers # xml_obj_children( $xml, $schema, tag => 'trigger', methods => [qw/name database_events action on_table perform_action_when fields order extra scope/], ); # # Procedures # xml_obj_children( $xml, $schema, tag => 'procedure', methods => [qw/name sql parameters owner comments order extra/], ); $xml->endTag([ $Namespace => 'schema' ]); $xml->end; return $io; } # # Takes and XML::Write object, Schema::* parent object, the tag name, # the collection name and a list of methods (of the children) to write as XML. # The collection name defaults to the name with an s on the end and is used to # work out the method to get the children with. eg a name of 'foo' gives a # collection of foos and gets the members using ->get_foos. # sub xml_obj_children { my ($xml,$parent) = (shift,shift); my %args = @_; my ($name,$collection_name,$methods) = @args{qw/tag collection_tag methods/}; $collection_name ||= "${name}s"; my $meth; if ( $collection_name eq 'comments' ) { $meth = 'comments'; } else { $meth = "get_$collection_name"; } my @kids = $parent->$meth; #@kids || return; $xml->startTag( [ $Namespace => $collection_name ] ); for my $obj ( @kids ) { if ( $collection_name eq 'comments' ){ $xml->dataElement( [ $Namespace => 'comment' ], $obj ); } else { xml_obj($xml, $obj, tag => "$name", end_tag => 1, methods => $methods, ); } } $xml->endTag( [ $Namespace => $collection_name ] ); } # # Takes an XML::Writer, Schema::* object and list of method names # and writes the object out as XML. All methods values are written as attributes # except for the methods listed in @MAP_AS_ELEMENTS which get written as child # data elements. # # The attributes/tags are written in the same order as the method names are # passed. # # TODO # - Should the Namespace be passed in instead of global? 
Pass in the same
#   as Writer ie [ NS => TAGNAME ]
#

# Precompiled pattern matching the accessor names that must be written as
# child data elements (see @MAP_AS_ELEMENTS above) instead of attributes.
my $elements_re = join("|", @MAP_AS_ELEMENTS);
$elements_re = qr/^($elements_re)$/;

# Serialize one Schema::* object as an XML element on the given XML::Writer.
# Named args:
#   tag     - element name (written in the sqlfairy namespace)
#   methods - arrayref of accessor names to serialize, in output order
#   end_tag - if true, close the element here; otherwise the caller emits
#             the matching endTag itself (used for container elements)
# Defined accessor values matching $elements_re become child data elements;
# all others become XML attributes.  'extra' (a hash) is rendered as an
# empty child element whose attributes are the hash, in sorted-key order
# for deterministic output; array values are joined with commas.
sub xml_obj {
    my ($xml, $obj, %args) = @_;
    my $tag = $args{'tag'} || '';
    my $end_tag = $args{'end_tag'} || '';
    my @meths = @{ $args{'methods'} };

    # FIX: removed an unused local ("my $empty_tag = 0;") that was declared
    # here and never read.

    # Use array to ensure consistent (ie not hash) ordering of attribs
    # The order comes from the meths list passed in.
    my @tags;
    my @attr;
    foreach ( grep { defined $obj->$_ } @meths ) {
        my $what = m/$elements_re/ ? \@tags : \@attr;
        my $val = $_ eq 'extra' ? { $obj->$_ } : $obj->$_;
        $val = ref $val eq 'ARRAY' ? join(',', @$val) : $val;
        push @$what, $_ => $val;
    }

    my $child_tags = @tags;

    # No element-valued children and we close here: emit a self-closing tag.
    $end_tag && !$child_tags
        ? $xml->emptyTag( [ $Namespace => $tag ], @attr )
        : $xml->startTag( [ $Namespace => $tag ], @attr );

    # Write the element-valued accessors as child nodes, preserving order.
    while ( my ($name,$val) = splice @tags,0,2 ) {
        if ( ref $val eq 'HASH' ) {
            $xml->emptyTag( [ $Namespace => $name ],
                map { ($_, $val->{$_}) } sort keys %$val );
        }
        else {
            $xml->dataElement( [ $Namespace => $name ], $val );
        }
    }

    $xml->endTag( [ $Namespace => $tag ] ) if $child_tags && $end_tag;
}

1;

# -------------------------------------------------------------------
# The eyes of fire, the nostrils of air,
# The mouth of water, the beard of earth.
# William Blake
# -------------------------------------------------------------------

=pod

=head1 AUTHORS

Ken Youens-Clark Ekclark@cpan.orgE,
Darren Chamberlain Edarren@cpan.orgE,
Mark Addison Emark.addison@itn.co.ukE.

=head1 SEE ALSO

C, L, L, L, L.

=cut
SQL-Translator-1.60/lib/SQL/Translator/Producer/MySQL.pm0000644000000000000000000007050313473550070022661 0ustar00rootroot00000000000000package SQL::Translator::Producer::MySQL;

=head1 NAME

SQL::Translator::Producer::MySQL - MySQL-specific producer for SQL::Translator

=head1 SYNOPSIS

Use via SQL::Translator:

  use SQL::Translator;

  my $t = SQL::Translator->new( parser => '...', producer => 'MySQL', '...'
); $t->translate; =head1 DESCRIPTION This module will produce text output of the schema suitable for MySQL. There are still some issues to be worked out with syntax differences between MySQL versions 3 and 4 ("SET foreign_key_checks," character sets for fields, etc.). =head1 ARGUMENTS This producer takes a single optional producer_arg C, which provides the desired version for the target database. By default MySQL v3 is assumed, and statements pertaining to any features introduced in later versions (e.g. CREATE VIEW) are not produced. Valid version specifiers for C are listed L =head2 Table Types Normally the tables will be created without any explicit table type given and so will use the MySQL default. Any tables involved in foreign key constraints automatically get a table type of InnoDB, unless this is overridden by setting the C extra attribute explicitly on the table. =head2 Extra attributes. The producer recognises the following extra attributes on the Schema objects. =over 4 =item B Set the list of allowed values for Enum fields. =item B, B, B Set the MySQL field options of the same name. =item B, B Use when producing diffs to indicate that the current table/field has been renamed from the old name as given in the attribute value. =item B Set the type of the table e.g. 'InnoDB', 'MyISAM'. This will be automatically set for tables involved in foreign key constraints if it is not already set explicitly. See L<"Table Types">. Please note that the C option is the preferred method of specifying the MySQL storage engine to use, but this method still works for backwards compatibility. =item B, B Set the tables default character set and collation order. =item B, B Set the fields character set and collation order. 
=back =cut use strict; use warnings; our ( $DEBUG, %used_names ); our $VERSION = '1.60'; $DEBUG = 0 unless defined $DEBUG; # Maximum length for most identifiers is 64, according to: # http://dev.mysql.com/doc/refman/4.1/en/identifiers.html # http://dev.mysql.com/doc/refman/5.0/en/identifiers.html my $DEFAULT_MAX_ID_LENGTH = 64; use base qw(SQL::Translator::Producer); use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Generator::DDL::MySQL; use SQL::Translator::Utils qw(debug header_comment truncate_id_uniquely parse_mysql_version batch_alter_table_statements normalize_quote_options ); # # Use only lowercase for the keys (e.g. "long" and not "LONG") # my %translate = ( # # Oracle types # varchar2 => 'varchar', long => 'text', clob => 'longtext', # # Sybase types # int => 'integer', money => 'float', real => 'double', comment => 'text', bit => 'tinyint', # # Access types # 'long integer' => 'integer', 'text' => 'text', 'datetime' => 'datetime', # # PostgreSQL types # bytea => 'BLOB', ); # # Column types that do not support length attribute # my @no_length_attr = qw/ date time timestamp datetime year /; sub preprocess_schema { my ($schema) = @_; # extra->{mysql_table_type} used to be the type. It belongs in options, so # move it if we find it. Return Engine type if found in extra or options # Similarly for mysql_charset and mysql_collate my $extra_to_options = sub { my ($table, $extra_name, $opt_name) = @_; my $extra = $table->extra; my $extra_type = delete $extra->{$extra_name}; # Now just to find if there is already an Engine or Type option... # and lets normalize it to ENGINE since: # # The ENGINE table option specifies the storage engine for the table. # TYPE is a synonym, but ENGINE is the preferred option name. 
# my $options = $table->options; # If multiple option names, normalize to the first one if (ref $opt_name) { OPT_NAME: for ( @$opt_name[1..$#$opt_name] ) { for my $idx ( 0..$#{$options} ) { my ($key, $value) = %{ $options->[$idx] }; if (uc $key eq $_) { $options->[$idx] = { $opt_name->[0] => $value }; last OPT_NAME; } } } $opt_name = $opt_name->[0]; } # This assumes that there isn't both a Type and an Engine option. OPTION: for my $idx ( 0..$#{$options} ) { my ($key, $value) = %{ $options->[$idx] }; next unless uc $key eq $opt_name; # make sure case is right on option name delete $options->[$idx]{$key}; return $options->[$idx]{$opt_name} = $value || $extra_type; } if ($extra_type) { push @$options, { $opt_name => $extra_type }; return $extra_type; } }; # Names are only specific to a given schema local %used_names = (); # # Work out which tables need to be InnoDB to support foreign key # constraints. We do this first as we need InnoDB at both ends. # foreach my $table ( $schema->get_tables ) { $extra_to_options->($table, 'mysql_table_type', ['ENGINE', 'TYPE'] ); $extra_to_options->($table, 'mysql_charset', 'CHARACTER SET' ); $extra_to_options->($table, 'mysql_collate', 'COLLATE' ); foreach my $c ( $table->get_constraints ) { next unless $c->type eq FOREIGN_KEY; # Normalize constraint names here. my $c_name = $c->name; # Give the constraint a name if it doesn't have one, so it doesn't feel # left out $c_name = $table->name . 
'_fk' unless length $c_name; $c->name( next_unused_name($c_name) ); for my $meth (qw/table reference_table/) { my $table = $schema->get_table($c->$meth) || next; # This normalizes the types to ENGINE and returns the value if its there next if $extra_to_options->($table, 'mysql_table_type', ['ENGINE', 'TYPE']); $table->options( { 'ENGINE' => 'InnoDB' } ); } } # foreach constraints my %map = ( mysql_collate => 'collate', mysql_charset => 'character set'); foreach my $f ( $table->get_fields ) { my $extra = $f->extra; for (keys %map) { $extra->{$map{$_}} = delete $extra->{$_} if exists $extra->{$_}; } my @size = $f->size; if ( !$size[0] && $f->data_type =~ /char$/ ) { $f->size( (255) ); } } } } { my ($quoting_generator, $nonquoting_generator); sub _generator { my $options = shift; return $options->{generator} if exists $options->{generator}; return normalize_quote_options($options) ? $quoting_generator ||= SQL::Translator::Generator::DDL::MySQL->new() : $nonquoting_generator ||= SQL::Translator::Generator::DDL::MySQL->new( quote_chars => [], ); } } sub produce { my $translator = shift; local $DEBUG = $translator->debug; local %used_names; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $show_warnings = $translator->show_warnings || 0; my $producer_args = $translator->producer_args; my $mysql_version = parse_mysql_version ($producer_args->{mysql_version}, 'perl') || 0; my $max_id_length = $producer_args->{mysql_max_id_length} || $DEFAULT_MAX_ID_LENGTH; my $generator = _generator({ quote_identifiers => $translator->quote_identifiers }); debug("PKG: Beginning production\n"); %used_names = (); my $create = ''; $create .= header_comment unless ($no_comments); # \todo Don't set if MySQL 3.x is set on command line my @create = "SET foreign_key_checks=0"; preprocess_schema($schema); # # Generate sql # my @table_defs =(); for my $table ( $schema->get_tables ) { # print $table->name, "\n"; push 
@table_defs, create_table($table, {
            add_drop_table => $add_drop_table,
            show_warnings  => $show_warnings,
            no_comments    => $no_comments,
            generator      => $generator,
            max_id_length  => $max_id_length,
            mysql_version  => $mysql_version
        });
    }

    # Views require MySQL 5.0.1+.
    if ($mysql_version >= 5.000001) {
        for my $view ( $schema->get_views ) {
            push @table_defs, create_view($view, {
                add_replace_view => $add_drop_table,
                show_warnings    => $show_warnings,
                no_comments      => $no_comments,
                generator        => $generator,
                max_id_length    => $max_id_length,
                mysql_version    => $mysql_version
            });
        }
    }

    # Triggers require MySQL 5.0.2+.
    if ($mysql_version >= 5.000002) {
        for my $trigger ( $schema->get_triggers ) {
            push @table_defs, create_trigger($trigger, {
                add_drop_trigger => $add_drop_table,
                show_warnings    => $show_warnings,
                no_comments      => $no_comments,
                generator        => $generator,
                max_id_length    => $max_id_length,
                mysql_version    => $mysql_version
            });
        }
    }

    # print "@table_defs\n";

    # Re-enable FK checks as the final statement.
    push @table_defs, "SET foreign_key_checks=1";

    return wantarray
      ? ($create ? $create : (), @create, @table_defs)
      : ($create . join('', map { $_ ? "$_;\n\n" : () } (@create, @table_defs)));
}

# Emit CREATE TRIGGER statements (plus optional DROPs) for one trigger.
# MySQL allows only one event per trigger, so a trigger with several
# database_events is fanned out into one named trigger per event.
sub create_trigger {
    my ($trigger, $options) = @_;
    my $generator = _generator($options);

    my $trigger_name = $trigger->name;
    debug("PKG: Looking at trigger '${trigger_name}'\n");

    my @statements;

    my $events = $trigger->database_events;
    for my $event ( @$events ) {
        my $name = $trigger_name;
        if (@$events > 1) {
            $name .= "_$event";
            warn "Multiple database events supplied for trigger '${trigger_name}', ",
                "creating trigger '${name}' for the '${event}' event\n"
                if $options->{show_warnings};
        }

        my $action = $trigger->action;
        # Wrap bare statement bodies in BEGIN ... END.
        if($action !~ /^ \s* BEGIN [\s\;] .*? [\s\;] END [\s\;]* $/six) {
            $action .= ";" unless $action =~ /;\s*\z/;
            $action = "BEGIN $action END";
        }

        push @statements, "DROP TRIGGER IF EXISTS " . 
$generator->quote($name)
            if $options->{add_drop_trigger};

        push @statements, sprintf(
            "CREATE TRIGGER %s %s %s ON %s\n FOR EACH ROW %s",
            $generator->quote($name), $trigger->perform_action_when,
            $event, $generator->quote($trigger->on_table), $action,
        );
    }

    # Tack the comment onto the first statement
    $statements[0] = "--\n-- Trigger " . $generator->quote($trigger_name) . "\n--\n" . $statements[0]
        unless $options->{no_comments};

    return @statements;
}

# Build a CREATE [OR REPLACE] VIEW statement for one view, honouring the
# MySQL-specific extras ALGORITHM, DEFINER and SQL SECURITY.
sub create_view {
    my ($view, $options) = @_;
    my $generator = _generator($options);

    my $view_name = $view->name;
    my $view_name_qt = $generator->quote($view_name);

    debug("PKG: Looking at view '${view_name}'\n");

    # Header.  Should this look like what mysqldump produces?
    my $create = '';
    $create .= "--\n-- View: $view_name_qt\n--\n" unless $options->{no_comments};
    $create .= 'CREATE';
    $create .= ' OR REPLACE' if $options->{add_replace_view};
    $create .= "\n";

    my $extra = $view->extra;
    # ALGORITHM -- only emitted when it is one MySQL recognizes.
    if( exists($extra->{mysql_algorithm}) && defined(my $algorithm = $extra->{mysql_algorithm}) ){
        $create .= " ALGORITHM = ${algorithm}\n"
            if $algorithm =~ /(?:UNDEFINED|MERGE|TEMPTABLE)/i;
    }
    # DEFINER
    if( exists($extra->{mysql_definer}) && defined(my $user = $extra->{mysql_definer}) ){
        $create .= " DEFINER = ${user}\n";
    }
    # SECURITY -- only DEFINER or INVOKER are valid.
    if( exists($extra->{mysql_security}) && defined(my $security = $extra->{mysql_security}) ){
        $create .= " SQL SECURITY ${security}\n"
            if $security =~ /(?:DEFINER|INVOKER)/i;
    }

    #Header, cont.
$create .= " VIEW $view_name_qt";

    if( my @fields = $view->fields ){
        my $list = join ', ', map { $generator->quote($_) } @fields;
        $create .= " ( ${list} )";
    }
    if( my $sql = $view->sql ){
        # do not wrap parenthesis around the selector, mysql doesn't like this
        # http://bugs.mysql.com/bug.php?id=9198
        $create .= " AS\n ${sql}\n";
    }
    # $create .= "";
    return $create;
}

# Build the CREATE TABLE DDL (and optional DROP TABLE) for one table:
# fields, indices, and constraints, plus table options in the footer.
# Returns ($drop, $create) when add_drop_table is set, else $create.
sub create_table {
    my ($table, $options) = @_;
    my $generator = _generator($options);

    my $table_name = $generator->quote($table->name);
    debug("PKG: Looking at table '$table_name'\n");

    #
    # Header.  Should this look like what mysqldump produces?
    #
    my $create = '';
    my $drop;
    $create .= "--\n-- Table: $table_name\n--\n" unless $options->{no_comments};
    $drop = qq[DROP TABLE IF EXISTS $table_name] if $options->{add_drop_table};
    $create .= "CREATE TABLE $table_name (\n";

    #
    # Fields
    #
    my @field_defs;
    for my $field ( $table->get_fields ) {
        push @field_defs, create_field($field, $options);
    }

    #
    # Indices
    #
    my @index_defs;
    my %indexed_fields;
    for my $index ( $table->get_indices ) {
        push @index_defs, create_index($index, $options);
        $indexed_fields{ $_ } = 1 for $index->fields;
    }

    #
    # Constraints -- need to handle more than just FK. -ky
    #
    my @constraint_defs;
    my @constraints = $table->get_constraints;
    for my $c ( @constraints ) {
        my $constr = create_constraint($c, $options);
        push @constraint_defs, $constr if($constr);

        # MySQL requires the local side of a FK to be indexed; add an
        # index on the first FK field if none exists yet.
        unless ( $indexed_fields{ ($c->fields())[0] } || $c->type ne FOREIGN_KEY ) {
            push @index_defs, "INDEX (" . $generator->quote(($c->fields())[0]) . ")";
            $indexed_fields{ ($c->fields())[0] } = 1;
        }
    }

    $create .= join(",\n", map { " $_" } @field_defs, @index_defs, @constraint_defs );

    #
    # Footer
    #
    $create .= "\n)";
    $create .= generate_table_options($table, $options) || '';
    # $create .= ";\n\n";

    return $drop ? 
($drop,$create) : $create;
}

# Render the table-options suffix of CREATE TABLE / ALTER TABLE:
# ENGINE, DEFAULT CHARACTER SET, COLLATE, UNION and comment.  Options
# already present on the table win over the mysql_* extras.
sub generate_table_options {
    my ($table, $options) = @_;
    my $create;

    my $table_type_defined = 0;
    my $generator = _generator($options);
    my $charset = $table->extra('mysql_charset');
    my $collate = $table->extra('mysql_collate');
    my $union = undef;
    for my $t1_option_ref ( $table->options ) {
        my($key, $value) = %{$t1_option_ref};
        $table_type_defined = 1 if uc $key eq 'ENGINE' or uc $key eq 'TYPE';
        if (uc $key eq 'CHARACTER SET') {
            $charset = $value;
            next;
        } elsif (uc $key eq 'COLLATE') {
            $collate = $value;
            next;
        } elsif (uc $key eq 'UNION') {
            $union = '(' . join(', ', map { $generator->quote($_) } @$value) . ')';
            next;
        }
        $create .= " $key=$value";
    }

    # Fall back to the mysql_table_type extra only when no ENGINE/TYPE
    # option was given explicitly.
    my $mysql_table_type = $table->extra('mysql_table_type');
    $create .= " ENGINE=$mysql_table_type"
        if $mysql_table_type && !$table_type_defined;
    my $comments = $table->comments;

    $create .= " DEFAULT CHARACTER SET $charset" if $charset;
    $create .= " COLLATE $collate" if $collate;
    $create .= " UNION=$union" if $union;
    $create .= qq[ comment='$comments'] if $comments;
    return $create;
}

# Render one column definition: name, (translated) type, size, charset,
# qualifiers, nullability, default, comment and auto_increment.
sub create_field {
    my ($field, $options) = @_;
    my $generator = _generator($options);

    my $field_name = $field->name;
    debug("PKG: Looking at field '$field_name'\n");
    my $field_def = $generator->quote($field_name);

    # data type and size
    my $data_type = $field->data_type;
    my @size      = $field->size;
    my %extra     = $field->extra;
    my $list      = $extra{'list'} || [];
    my $commalist = join( ', ', map { __PACKAGE__->_quote_string($_) } @$list );
    my $charset   = $extra{'mysql_charset'};
    my $collate   = $extra{'mysql_collate'};

    my $mysql_version = $options->{mysql_version} || 0;
    #
    # Oracle "number" type -- figure best MySQL type
    #
    if ( lc $data_type eq 'number' ) {
        # not an integer
        if ( scalar @size > 1 ) {
            $data_type = 'double';
        }
        elsif ( $size[0] && $size[0] >= 12 ) {
            $data_type = 'bigint';
        }
        elsif ( $size[0] && $size[0] <= 1 ) {
            $data_type = 'tinyint';
        }
        else {
            $data_type = 'int';
        }
    }
    #
    # Convert a large Oracle varchar to "text"
    # (not
necessary as of 5.0.3 http://dev.mysql.com/doc/refman/5.0/en/char.html)
    #
    elsif ( $data_type =~ /char/i && $size[0] > 255 ) {
        # Pre-5.0.3 servers cap VARCHAR at 255; larger ones become TEXT.
        unless ($size[0] <= 65535 && $mysql_version >= 5.000003 ) {
            $data_type = 'text';
            @size      = ();
        }
    }
    elsif ( $data_type =~ /boolean/i ) {
        # BOOLEAN exists from MySQL 4 on; emulate with an enum before that.
        if ($mysql_version >= 4) {
            $data_type = 'boolean';
        } else {
            $data_type = 'enum';
            $commalist = "'0','1'";
        }
    }
    elsif ( exists $translate{ lc $data_type } ) {
        $data_type = $translate{ lc $data_type };
    }

    # TEXT/BLOB types never take a length in MySQL.
    @size = () if $data_type =~ /(text|blob)/i;

    # A lone precision on floats needs an explicit zero scale.
    if ( $data_type =~ /(double|float)/ && scalar @size == 1 ) {
        push @size, '0';
    }

    $field_def .= " $data_type";

    if ( lc($data_type) eq 'enum' || lc($data_type) eq 'set') {
        $field_def .= '(' . $commalist . ')';
    }
    elsif ( defined $size[0] && $size[0] > 0 && ! grep lc($data_type) eq $_, @no_length_attr ) {
        $field_def .= '(' . join( ', ', @size ) . ')';
    }

    # char sets
    $field_def .= " CHARACTER SET $charset" if $charset;
    $field_def .= " COLLATE $collate" if $collate;

    # MySQL qualifiers
    for my $qual ( qw[ binary unsigned zerofill ] ) {
        my $val = $extra{ $qual } || $extra{ uc $qual } or next;
        $field_def .= " $qual";
    }
    for my $qual ( 'character set', 'collate', 'on update' ) {
        my $val = $extra{ $qual } || $extra{ uc $qual } or next;
        if ( ref $val ) {
            $field_def .= " $qual ${$val}";
        }
        else {
            $field_def .= " $qual $val";
        }
    }

    # Null?
    if ( $field->is_nullable ) {
        $field_def .= ' NULL';
    }
    else {
        $field_def .= ' NOT NULL';
    }

    # Default?
    __PACKAGE__->_apply_default_value(
        $field,
        \$field_def,
        [
            'NULL' => \'NULL',
        ],
    );

    if ( my $comments = $field->comments ) {
        $comments = __PACKAGE__->_quote_string($comments);
        $field_def .= qq[ comment $comments];
    }

    # auto_increment?
$field_def .= " auto_increment" if $field->is_auto_increment;

    return $field_def;
}

# Single-quote a string for MySQL, doubling backslashes and quotes.
sub _quote_string {
    my ($self, $string) = @_;

    $string =~ s/([\\'])/$1$1/g;
    return qq{'$string'};
}

# ALTER TABLE ... ADD <index definition>.
sub alter_create_index {
    my ($index, $options) = @_;

    my $table_name = _generator($options)->quote($index->table->name);
    return join( ' ',
        'ALTER TABLE',
        $table_name,
        'ADD',
        create_index(@_)
    );
}

# Render one index definition ("INDEX name (cols)" or "<TYPE> INDEX ...");
# the index name is truncated to the configured max identifier length.
sub create_index {
    my ( $index, $options ) = @_;
    my $generator = _generator($options);

    return join(
        ' ',
        map { $_ || () }
        lc $index->type eq 'normal' ? 'INDEX' : $index->type . ' INDEX',
        $index->name
            ? $generator->quote(truncate_id_uniquely(
                $index->name,
                $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH
              ))
            : '',
        '(' . join( ', ', map { $generator->quote($_) } $index->fields ) . ')'
    );
}

# ALTER TABLE ... DROP INDEX <name>.
sub alter_drop_index {
    my ($index, $options) = @_;

    my $table_name = _generator($options)->quote($index->table->name);

    return join( ' ',
        'ALTER TABLE',
        $table_name,
        'DROP',
        'INDEX',
        $index->name || $index->fields
    );
}

# ALTER TABLE ... DROP {PRIMARY KEY | FOREIGN KEY name | INDEX name}.
sub alter_drop_constraint {
    my ($c, $options) = @_;

    my $generator = _generator($options);
    my $table_name = $generator->quote($c->table->name);

    my @out = ('ALTER','TABLE',$table_name,'DROP');
    if($c->type eq PRIMARY_KEY) {
        push @out, $c->type;
    }
    else {
        # UNIQUE constraints are dropped as indexes in MySQL.
        push @out, ($c->type eq FOREIGN_KEY ? $c->type : "INDEX"),
            $generator->quote($c->name);
    }
    return join(' ',@out);
}

# ALTER TABLE ... ADD <constraint definition>.
sub alter_create_constraint {
    my ($index, $options) = @_;

    my $table_name = _generator($options)->quote($index->table->name);
    return join( ' ',
        'ALTER TABLE',
        $table_name,
        'ADD',
        create_constraint(@_)
    );
}

# Render one constraint definition (PRIMARY KEY, UNIQUE or FOREIGN KEY);
# returns undef/empty for anything else or when there are no fields.
sub create_constraint {
    my ($c, $options) = @_;

    my $generator = _generator($options);
    my $leave_name = $options->{leave_name} || undef;

    my $reference_table_name = $generator->quote($c->reference_table);

    my @fields = $c->fields or return;

    if ( $c->type eq PRIMARY_KEY ) {
        return 'PRIMARY KEY (' . join(", ", map { $generator->quote($_) } @fields) . 
')';
    }
    elsif ( $c->type eq UNIQUE ) {
        # UNIQUE [name ](col, ...) -- the name, when present, is truncated
        # to the configured maximum identifier length.
        return sprintf 'UNIQUE %s(%s)',
            ((defined $c->name && $c->name)
                ? $generator->quote(
                    truncate_id_uniquely( $c->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH ),
                  ) . ' '
                : ''
            ),
            ( join ', ', map { $generator->quote($_) } @fields ),
        ;
    }
    elsif ( $c->type eq FOREIGN_KEY ) {
        #
        # Make sure FK field is indexed or MySQL complains.
        #

        my $table = $c->table;
        my $c_name = truncate_id_uniquely( $c->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH );

        my $def = join(' ',
            'CONSTRAINT',
            ($c_name ? $generator->quote($c_name) : () ),
            'FOREIGN KEY'
        );

        $def .= ' ('. join( ', ', map { $generator->quote($_) } @fields ) . ')';

        $def .= ' REFERENCES ' . $reference_table_name;

        # When no reference fields are given, fall back to the referenced
        # table's primary key.
        my @rfields = map { $_ || () } $c->reference_fields;
        unless ( @rfields ) {
            my $rtable_name = $c->reference_table;
            if ( my $ref_table = $table->schema->get_table( $rtable_name ) ) {
                push @rfields, $ref_table->primary_key;
            }
            else {
                warn "Can't find reference table '$rtable_name' " . "in schema\n"
                    if $options->{show_warnings};
            }
        }

        if ( @rfields ) {
            $def .= ' (' . join( ', ', map { $generator->quote($_) } @rfields ) . ')';
        }
        else {
            warn "FK constraint on " . $table->name . '.' . join('', @fields) . " has no reference fields\n"
                if $options->{show_warnings};
        }

        if ( $c->match_type ) {
            # BUG FIX: the original line
            #   $def .= ' MATCH ' . ( $c->match_type =~ /full/i ) ? 'FULL' : 'PARTIAL';
            # mis-parses because '.' binds tighter than '?:' -- the whole
            # concatenation became the (always-true) ternary condition, so
            # the bare string 'FULL' was appended regardless of match type
            # and the ' MATCH ' keyword was lost entirely.  Parenthesizing
            # the ternary emits the intended ' MATCH FULL' / ' MATCH PARTIAL'.
            $def .= ' MATCH ' . ( ( $c->match_type =~ /full/i ) ? 'FULL' : 'PARTIAL' );
        }

        if ( $c->on_delete ) {
            $def .= ' ON DELETE '. $c->on_delete;
        }

        if ( $c->on_update ) {
            $def .= ' ON UPDATE '. 
$c->on_update;
        }

        return $def;
    }

    # Any other constraint type is not representable here.
    return undef;
}

# ALTER TABLE <name><table options> -- applies option changes only.
sub alter_table {
    my ($to_table, $options) = @_;

    my $table_options = generate_table_options($to_table, $options) || '';
    my $table_name = _generator($options)->quote($to_table->name);
    my $out = sprintf('ALTER TABLE %s%s',
        $table_name,
        $table_options);

    return $out;
}

# MySQL renames a column via CHANGE COLUMN, so a rename is just an alter.
sub rename_field { alter_field(@_) }

# ALTER TABLE ... CHANGE COLUMN <old> <new definition>.
sub alter_field {
    my ($from_field, $to_field, $options) = @_;

    my $generator = _generator($options);
    my $table_name = $generator->quote($to_field->table->name);

    my $out = sprintf('ALTER TABLE %s CHANGE COLUMN %s %s',
        $table_name,
        $generator->quote($from_field->name),
        create_field($to_field, $options));

    return $out;
}

# ALTER TABLE ... ADD COLUMN <definition>.
sub add_field {
    my ($new_field, $options) = @_;

    my $table_name = _generator($options)->quote($new_field->table->name);

    my $out = sprintf('ALTER TABLE %s ADD COLUMN %s',
        $table_name,
        create_field($new_field, $options));

    return $out;
}

# ALTER TABLE ... DROP COLUMN <name>.
sub drop_field {
    my ($old_field, $options) = @_;

    my $generator = _generator($options);
    my $table_name = $generator->quote($old_field->table->name);

    my $out = sprintf('ALTER TABLE %s DROP COLUMN %s',
        $table_name,
        $generator->quote($old_field->name));

    return $out;
}

# Collapse a diff hash into as few ALTER TABLE statements as possible,
# working around InnoDB's refusal to drop and re-add a same-named FK in
# a single statement.
sub batch_alter_table {
    my ($table, $diff_hash, $options) = @_;

    # InnoDB has an issue with dropping and re-adding a FK constraint under the
    # name in a single alter statement, see: http://bugs.mysql.com/bug.php?id=13741
    #
    # We have to work round this.

    my %fks_to_alter;
    my %fks_to_drop = map {
        $_->type eq FOREIGN_KEY ? 
( $_->name => $_ ) : ( )
    } @{$diff_hash->{alter_drop_constraint} };

    # FKs that are both dropped and re-created must be split into a
    # separate, earlier DROP statement (see bug link above).
    my %fks_to_create = map {
        if ( $_->type eq FOREIGN_KEY) {
            $fks_to_alter{$_->name} = $fks_to_drop{$_->name} if $fks_to_drop{$_->name};
            ( $_->name => $_ );
        } else { ( ) }
    } @{$diff_hash->{alter_create_constraint} };

    my @drop_stmt;
    if (scalar keys %fks_to_alter) {
        $diff_hash->{alter_drop_constraint} = [
            grep { !$fks_to_alter{$_->name} } @{ $diff_hash->{alter_drop_constraint} }
        ];

        @drop_stmt = batch_alter_table($table, { alter_drop_constraint => [ values %fks_to_alter ] }, $options);
    }

    my @stmts = batch_alter_table_statements($diff_hash, $options);

    #quote
    my $generator = _generator($options);

    # rename_table makes things a bit more complex
    my $renamed_from = "";
    $renamed_from = $generator->quote($diff_hash->{rename_table}[0][0]->name)
        if $diff_hash->{rename_table} && @{$diff_hash->{rename_table}};

    return unless @stmts;

    # Just zero or one stmts. return now
    return (@drop_stmt,@stmts) unless @stmts > 1;

    # Now strip off the 'ALTER TABLE xyz' of all but the first one
    my $table_name = $generator->quote($table->name);

    my $re = $renamed_from
           ? qr/^ALTER TABLE (?:\Q$table_name\E|\Q$renamed_from\E) /
           : qr/^ALTER TABLE \Q$table_name\E /;

    my $first = shift @stmts;
    my ($alter_table) = $first =~ /($re)/;

    # Pad continuation clauses to line up under the first ALTER TABLE.
    my $padd = " " x length($alter_table);

    return @drop_stmt, join( ",\n", $first, map { s/$re//; $padd . $_ } @stmts);
}

# DROP TABLE, preceded by statements dropping its FK constraints so the
# table can be removed cleanly.
sub drop_table {
    my ($table, $options) = @_;

    return (
        # Drop (foreign key) constraints so table drops cleanly
        batch_alter_table(
            $table, { alter_drop_constraint => [ grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints ] }, $options
        ),
        'DROP TABLE ' . 
_generator($options)->quote($table),
    );
}

# ALTER TABLE <old> RENAME TO <new>.
sub rename_table {
    my ($old_table, $new_table, $options) = @_;

    my $generator = _generator($options);
    my $old_table_name = $generator->quote($old_table);
    my $new_table_name = $generator->quote($new_table);

    return "ALTER TABLE $old_table_name RENAME TO $new_table_name";
}

# Return $name if unused so far in this schema, otherwise $name_1,
# $name_2, ... ; records the result in the file-scoped %used_names.
sub next_unused_name {
    my $name = shift || '';
    if ( !defined($used_names{$name}) ) {
        $used_names{$name} = $name;
        return $name;
    }

    my $i = 1;
    while ( defined($used_names{$name . '_' . $i}) ) {
        ++$i;
    }
    $name .= '_' . $i;
    $used_names{$name} = $name;
    return $name;
}

1;

=pod

=head1 SEE ALSO

SQL::Translator, http://www.mysql.com/.

=head1 AUTHORS

darren chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.

=cut
SQL-Translator-1.60/lib/SQL/Translator/Producer/XML.pm0000644000000000000000000000124413473550070022350 0ustar00rootroot00000000000000
package SQL::Translator::Producer::XML;

=pod

=head1 NAME

SQL::Translator::Producer::XML - Alias to XML::SQLFairy producer

=head1 DESCRIPTION

Previous versions of SQL::Translator included an XML producer, but the
namespace has since been further subdivided.  Therefore, this module is
now just an alias to the XML::SQLFairy producer.

=head1 SEE ALSO

SQL::Translator::Producer::XML::SQLFairy.

=head1 AUTHOR

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut

use strict;
use warnings;
our $DEBUG;
our $VERSION = '1.60';
$DEBUG = 1 unless defined $DEBUG;

use SQL::Translator::Producer::XML::SQLFairy;

# This producer is a pure alias: produce() is the SQLFairy XML produce().
*produce = \&SQL::Translator::Producer::XML::SQLFairy::produce;

1;
SQL-Translator-1.60/lib/SQL/Translator/Producer/SQLServer.pm0000644000000000000000000000435213473550070023541 0ustar00rootroot00000000000000
package SQL::Translator::Producer::SQLServer;

use strict;
use warnings;
our ( $DEBUG, $WARN );
our $VERSION = '1.60';
$DEBUG = 1 unless defined $DEBUG;

use SQL::Translator::Schema::Constants;
use SQL::Translator::Utils qw(debug header_comment);
use SQL::Translator::Generator::DDL::SQLServer;

# Producer entry point: delegates entirely to the nextgen SQLServer DDL
# generator, forwarding the comment and drop-table preferences.
sub produce {
    my $translator = shift;
    SQL::Translator::Generator::DDL::SQLServer->new(
        add_comments   => !$translator->no_comments,
        add_drop_table => $translator->add_drop_table,
    )->schema($translator->schema)
}

1;

=head1 NAME

SQL::Translator::Producer::SQLServer - MS SQLServer producer for SQL::Translator

=head1 SYNOPSIS

  use SQL::Translator;

  my $t = SQL::Translator->new( parser => '...', producer => 'SQLServer' );
  $t->translate;

=head1 DESCRIPTION

This is currently a thin wrapper around the nextgen
L<SQL::Translator::Generator::DDL::SQLServer> DDL maker.

=head1 Extra Attributes

=over 4

=item field.list

List of values for an enum field.

=back

=head1 TODO

 * !! Write some tests !!
 * Reserved words list needs updating to SQLServer.
 * Triggers, Procedures and Views DO NOT WORK

    # Text of view is already a 'create view' statement so no need to
    # be fancy
    foreach ( $schema->get_views ) {
        my $name = $_->name();
        $output .= "\n\n";
        $output .= "--\n-- View: $name\n--\n\n" unless $no_comments;
        my $text = $_->sql();
        $text =~ s/\r//g;
        $output .= "$text\nGO\n";
    }

    # Text of procedure already has the 'create procedure' stuff
    # so there is no need to do anything fancy. However, we should
    # think about doing fancy stuff with granting permissions and
    # so on.
foreach ( $schema->get_procedures ) { my $name = $_->name(); $output .= "\n\n"; $output .= "--\n-- Procedure: $name\n--\n\n" unless $no_comments; my $text = $_->sql(); $text =~ s/\r//g; $output .= "$text\nGO\n"; } =head1 SEE ALSO L =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/Diagram.pm0000644000000000000000000004775113473550070023271 0ustar00rootroot00000000000000package SQL::Translator::Producer::Diagram; =head1 NAME SQL::Translator::Producer::Diagram - ER diagram producer for SQL::Translator =head1 SYNOPSIS Use via SQL::Translator: use SQL::Translator; my $t = SQL::Translator->new( from => 'MySQL', to => 'Diagram', producer_args => { # All args are optional out_file => 'schema.png',# if not provided will return from translate() output_type => 'png', # is default or 'jpeg' title => 'My Schema', # default is filename font_size => 'medium', # is default or 'small,' 'large' imap_file => '', # filename to write image map coords imap_url => '', # base URL for image map gutter => 30 # is default, px distance b/w cols num_columns => 5, # the number of columns no_lines => 1, # do not draw lines to show FKs add_color => 1, # give it some color show_fk_only => 1, # show only fields used in FKs join_pk_only => 1, # use only primary keys to figure PKs natural_join => 1, # intuit FKs if not defined skip_fields => [...], # list* of field names to exclude skip_tables => [...], # list* of table names to exclude skip_tables_like => [...], # list* of regexen to exclude tables } ) or die SQL::Translator->error; $t->translate; * "list" can be either an array-ref or a comma-separated string =cut use strict; use warnings; use GD; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug); our $DEBUG; our $VERSION = 
'1.60';
$DEBUG = 0 unless defined $DEBUG;

# Whitelists for the two validated user options.
use constant VALID_FONT_SIZE => {
    small  => 1,
    medium => 1,
    large  => 1,
    huge   => 1,
};

use constant VALID_IMAGE_TYPE => {
    png  => 1,
    jpeg => 1,
};

# Producer entry point: lays the schema's tables out in columns, draws
# boxes, fields and FK connector lines with GD, optionally writes an
# HTML image map, and either writes the image to out_file or returns
# the raw image data.
sub produce {
    my $t      = shift;
    my $schema = $t->schema;
    my $args   = $t->producer_args;
    local $DEBUG = $t->debug;
    debug("Schema =\n", Dumper( $schema )) if $DEBUG;
    debug("Producer args =\n", Dumper( $args )) if $DEBUG;

    my $out_file     = $args->{'out_file'}    || '';
    my $output_type  = $args->{'output_type'} || 'png';
    my $title        = $args->{'title'}       || $t->filename;
    my $font_size    = $args->{'font_size'}   || 'medium';
    my $imap_file    = $args->{'imap_file'}   || '';
    my $imap_url     = $args->{'imap_url'}    || '';
    my $gutter       = $args->{'gutter'}      || 30; # distance b/w columns
    my $num_columns  = $args->{'num_columns'} || $args->{'no_columns'} || '';
    my $no_lines     = $args->{'no_lines'};
    my $add_color    = $args->{'add_color'};
    my $show_fk_only = $args->{'show_fk_only'};
    my $join_pk_only = $args->{'join_pk_only'};
    my $natural_join = $args->{'natural_join'} || $join_pk_only;

    # The skip_* options accept either array refs or comma-separated strings.
    my %skip_field = map { $_, 1 } (
        ref $args->{'skip_fields'} eq 'ARRAY'
        ? @{ $args->{'skip_fields'} }
        : split ( /\s*,\s*/, $args->{'skip_fields'}||'' )
    );

    my %skip_table = map { $_, 1 } (
        ref $args->{'skip_tables'} eq 'ARRAY'
        ? @{ $args->{'skip_tables'} }
        : split ( /\s*,\s*/, $args->{'skip_tables'}||'' )
    );

    my @skip_tables_like = map { qr/$_/ } (
        ref $args->{'skip_tables_like'} eq 'ARRAY'
        ? @{ $args->{'skip_tables_like'} }
        : split ( /\s*,\s*/, $args->{'skip_tables_like'}||'' )
    );

    # Natural-join mode orders tables by a DFS over the join graph so
    # related tables land near each other.
    my @table_names;
    if ( $natural_join ) {
        $schema->make_natural_joins(
            join_pk_only => $join_pk_only,
            skip_fields  => $args->{'skip_fields'},
        );

        my $g = $schema->as_graph_pm;
        my $d = Graph::Traversal::DFS->new( $g, next_alphabetic => 1 );
        $d->preorder;

        @table_names = $d->dfs;
    }
    else {
        @table_names = map { $_->name } $schema->get_tables;
    }

    die "Invalid image type '$output_type'"
        unless VALID_IMAGE_TYPE->{ $output_type };
    die "Invalid font size '$font_size'"
        unless VALID_FONT_SIZE->{ $font_size };

    #
    # Layout the image.
    #
    my $font
        = $font_size eq 'small'  ? gdTinyFont
        : $font_size eq 'medium' ? gdSmallFont
        : $font_size eq 'large'  ? gdLargeFont
        :                          gdGiantFont;

    # Default the column count to roughly sqrt(#tables).
    my $num_tables = scalar @table_names;
    $num_columns = 0 unless $num_columns =~ /^\d+$/;
    $num_columns ||= sprintf( "%.0f", sqrt( $num_tables ) + .5 );
    $num_columns ||= .5;
    my $no_per_col = sprintf( "%.0f", $num_tables/$num_columns + .5 );

    my @shapes;
    my ( $max_x, $max_y );          # the furthest x and y used
    my $orig_y      = 40;           # used to reset y for each column
    my ( $x, $y )   = (30,$orig_y); # where to start
    my $cur_col     = 1;            # the current column
    my $no_this_col = 0;            # number of tables in current column
    my $this_col_x  = $x;           # current column's x
    my %nj_registry;                # for locations of fields for natural joins
    my @fk_registry;                # for locations of fields for foreign keys
    my %table_x;                    # for max x of each table
    my $field_no;                   # counter to give distinct no. to each field
    my %coords;                     # holds fields coordinates
    my @imap_coords;                # for making clickable image map
    my %legend;

    TABLE:
    for my $table_name ( @table_names ) {
        my $table = $schema->get_table( $table_name );

        if ( @skip_tables_like or keys %skip_table ) {
            next TABLE if $skip_table{ $table_name };
            for my $regex ( @skip_tables_like ) {
                next TABLE if $table_name =~ $regex;
            }
        }

        my $top = $y;
        push @shapes, [ 'string', $font, $this_col_x, $y, $table_name, 'black' ];

        $y += $font->height + 2;
        my $below_table_name = $y;
        $y += 2;
        my $this_max_x = $this_col_x + ($font->width * length($table_name));

        debug("Processing table '$table_name'");

        my @fields = $table->get_fields;
        debug("Fields = ", join(', ', map { $_->name } @fields)) if $DEBUG;

        # First pass over the fields: build name/type/attribute triples and
        # track the widest name and description for column alignment.
        my ( @fld_desc, $max_name, $max_desc );
        for my $f ( @fields ) {
            my $name  = $f->name or next;
            my $is_pk = $f->is_primary_key;
            my @attr;

            #
            # Decide if we should skip this field.
            #
            if ( $show_fk_only ) {
                next unless $is_pk || $f->is_foreign_key;
            }

            if ( $is_pk ) {
                push @attr, 'PK';
                $legend{'Primary key'} = '[PK]';
            }

            if ( $f->is_unique ) {
                push @attr, 'U';
                $legend{'Unique constraint'} = '[U]';
            }

            if ( $f->is_foreign_key ) {
                push @attr, 'FK';
                $legend{'Foreign Key'} = '[FK]';
            }

            my $attr = '';
            if ( @attr ) {
                $attr .= '[' . join(', ', @attr) . ']';
            }

            my $desc = $f->data_type;
            $desc .= '('.$f->size.')' if $f->size &&
                $f->data_type =~ /^(VAR)?CHAR2?$/i;

            my $nlen  = length $name;
            my $dlen  = length $desc;
            $max_name = $nlen if $nlen > ($max_name||0);
            $max_desc = $dlen if $dlen > ($max_desc||0);
            push @fld_desc, [ $name, $desc, $f->{'name'}, $is_pk, $attr ];
        }

        $max_name += 2;
        $max_desc += 2;

        # Second pass: render each field line, record its left/right link
        # anchor coordinates and its clickable image-map region.
        for my $fld_desc ( @fld_desc ) {
            my ( $name, $desc, $orig_name, $is_pk, $attr ) = @$fld_desc;
            my $diff1 = $max_name - length $name;
            my $diff2 = $max_desc - length $desc;
            $name    .= ' ' x $diff1;
            $desc    .= ' ' x $diff2;
            $desc     = $name . $desc . $attr;

            push @shapes, [ 'string', $font, $this_col_x, $y, $desc, 'black' ];
            $y         += $font->height + 2;
            my $length  = $this_col_x + ( $font->width * length( $desc ) );
            $this_max_x = $length if $length > $this_max_x;

            my $constraints = $table->{'fields'}{ $orig_name }{'constraints'};

            if ( $natural_join && !$skip_field{ $orig_name } ) {
                push @{ $nj_registry{ $orig_name } }, $table_name;
            }

            my $y_link = $y - $font->height/2;
            $coords{ $table_name }{ $orig_name }{'coords'} = {
                left     => [ $this_col_x - 6, $y_link ],
                right    => [ $length + 2 , $y_link ],
                table    => $table_name,
                field_no => ++$field_no,
                is_pk    => $is_pk,
                fld_name => $orig_name,
            };

            push @imap_coords, [
                $imap_url."#$table_name-$orig_name",
                $this_col_x, $y - $font->height, $length, $y_link,
            ];
        }

        # In FK mode, remember (referenced, referencing) field pairs for
        # the connector-drawing phase below.
        unless ( $natural_join ) {
            for my $c ( $table->get_constraints ) {
                next unless $c->type eq FOREIGN_KEY;
                my $fk_table = $c->reference_table or next;

                for my $field_name ( $c->fields ) {
                    for my $fk_field ( $c->reference_fields ) {
                        next unless defined $schema->get_table( $fk_table );
                        push @fk_registry, [
                            [ $fk_table , $fk_field ],
                            [ $table_name, $field_name ],
                        ];
                    }
                }
            }
        }

        $this_max_x += 5;
        $table_x{ $table_name } = $this_max_x + 5;
        push @shapes, [ 'line', $this_col_x - 5, $below_table_name, $this_max_x, $below_table_name, 'black' ];
        my @bounds = ( $this_col_x - 5, $top - 5, $this_max_x, $y + 5 );
        if ( $add_color ) {
            # Background fills go on the front of @shapes so they are
            # drawn first, underneath everything else.
            unshift @shapes, [
                'filledRectangle',
                $bounds[0], $bounds[1],
                $this_max_x, $below_table_name,
                'khaki'
            ];

            unshift @shapes, [ 'filledRectangle', @bounds, 'white' ];
        }

        push @imap_coords, [
            $imap_url."#$table_name",
            $bounds[0], $bounds[1], $this_max_x, $below_table_name,
        ];

        push @shapes, [ 'rectangle', @bounds, 'black' ];
        $max_x = $this_max_x if $this_max_x > ($max_x||0);
        $y    += 25;

        if ( ++$no_this_col == $no_per_col ) { # if we've filled up this column
            $cur_col++;                        # up the column number
            $no_this_col = 0;                  # reset the number of tables
            $max_x += $gutter;                 # push the x over for next column
            $this_col_x = $max_x;              # remember the max x for this col
            $max_y = $y if $y > ($max_y||0);   # note the max y
            $y = $orig_y;                      # reset the y for next column
        }
    }

    #
    # Connect the lines.
    #
    my %horz_taken;
    my %done;
    unless ( $no_lines ) {
        # Each "bunch" is a set of field positions that should be joined:
        # all same-named fields in natural-join mode, FK pairs otherwise.
        my @position_bunches;

        if ( $natural_join ) {
            for my $field_name ( keys %nj_registry ) {
                my @positions;
                my @table_names = @{ $nj_registry{ $field_name } || [] } or next;
                next if scalar @table_names == 1;

                for my $table_name ( @table_names ) {
                    push @positions,
                        $coords{ $table_name }{ $field_name }{'coords'};
                }

                push @position_bunches, [ @positions ];
            }
        }
        else {
            for my $pair ( @fk_registry ) {
                push @position_bunches, [
                    $coords{$pair->[0][0]}{ $pair->[0][1] }{'coords'},
                    $coords{$pair->[1][0]}{ $pair->[1][1] }{'coords'},
                ];
            }
        }

        my $is_directed = $natural_join ? 0 : 1;

        for my $bunch ( @position_bunches ) {
            my @positions = @$bunch;

            for my $i ( 0 .. $#positions ) {
                my $pos1        = $positions[ $i ];
                my ( $ax, $ay ) = @{ $pos1->{'left'} || [] } or next;
                my ( $bx, $by ) = @{ $pos1->{'right'} || [] } or next;
                my $table1      = $pos1->{'table'};
                my $fno1        = $pos1->{'field_no'};
                my $is_pk       = $pos1->{'is_pk'};
                next if $join_pk_only and !$is_pk;

                for my $j ( 0 .. $#positions ) {
                    my $pos2        = $positions[ $j ];
                    my ( $cx, $cy ) = @{ $pos2->{'left'} || [] } or next;
                    my ( $dx, $dy ) = @{ $pos2->{'right'} || [] } or next;
                    my $table2      = $pos2->{'table'};
                    my $fno2        = $pos2->{'field_no'};
                    next if $table1 eq $table2;
                    next if $done{ $fno1 }{ $fno2 };
                    next if $fno1 == $fno2;

                    # Choose the closest pair of box edges (Manhattan
                    # distance) from the four left/right combinations.
                    my @distances = ();
                    push @distances, [
                        abs ( $ax - $cx ) + abs ( $ay - $cy ),
                        [ $ax, $ay, $cx, $cy ],
                        [ 'left', 'left' ]
                    ];
                    push @distances, [
                        abs ( $ax - $dx ) + abs ( $ay - $dy ),
                        [ $ax, $ay, $dx, $dy ],
                        [ 'left', 'right' ],
                    ];
                    push @distances, [
                        abs ( $bx - $cx ) + abs ( $by - $cy ),
                        [ $bx, $by, $cx, $cy ],
                        [ 'right', 'left' ],
                    ];
                    push @distances, [
                        abs ( $bx - $dx ) + abs ( $by - $dy ),
                        [ $bx, $by, $dx, $dy ],
                        [ 'right', 'right' ],
                    ];
                    @distances   = sort { $a->[0] <=> $b->[0] } @distances;
                    my $shortest = $distances[0];
                    my ( $x1, $y1, $x2, $y2 ) = @{ $shortest->[1] };
                    my ( $side1, $side2 )     = @{ $shortest->[2] };
                    my ( $start, $end );
                    my $offset     = 9;
                    my $col1_right = $table_x{ $table1 };
                    my $col2_right = $table_x{ $table2 };

                    # Nudge vertical runs sideways until they land on an
                    # unoccupied x so parallel connectors don't overlap.
                    my $diff = 0;
                    if ( $x1 == $x2 ) {
                        while ( $horz_taken{ $x1 + $diff } ) {
                            $diff = $side1 eq 'left' ? $diff - 2 : $diff + 2;
                        }
                        $horz_taken{ $x1 + $diff } = 1;
                    }

                    if ( $side1 eq 'left' ) {
                        $start = $x1 - $offset + $diff;
                    }
                    else {
                        $start = $col1_right + $diff;
                    }

                    if ( $side2 eq 'left' ) {
                        $end = $x2 - $offset + $diff;
                    }
                    else {
                        $end = $col2_right + $diff;
                    }

                    push @shapes, [ 'line', $x1, $y1, $start, $y1, 'cadetblue' ];
                    push @shapes, [ 'line', $start, $y1, $end, $y2, 'cadetblue' ];
                    push @shapes, [ 'line', $end, $y2, $x2, $y2, 'cadetblue' ];

                    # FK mode draws a small arrowhead at the target end.
                    if ( $is_directed ) {
                        if (
                            $side1 eq 'right' && $side2 eq 'left'
                            ||
                            $side1 eq 'left' && $side2 eq 'left'
                        ) {
                            push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2, $y2, 'cadetblue' ];
                            push @shapes, [ 'line', $x2 - 3, $y2 + 3, $x2, $y2, 'cadetblue' ];
                            push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2 - 3, $y2 +3, 'cadetblue' ];
                        }
                        else {
                            push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2, $y2, 'cadetblue' ];
                            push @shapes, [ 'line', $x2 + 3, $y2 + 3, $x2, $y2, 'cadetblue' ];
                            push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2 + 3, $y2 +3, 'cadetblue' ];
                        }
                    }

                    $done{ $fno1 }{ $fno2 } = 1;
                    $done{ $fno2 }{ $fno1 } = 1;
                }
            }
        }
    }

    #
    # Add the title, legend and signature.
    #
    my $large_font = gdLargeFont;
    my $title_len  = $large_font->width * length $title;
    push @shapes, [
        'string', $large_font, $max_x/2 - $title_len/2, 10, $title, 'black'
    ];

    if ( %legend ) {
        $max_y += 5;
        push @shapes, [
            'string', $font, $x, $max_y - $font->height - 4, 'Legend', 'black'
        ];
        $max_y += $font->height + 4;

        my $longest;
        for my $len ( map { length $_ } values %legend ) {
            $longest = $len if $len > ($longest||0);
        }
        $longest += 2;

        while ( my ( $key, $shape ) = each %legend ) {
            my $space = $longest - length $shape;
            push @shapes, [
                'string', $font, $x, $max_y - $font->height - 4,
                join( '', $shape, ' ' x $space, $key ), 'black'
            ];

            $max_y += $font->height + 4;
        }
    }

    my $sig     = 'Created by SQL::Translator ' . $t->version;
    my $sig_len = $font->width * length $sig;
    push @shapes, [
        'string', $font, $max_x - $sig_len, $max_y - $font->height - 4,
        $sig, 'black'
    ];

    #
    # Render the image.
    #
    my $gd = GD::Image->new( $max_x + 30, $max_y );
    unless ( $gd->can( $output_type ) ) {
        die "GD can't create images of type '$output_type'\n";
    }
    my %colors = map { $_->[0], $gd->colorAllocate( @{$_->[1]} ) } (
        [ white                => [ 255, 255, 255 ] ],
        [ beige                => [ 245, 245, 220 ] ],
        [ black                => [ 0, 0, 0 ] ],
        [ lightblue            => [ 173, 216, 230 ] ],
        [ cadetblue            => [ 95, 158, 160 ] ],
        [ lightgoldenrodyellow => [ 250, 250, 210 ] ],
        [ khaki                => [ 240, 230, 140 ] ],
        [ red                  => [ 255, 0, 0 ] ],
    );
    $gd->interlaced( 'true' );
    my $background_color = $add_color ? 'lightgoldenrodyellow' : 'white';
    $gd->fill( 0, 0, $colors{ $background_color } );

    # Replay the deferred drawing commands: first element is the GD
    # method name, last is the color key.
    for my $shape ( @shapes ) {
        my $method = shift @$shape;
        my $color  = pop @$shape;
        $gd->$method( @$shape, $colors{ $color } );
    }

    #
    # Make image map.
    #
    debug("imap file = '$imap_file'");
    if ( $imap_file && @imap_coords ) {
        open my $fh, '>', $imap_file or die "Can't write '$imap_file': $!\n";
        # NOTE(review): the HTML <map>/<area> markup in the prints below
        # appears to have been stripped by the archive extraction -- confirm
        # these strings against the upstream source before relying on the
        # image-map output.
        print $fh qq[\n]. qq[\n];
        for my $rec ( @imap_coords ) {
            my $href = shift @$rec;
            print $fh q[\n];
        }
        print $fh qq[];
        close $fh;
    }

    #
    # Print the image.
    #
    if ( $out_file ) {
        open my $fh, '>', $out_file or die "Can't write '$out_file': $!\n";
        binmode $fh;
        print $fh $gd->$output_type;
        close $fh;
    }
    else {
        return $gd->$output_type;
    }
}

1;

=pod

=head1 AUTHOR

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.

=cut
SQL-Translator-1.60/lib/SQL/Translator/Producer/Storable.pm0000644000000000000000000000207413473550070023465 0ustar00rootroot00000000000000
package SQL::Translator::Producer::Storable;

=head1 NAME

SQL::Translator::Producer::Storable - serializes the SQL::Translator::Schema
object via the Storable module

=head1 SYNOPSIS

  use SQL::Translator;

  my $translator = SQL::Translator->new;
  $translator->producer('Storable');

=head1 DESCRIPTION

This module uses Storable to serialize a schema to a string so that it
can be saved to disk.  Serializing a schema and then calling producers
on the stored can realize significant performance gains when parsing
takes a long time.
=cut use strict; use warnings; our ( $DEBUG, @EXPORT_OK ); $DEBUG = 0 unless defined $DEBUG; our $VERSION = '1.60'; use Storable; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); sub produce { my $t = shift; my $args = $t->producer_args; my $schema = $t->schema; my $serialized = Storable::nfreeze($schema); return $serialized; } 1; =pod =head1 AUTHOR Paul Harrington Eharringp@deshaw.comE. =head1 SEE ALSO SQL::Translator, SQL::Translator::Schema, Storable. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/Sybase.pm0000644000000000000000000003154313473550070023143 0ustar00rootroot00000000000000package SQL::Translator::Producer::Sybase; =head1 NAME SQL::Translator::Producer::Sybase - Sybase producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'Sybase' ); $t->translate; =head1 DESCRIPTION This module will produce text output of the schema suitable for Sybase. =cut use strict; use warnings; our ( $DEBUG, $WARN ); our $VERSION = '1.60'; $DEBUG = 1 unless defined $DEBUG; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); my %translate = ( # # Sybase types # integer => 'numeric', int => 'numeric', number => 'numeric', money => 'money', varchar => 'varchar', varchar2 => 'varchar', timestamp => 'datetime', text => 'varchar', real => 'double precision', comment => 'text', bit => 'bit', tinyint => 'smallint', float => 'double precision', serial => 'numeric', boolean => 'varchar', char => 'char', long => 'varchar', ); my %reserved = map { $_, 1 } qw[ ALL ANALYSE ANALYZE AND ANY AS ASC BETWEEN BINARY BOTH CASE CAST CHECK COLLATE COLUMN CONSTRAINT CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEFAULT DEFERRABLE DESC DISTINCT DO ELSE END EXCEPT FALSE FOR FOREIGN FREEZE FROM FULL GROUP HAVING ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT NATURAL NEW NOT NOTNULL NULL OFF OFFSET OLD ON 
ONLY OR ORDER OUTER OVERLAPS PRIMARY PUBLIC REFERENCES RIGHT SELECT SESSION_USER SOME TABLE THEN TO TRAILING TRUE UNION UNIQUE USER USING VERBOSE WHEN WHERE ]; my $max_id_length = 30; my %used_identifiers = (); my %global_names; my %unreserve; my %truncated; =pod =head1 Sybase Create Table Syntax CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name ( { column_name data_type [ DEFAULT default_expr ] [ column_constraint [, ... ] ] | table_constraint } [, ... ] ) [ INHERITS ( parent_table [, ... ] ) ] [ WITH OIDS | WITHOUT OIDS ] where column_constraint is: [ CONSTRAINT constraint_name ] { NOT NULL | NULL | UNIQUE | PRIMARY KEY | CHECK (expression) | REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] and table_constraint is: [ CONSTRAINT constraint_name ] { UNIQUE ( column_name [, ... ] ) | PRIMARY KEY ( column_name [, ... ] ) | CHECK ( expression ) | FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] =head1 Create Index Syntax CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( column [ ops_name ] [, ...] ) [ WHERE predicate ] CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( func_name( column [, ... 
]) [ ops_name ] ) [ WHERE predicate ] =cut sub produce { my $translator = shift; $DEBUG = $translator->debug; $WARN = $translator->show_warnings; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $output; $output .= header_comment unless ($no_comments); for my $table ( $schema->get_tables ) { my $table_name = $table->name or next; $table_name = mk_name( $table_name, '', undef, 1 ); my $table_name_ur = unreserve($table_name) || ''; my ( @comments, @field_defs, @index_defs, @constraint_defs ); push @comments, "--\n-- Table: $table_name_ur\n--" unless $no_comments; push @comments, map { "-- $_" } $table->comments; # # Fields # my %field_name_scope; for my $field ( $table->get_fields ) { my $field_name = mk_name( $field->name, '', \%field_name_scope, undef,1 ); my $field_name_ur = unreserve( $field_name, $table_name ); my $field_def = qq["$field_name_ur"]; $field_def =~ s/\"//g; if ( $field_def =~ /identity/ ){ $field_def =~ s/identity/pidentity/; } # # Datatype # my $data_type = lc $field->data_type; my $orig_data_type = $data_type; my %extra = $field->extra; my $list = $extra{'list'} || []; # \todo deal with embedded quotes my $commalist = join( ', ', map { qq['$_'] } @$list ); my $seq_name; if ( $data_type eq 'enum' ) { my $check_name = mk_name( $table_name.'_'.$field_name, 'chk' ,undef, 1 ); push @constraint_defs, "CONSTRAINT $check_name CHECK ($field_name IN ($commalist))"; $data_type .= 'character varying'; } elsif ( $data_type eq 'set' ) { $data_type .= 'character varying'; } elsif ( $field->is_auto_increment ) { $field_def .= ' IDENTITY'; } else { if ( defined $translate{ $data_type } ) { $data_type = $translate{ $data_type }; } else { warn "Unknown datatype: $data_type ", "($table_name.$field_name)\n" if $WARN; } } my $size = $field->size; unless ( $size ) { if ( $data_type =~ /numeric/ ) { $size = '9,0'; } elsif ( $orig_data_type eq 'text' ) { #interpret text fields as long 
varchars $size = '255'; } elsif ( $data_type eq 'varchar' && $orig_data_type eq 'boolean' ) { $size = '6'; } elsif ( $data_type eq 'varchar' ) { $size = '255'; } } $field_def .= " $data_type"; $field_def .= "($size)" if $size; # # Default value # my $default = $field->default_value; if ( defined $default ) { $field_def .= sprintf( ' DEFAULT %s', ( $field->is_auto_increment && $seq_name ) ? qq[nextval('"$seq_name"'::text)] : ( $default =~ m/null/i ) ? 'NULL' : "'$default'" ); } # # Not null constraint # unless ( $field->is_nullable ) { $field_def .= ' NOT NULL'; } else { $field_def .= ' NULL' if $data_type ne 'bit'; } push @field_defs, $field_def; } # # Constraint Declarations # my @constraint_decs = (); my $c_name_default; for my $constraint ( $table->get_constraints ) { my $name = $constraint->name || ''; my $type = $constraint->type || NORMAL; my @fields = map { unreserve( $_, $table_name ) } $constraint->fields; my @rfields = map { unreserve( $_, $table_name ) } $constraint->reference_fields; next unless @fields; if ( $type eq PRIMARY_KEY ) { $name ||= mk_name( $table_name, 'pk', undef,1 ); push @constraint_defs, "CONSTRAINT $name PRIMARY KEY ". '(' . join( ', ', @fields ) . ')'; } elsif ( $type eq FOREIGN_KEY ) { $name ||= mk_name( $table_name, 'fk', undef,1 ); push @constraint_defs, "CONSTRAINT $name FOREIGN KEY". ' (' . join( ', ', @fields ) . ') REFERENCES '. $constraint->reference_table. ' (' . join( ', ', @rfields ) . ')'; } elsif ( $type eq UNIQUE ) { $name ||= mk_name( $table_name, $name || ++$c_name_default,undef, 1 ); push @constraint_defs, "CONSTRAINT $name UNIQUE " . '(' . join( ', ', @fields ) . ')'; } } # # Indices # for my $index ( $table->get_indices ) { push @index_defs, 'CREATE INDEX ' . $index->name . " ON $table_name (". join( ', ', $index->fields ) . ");"; } my $create_statement; $create_statement = qq[DROP TABLE $table_name_ur;\n] if $add_drop_table; $create_statement .= qq[CREATE TABLE $table_name_ur (\n]. 
join( ",\n", map { " $_" } @field_defs, @constraint_defs ). "\n);" ; $output .= join( "\n\n", @comments, $create_statement, @index_defs, '' ); } foreach my $view ( $schema->get_views ) { my (@comments, $view_name); $view_name = $view->name(); push @comments, "--\n-- View: $view_name\n--" unless $no_comments; # text of view is already a 'create view' statement so no need # to do anything fancy. $output .= join("\n\n", @comments, $view->sql(), ); } foreach my $procedure ( $schema->get_procedures ) { my (@comments, $procedure_name); $procedure_name = $procedure->name(); push @comments, "--\n-- Procedure: $procedure_name\n--" unless $no_comments; # text of procedure already has the 'create procedure' stuff # so there is no need to do anything fancy. However, we should # think about doing fancy stuff with granting permissions and # so on. $output .= join("\n\n", @comments, $procedure->sql(), ); } if ( $WARN ) { if ( %truncated ) { warn "Truncated " . keys( %truncated ) . " names:\n"; warn "\t" . join( "\n\t", sort keys %truncated ) . "\n"; } if ( %unreserve ) { warn "Encounted " . keys( %unreserve ) . " unsafe names in schema (reserved or invalid):\n"; warn "\t" . join( "\n\t", sort keys %unreserve ) . "\n"; } } return $output; } sub mk_name { my $basename = shift || ''; my $type = shift || ''; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? $max_id_length - (length($type) + 1) : $max_id_length; $basename = substr( $basename, 0, $max_name ) if length( $basename ) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ( $basename ne $basename_orig and $critical ) { my $show_type = $type ? 
"+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to $max_id_length ", "character limit to make '$name'\n" if $WARN; $truncated{ $basename_orig } = $name; } $scope ||= \%global_names; if ( my $prev = $scope->{ $name } ) { my $name_orig = $name; $name .= sprintf( "%02d", ++$prev ); substr($name, $max_id_length - 3) = "00" if length( $name ) > $max_id_length; warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{ $name_orig }++; } $name = substr( $name, 0, $max_id_length ) if ((length( $name ) > $max_id_length) && $critical); $scope->{ $name }++; return $name; } sub unreserve { my $name = shift || ''; my $schema_obj_name = shift || ''; my ( $suffix ) = ( $name =~ s/(\W.*)$// ) ? $1 : ''; # also trap fields that don't begin with a letter return $name if !$reserved{ uc $name } && $name =~ /^[a-z]/i; if ( $schema_obj_name ) { ++$unreserve{"$schema_obj_name.$name"}; } else { ++$unreserve{"$name (table name)"}; } my $unreserve = sprintf '%s_', $name; return $unreserve.$suffix; } 1; =pod =head1 SEE ALSO SQL::Translator. =head1 AUTHORS Sam Angiuoli Eangiuoli@users.sourceforge.netE, Paul Harrington Eharringp@deshaw.comE, Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/YAML.pm0000644000000000000000000001371313473550070022456 0ustar00rootroot00000000000000package SQL::Translator::Producer::YAML; =head1 NAME SQL::Translator::Producer::YAML - A YAML producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new(producer => 'YAML'); =head1 DESCRIPTION This module uses YAML to serialize a schema to a string so that it can be saved to disk. Serializing a schema and then calling producers on the stored can realize significant performance gains when parsing takes a long time. 
=cut use strict; use warnings; our $VERSION = '1.60'; use YAML qw(Dump); sub produce { my $translator = shift; my $schema = $translator->schema; return Dump({ schema => { tables => { map { ($_->name => view_table($_)) } $schema->get_tables, }, views => { map { ($_->name => view_view($_)) } $schema->get_views, }, triggers => { map { ($_->name => view_trigger($_)) } $schema->get_triggers, }, procedures => { map { ($_->name => view_procedure($_)) } $schema->get_procedures, }, }, translator => { add_drop_table => $translator->add_drop_table, filename => $translator->filename, no_comments => $translator->no_comments, parser_args => $translator->parser_args, producer_args => $translator->producer_args, parser_type => $translator->parser_type, producer_type => $translator->producer_type, show_warnings => $translator->show_warnings, trace => $translator->trace, version => $translator->version, }, keys %{$schema->extra} ? ('extra' => { $schema->extra } ) : (), }); } sub view_table { my $table = shift; return { 'name' => $table->name, 'order' => $table->order, 'options' => $table->options || [], $table->comments ? ('comments' => [ $table->comments ] ) : (), 'constraints' => [ map { view_constraint($_) } $table->get_constraints ], 'indices' => [ map { view_index($_) } $table->get_indices ], 'fields' => { map { ($_->name => view_field($_)) } $table->get_fields }, keys %{$table->extra} ? ('extra' => { $table->extra } ) : (), }; } sub view_constraint { my $constraint = shift; return { 'deferrable' => scalar $constraint->deferrable, 'expression' => scalar $constraint->expression, 'fields' => [ map { ref $_ ? $_->name : $_ } $constraint->field_names ], 'match_type' => scalar $constraint->match_type, 'name' => scalar $constraint->name, 'options' => scalar $constraint->options, 'on_delete' => scalar $constraint->on_delete, 'on_update' => scalar $constraint->on_update, 'reference_fields' => [ map { ref $_ ? 
$_->name : $_ } $constraint->reference_fields ], 'reference_table' => scalar $constraint->reference_table, 'type' => scalar $constraint->type, keys %{$constraint->extra} ? ('extra' => { $constraint->extra } ) : (), }; } sub view_field { my $field = shift; return { 'order' => scalar $field->order, 'name' => scalar $field->name, 'data_type' => scalar $field->data_type, 'size' => [ $field->size ], 'default_value' => scalar $field->default_value, 'is_nullable' => scalar $field->is_nullable, 'is_primary_key' => scalar $field->is_primary_key, 'is_unique' => scalar $field->is_unique, $field->is_auto_increment ? ('is_auto_increment' => 1) : (), $field->comments ? ('comments' => [ $field->comments ]) : (), keys %{$field->extra} ? ('extra' => { $field->extra } ) : (), }; } sub view_procedure { my $procedure = shift; return { 'order' => scalar $procedure->order, 'name' => scalar $procedure->name, 'sql' => scalar $procedure->sql, 'parameters' => scalar $procedure->parameters, 'owner' => scalar $procedure->owner, 'comments' => scalar $procedure->comments, keys %{$procedure->extra} ? ('extra' => { $procedure->extra } ) : (), }; } sub view_trigger { my $trigger = shift; return { 'order' => scalar $trigger->order, 'name' => scalar $trigger->name, 'perform_action_when' => scalar $trigger->perform_action_when, 'database_events' => scalar $trigger->database_events, 'fields' => scalar $trigger->fields, 'on_table' => scalar $trigger->on_table, 'action' => scalar $trigger->action, (defined $trigger->scope ? ( 'scope' => scalar $trigger->scope, ) : ()), keys %{$trigger->extra} ? ('extra' => { $trigger->extra } ) : (), }; } sub view_view { my $view = shift; return { 'order' => scalar $view->order, 'name' => scalar $view->name, 'sql' => scalar $view->sql, 'fields' => scalar $view->fields, keys %{$view->extra} ? 
('extra' => { $view->extra } ) : (), }; } sub view_index { my $index = shift; return { 'name' => scalar $index->name, 'type' => scalar $index->type, 'fields' => scalar $index->fields, 'options' => scalar $index->options, keys %{$index->extra} ? ('extra' => { $index->extra } ) : (), }; } 1; =head1 SEE ALSO SQL::Translator, YAML, http://www.yaml.org/. =head1 AUTHORS darren chamberlain Edarren@cpan.orgE, Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/JSON.pm0000644000000000000000000001367513473550070022474 0ustar00rootroot00000000000000package SQL::Translator::Producer::JSON; =head1 NAME SQL::Translator::Producer::JSON - A JSON producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new(producer => 'JSON'); =head1 DESCRIPTION This module serializes a schema to a JSON string. =cut use strict; use warnings; our $VERSION = '1.60'; use JSON::MaybeXS 'to_json'; sub produce { my $translator = shift; my $schema = $translator->schema; return to_json({ schema => { tables => { map { ($_->name => view_table($_)) } $schema->get_tables, }, views => { map { ($_->name => view_view($_)) } $schema->get_views, }, triggers => { map { ($_->name => view_trigger($_)) } $schema->get_triggers, }, procedures => { map { ($_->name => view_procedure($_)) } $schema->get_procedures, }, }, translator => { add_drop_table => $translator->add_drop_table, filename => $translator->filename, no_comments => $translator->no_comments, parser_args => $translator->parser_args, producer_args => $translator->producer_args, parser_type => $translator->parser_type, producer_type => $translator->producer_type, show_warnings => $translator->show_warnings, trace => $translator->trace, version => $translator->version, }, keys %{$schema->extra} ? 
('extra' => { $schema->extra } ) : (), }, { allow_blessed => 1, allow_unknown => 1, %{$translator->producer_args}, }); } sub view_table { my $table = shift; return { 'name' => $table->name, 'order' => $table->order, 'options' => $table->options || [], $table->comments ? ('comments' => [ $table->comments ] ) : (), 'constraints' => [ map { view_constraint($_) } $table->get_constraints ], 'indices' => [ map { view_index($_) } $table->get_indices ], 'fields' => { map { ($_->name => view_field($_)) } $table->get_fields }, keys %{$table->extra} ? ('extra' => { $table->extra } ) : (), }; } sub view_constraint { my $constraint = shift; return { 'deferrable' => scalar $constraint->deferrable, 'expression' => scalar $constraint->expression, 'fields' => [ map { ref $_ ? $_->name : $_ } $constraint->field_names ], 'match_type' => scalar $constraint->match_type, 'name' => scalar $constraint->name, 'options' => scalar $constraint->options, 'on_delete' => scalar $constraint->on_delete, 'on_update' => scalar $constraint->on_update, 'reference_fields' => [ map { ref $_ ? $_->name : $_ } $constraint->reference_fields ], 'reference_table' => scalar $constraint->reference_table, 'type' => scalar $constraint->type, keys %{$constraint->extra} ? ('extra' => { $constraint->extra } ) : (), }; } sub view_field { my $field = shift; return { 'order' => scalar $field->order, 'name' => scalar $field->name, 'data_type' => scalar $field->data_type, 'size' => [ $field->size ], 'default_value' => scalar $field->default_value, 'is_nullable' => scalar $field->is_nullable, 'is_primary_key' => scalar $field->is_primary_key, 'is_unique' => scalar $field->is_unique, $field->is_auto_increment ? ('is_auto_increment' => 1) : (), $field->comments ? ('comments' => [ $field->comments ]) : (), keys %{$field->extra} ? 
('extra' => { $field->extra } ) : (), }; } sub view_procedure { my $procedure = shift; return { 'order' => scalar $procedure->order, 'name' => scalar $procedure->name, 'sql' => scalar $procedure->sql, 'parameters' => scalar $procedure->parameters, 'owner' => scalar $procedure->owner, 'comments' => scalar $procedure->comments, keys %{$procedure->extra} ? ('extra' => { $procedure->extra } ) : (), }; } sub view_trigger { my $trigger = shift; return { 'order' => scalar $trigger->order, 'name' => scalar $trigger->name, 'perform_action_when' => scalar $trigger->perform_action_when, 'database_events' => scalar $trigger->database_events, 'fields' => scalar $trigger->fields, 'on_table' => scalar $trigger->on_table, 'action' => scalar $trigger->action, (defined $trigger->scope ? ( 'scope' => scalar $trigger->scope, ) : ()), keys %{$trigger->extra} ? ('extra' => { $trigger->extra } ) : (), }; } sub view_view { my $view = shift; return { 'order' => scalar $view->order, 'name' => scalar $view->name, 'sql' => scalar $view->sql, 'fields' => scalar $view->fields, keys %{$view->extra} ? ('extra' => { $view->extra } ) : (), }; } sub view_index { my $index = shift; return { 'name' => scalar $index->name, 'type' => scalar $index->type, 'fields' => scalar $index->fields, 'options' => scalar $index->options, keys %{$index->extra} ? ('extra' => { $index->extra } ) : (), }; } 1; =head1 SEE ALSO SQL::Translator, JSON::MaybeXS, http://www.json.org/. =head1 AUTHORS darren chamberlain Edarren@cpan.orgE, Ken Youens-Clark Ekclark@cpan.orgE. Jon Jensen Ejonj@cpan.orgE. 
=cut SQL-Translator-1.60/lib/SQL/Translator/Producer/Dumper.pm0000644000000000000000000001746613473550070023161 0ustar00rootroot00000000000000package SQL::Translator::Producer::Dumper; =head1 NAME SQL::Translator::Producer::Dumper - SQL Dumper producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator::Producer::Dumper; Options: db_user Database username db_password Database password dsn DSN for DBI mysql_loadfile Create MySQL's LOAD FILE syntax instead of INSERTs skip=t1[,t2] Skip tables in comma-separated list skiplike=regex Skip tables in comma-separated list =head1 DESCRIPTION This producer creates a Perl script that can connect to a database and dump the data as INSERT statements (a la mysqldump) or as a file suitable for MySQL's LOAD DATA command. If you enable "add-truncate" or specify tables to "skip" (also using the "skiplike" regular expression) then the generated dumper script will leave out those tables. However, these will also be options in the generated dumper, so you can wait to specify these options when you dump your database. The database username, password, and DSN can be hardcoded into the generated script, or part of the DSN can be intuited from the "database" argument. 
=cut use strict; use warnings; use Config; use SQL::Translator; use File::Temp 'tempfile'; use Template; use Data::Dumper; our $VERSION = '1.60'; sub produce { my $t = shift; my $args = $t->producer_args; my $schema = $t->schema; my $add_truncate = $args->{'add_truncate'} || 0; my $skip = $args->{'skip'} || ''; my $skiplike = $args->{'skiplike'} || ''; my $db_user = $args->{'db_user'} || 'db_user'; my $db_pass = $args->{'db_password'} || 'db_pass'; my $parser_name = $t->parser_type; my %skip = map { $_, 1 } map { s/^\s+|\s+$//; $_ } split (/,/, $skip); my $sqlt_version = $t->version; if ( $parser_name =~ /Parser::(\w+)$/ ) { $parser_name = $1 } my %type_to_dbd = ( MySQL => 'mysql', Oracle => 'Oracle', PostgreSQL => 'Pg', SQLite => 'SQLite', Sybase => 'Sybase', ); my $dbd = $type_to_dbd{ $parser_name } || 'DBD'; my $dsn = $args->{'dsn'} || "dbi:$dbd:"; if ( $dbd eq 'Pg' && ! $args->{'dsn'} ) { $dsn .= 'dbname=dbname;host=hostname'; } elsif ( $dbd eq 'Oracle' && ! $args->{'dsn'} ) { $db_user = "$db_user/$db_pass@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)" . "(HOST=hostname)(PORT=1521))(CONNECT_DATA=(SID=sid)))"; $db_pass = ''; } elsif ( $dbd eq 'mysql' && ! 
$args->{'dsn'} ) { $dsn .= 'dbname'; } my $template = Template->new; my $template_text = template(); my $out; $template->process( \$template_text, { translator => $t, schema => $schema, db_user => $db_user, db_pass => $db_pass, dsn => $dsn, perl => $Config{'startperl'}, skip => \%skip, skiplike => $skiplike, }, \$out ) or die $template->error; return $out; } sub template { # # Returns the template to be processed by Template Toolkit # return <<'EOF'; [% perl || '#!/usr/bin/perl' %] [% USE date %] # # Generated by SQL::Translator [% translator.version %] # [% date.format( date.now, "%Y-%m-%d" ) %] # For more info, see http://sqlfairy.sourceforge.net/ # use strict; use Cwd; use DBI; use Getopt::Long; use File::Spec::Functions 'catfile'; my ( $help, $add_truncate, $skip, $skiplike, $no_comments, $takelike, $mysql_loadfile ); GetOptions( 'add-truncate' => \$add_truncate, 'h|help' => \$help, 'no-comments' => \$no_comments, 'mysql-loadfile' => \$mysql_loadfile, 'skip:s' => \$skip, 'skiplike:s' => \$skiplike, 'takelike:s' => \$takelike, ); if ( $help ) { print <<"USAGE"; Usage: $0 [options] > dump.sql Options: -h|--help Show help and exit --add-truncate Add "TRUNCATE TABLE" statements --mysql-loadfile Create MySQL's LOAD FILE syntax, not INSERTs --no-comments Suppress comments --skip=t1[,t2] Comma-separated list of tables to skip --skiplike=regex Regular expression of table names to skip --takelike=regex Regular expression of table names to take USAGE exit(0); } $no_comments = 1 if $mysql_loadfile; [%- SET table_defs = []; SET max_field = 0; FOREACH table IN schema.get_tables; SET table_name = table.name; NEXT IF skip.$table_name; NEXT IF skiplike AND table_name.match("(?:$skiplike)"); SET field_names = []; SET types = {}; FOR field IN table.get_fields; field_name = field.name; fname_len = field.name.length; max_field = fname_len > max_field ? fname_len : max_field; types.$field_name = field.data_type.match( '(char|str|long|text|enum|date)' ) ? 
'string' : 'number'; field_names.push( field_name ); END; table_defs.push({ name => table_name, types => types, fields => field_names, }); END -%] my $db = DBI->connect( '[% dsn %]', '[% db_user %]', '[% db_pass %]', { RaiseError => 1 } ); my %skip = map { $_, 1 } map { s/^\s+|\s+$//; $_ } split (/,/, $skip); my @tables = ( [%- FOREACH t IN table_defs %] { table_name => '[% t.name %]', fields => [ qw/ [% t.fields.join(' ') %] / ], types => { [%- FOREACH fname IN t.types.keys %] '[% fname %]' => '[% t.types.$fname %]', [%- END %] }, }, [%- END %] ); for my $table ( @tables ) { my $table_name = $table->{'table_name'}; next if $skip{ $table_name }; next if $skiplike && $table_name =~ qr/$skiplike/; next if $takelike && $table_name !~ qr/$takelike/; my ( $out_fh, $outfile ); if ( $mysql_loadfile ) { $outfile = catfile( cwd(), "$table_name.txt" ); open $out_fh, ">$outfile" or die "Can't write LOAD FILE to '$table_name': $!\n"; } print "--\n-- Data for table '$table_name'\n--\n" unless $no_comments; if ( $add_truncate ) { print "TRUNCATE TABLE $table_name;\n"; } my $sql = 'select ' . join(', ', @{ $table->{'fields'} } ) . " from $table_name" ; my $sth = $db->prepare( $sql ); $sth->execute; while ( my $rec = $sth->fetchrow_hashref ) { my @vals; for my $fld ( @{ $table->{'fields'} } ) { my $val = $rec->{ $fld }; if ( $table->{'types'}{ $fld } eq 'string' ) { if ( defined $val ) { $val =~ s/'/\\'/g; $val = qq['$val'] } else { $val = qq['']; } } else { $val = defined $val ? $val : $mysql_loadfile ? '\N' : 'NULL'; } push @vals, $val; } if ( $mysql_loadfile ) { print $out_fh join("\t", @vals), "\n"; } else { print "INSERT INTO $table_name (". join(', ', @{ $table->{'fields'} }) . 
') VALUES (', join(', ', @vals), ");\n"; } } if ( $out_fh ) { print "LOAD DATA INFILE '$outfile' INTO TABLE $table_name ", "FIELDS OPTIONALLY ENCLOSED BY '\\'';\n"; close $out_fh or die "Can't close filehandle: $!\n"; } else { print "\n"; } } EOF } 1; # ------------------------------------------------------------------- # To create a little flower is the labour of ages. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/Oracle.pm0000644000000000000000000006372613473550070023132 0ustar00rootroot00000000000000package SQL::Translator::Producer::Oracle; =head1 NAME SQL::Translator::Producer::Oracle - Oracle SQL producer =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'Oracle' ); print $translator->translate( $file ); =head1 DESCRIPTION Creates an SQL DDL suitable for Oracle. =head1 producer_args =over =item delay_constraints This option remove the primary key and other key constraints from the CREATE TABLE statement and adds ALTER TABLEs at the end with it. =item quote_field_names Controls whether quotes are being used around column names in generated DDL. =item quote_table_names Controls whether quotes are being used around table, sequence and trigger names in generated DDL. =back =head1 NOTES =head2 Autoincremental primary keys This producer uses sequences and triggers to autoincrement primary key columns, if necessary. SQLPlus and DBI expect a slightly different syntax of CREATE TRIGGER statement. You might have noticed that this producer returns a scalar containing all statements concatenated by newlines or an array of single statements depending on the context (scalar, array) it has been called in. 
SQLPlus expects following trigger syntax: CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; / Whereas if you want to create the same trigger using L, you need to omit the last slash: my $dbh = DBI->connect('dbi:Oracle:mysid', 'scott', 'tiger'); $dbh->do(" CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; "); If you call this producer in array context, we expect you want to process the returned array of statements using L like L does. To get this working we removed the slash in those statements in version 0.09002 of L when called in array context. In scalar context the slash will be still there to ensure compatibility with SQLPlus. =cut use strict; use warnings; our ( $DEBUG, $WARN ); our $VERSION = '1.60'; $DEBUG = 0 unless defined $DEBUG; use base 'SQL::Translator::Producer'; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(header_comment); my %translate = ( # # MySQL types # bigint => 'number', double => 'float', decimal => 'number', float => 'float', int => 'number', integer => 'number', mediumint => 'number', smallint => 'number', tinyint => 'number', char => 'char', varchar => 'varchar2', tinyblob => 'blob', blob => 'blob', mediumblob => 'blob', longblob => 'blob', tinytext => 'varchar2', text => 'clob', longtext => 'clob', mediumtext => 'clob', enum => 'varchar2', set => 'varchar2', date => 'date', datetime => 'date', time => 'date', timestamp => 'date', year => 'date', # # PostgreSQL types # numeric => 'number', 'double precision' => 'number', serial => 'number', bigserial => 'number', money => 'number', character => 'char', 'character varying' => 'varchar2', bytea => 'BLOB', interval => 'number', boolean => 'number', point => 'number', line => 'number', lseg => 'number', box => 'number', 
path     => 'number',
polygon  => 'number',
circle   => 'number',
cidr     => 'number',
inet     => 'varchar2',
macaddr  => 'varchar2',
bit      => 'number',
'bit varying' => 'number',

#
# Oracle types
#
number   => 'number',
varchar2 => 'varchar2',
long     => 'clob',
);

#
# Oracle 8/9 max size of data types from:
# http://www.ss64.com/orasyntax/datatypes.html
#
my %max_size = (
    char      => 2000,
    float     => 126,
    nchar     => 2000,
    nvarchar2 => 4000,
    number    => [ 38, 127 ],    # [ precision, scale ]
    raw       => 2000,
    varchar   => 4000,           # only synonym for varchar2
    varchar2  => 4000,
);

# Oracle (pre-12c) identifier length limit; names longer than this get
# truncated by mk_name() below.
my $max_id_length    = 30;
my %used_identifiers = ();
my %global_names;                # default scope for mk_name() uniqueness
my %truncated;                   # original-name => truncated-name, for warnings

# Quote used to escape table, field, sequence and trigger names
my $quote_char = '"';

# Producer entry point. In list context returns the individual DDL
# statements (no terminators); in scalar context returns one string with
# statements joined by ";\n\n" and triggers terminated by "/".
sub produce {
    my $translator = shift;
    $DEBUG = $translator->debug;
    $WARN  = $translator->show_warnings || 0;
    my $no_comments       = $translator->no_comments;
    my $add_drop_table    = $translator->add_drop_table;
    my $schema            = $translator->schema;
    my $oracle_version    = $translator->producer_args->{oracle_version} || 0;
    my $delay_constraints = $translator->producer_args->{delay_constraints};
    my ( $create, @table_defs, @fk_defs, @trigger_defs, @index_defs,
        @constraint_defs );

    $create .= header_comment unless ($no_comments);

    # FIX: the original used "my $qt = 1 if COND" — the conditional-my
    # construct is documented as undefined behaviour in perlsyn (the lexical
    # can retain a stale value between calls). Declare first, assign after.
    my ( $qt, $qf );
    $qt = 1 if $translator->quote_table_names;
    $qf = 1 if $translator->quote_field_names;

    if ( $translator->parser_type =~ /mysql/i ) {
        $create .=
            "-- We assume that default NLS_DATE_FORMAT has been changed\n"
          . "-- but we set it here anyway to be self-consistent.\n"
          unless $no_comments;

        $create .=
          "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\n\n";
    }

    for my $table ( $schema->get_tables ) {
        my ( $table_def, $fk_def, $trigger_def, $index_def, $constraint_def )
          = create_table(
            $table,
            {
                add_drop_table    => $add_drop_table,
                show_warnings     => $WARN,
                no_comments       => $no_comments,
                delay_constraints => $delay_constraints,
                quote_table_names => $qt,
                quote_field_names => $qf,
            }
          );
        push @table_defs,      @$table_def;
        push @fk_defs,         @$fk_def;
        push @trigger_defs,    @$trigger_def;
        push @index_defs,      @$index_def;
        push @constraint_defs, @$constraint_def;
    }

    my (@view_defs);
    foreach my $view ( $schema->get_views ) {
        my ($view_def) = create_view(
            $view,
            {
                add_drop_view     => $add_drop_table,
                quote_table_names => $qt,
            }
        );
        push @view_defs, @$view_def;
    }

    if (wantarray) {
        return defined $create ? $create : (), @table_defs, @view_defs,
          @fk_defs, @trigger_defs, @index_defs, @constraint_defs;
    }
    else {
        $create .= join( ";\n\n",
            @table_defs, @view_defs, @fk_defs, @index_defs,
            @constraint_defs );
        $create .= ";\n\n";

        # If wantarray is not set we have to add "/" in this statement
        # DBI->do() needs them omitted
        # triggers may NOT end with a semicolon but a "/" instead
        $create .= "$_/\n\n" for @trigger_defs;
        return $create;
    }
}

# Build the DDL for one table. Returns five array refs:
# (create statements, FK ALTERs, trigger bodies, index statements,
# constraint ALTERs — the last is empty unless delay_constraints is set).
sub create_table {
    my ( $table, $options ) = @_;
    my $qt           = $options->{quote_table_names};
    my $qf           = $options->{quote_field_names};
    my $table_name   = $table->name;
    my $table_name_q = quote( $table_name, $qt );

    my ( @create, @field_defs, @constraint_defs, @fk_defs, @trigger_defs );

    push @create, "--\n-- Table: $table_name\n--"
      unless $options->{no_comments};
    push @create, qq[DROP TABLE $table_name_q CASCADE CONSTRAINTS]
      if $options->{add_drop_table};

    my ( %field_name_scope, @field_comments );
    for my $field ( $table->get_fields ) {
        my ( $field_create, $field_defs, $trigger_defs, $field_comments ) =
          create_field( $field, $options, \%field_name_scope );
        push @create,         @$field_create   if ref $field_create;
        push @field_defs,     @$field_defs     if ref $field_defs;
        push @trigger_defs,   @$trigger_defs   if ref $trigger_defs;
        push @field_comments, @$field_comments if ref $field_comments;
    }

    #
    # Table options
    #
    my @table_options;
    for my $opt ( $table->options ) {
        if ( ref $opt eq 'HASH' ) {
            my ( $key, $value ) = each %$opt;
            if ( ref $value eq 'ARRAY' ) {
                push @table_options, "$key\n(\n"
                  . join( "\n",
                    map { " $_->[0]\t$_->[1]" }
                    map { [ each %$_ ] } @$value )
                  . "\n)";
            }
            elsif ( !defined $value ) {
                push @table_options, $key;
            }
            else {
                push @table_options, "$key $value";
            }
        }
    }

    #
    # Table constraints
    #
    for my $c ( $table->get_constraints ) {
        my $name    = $c->name || '';
        my @fields  = map { quote( $_, $qf ) } $c->fields;
        my @rfields = map { quote( $_, $qf ) } $c->reference_fields;

        next if !@fields && $c->type ne CHECK_C;

        if ( $c->type eq PRIMARY_KEY ) {
            # create a name if delay_constraints
            $name ||= mk_name( $table_name, 'pk' )
              if $options->{delay_constraints};
            $name = quote( $name, $qf );
            push @constraint_defs,
              ( $name ? "CONSTRAINT $name " : '' )
              . 'PRIMARY KEY (' . join( ', ', @fields ) . ')';
        }
        elsif ( $c->type eq UNIQUE ) {
            # Don't create UNIQUE constraints identical to the primary key
            if ( my $pk = $table->primary_key ) {
                my $u_fields  = join( ":", @fields );
                my $pk_fields = join( ":", $pk->fields );
                next if $u_fields eq $pk_fields;
            }

            if ($name) {
                # Force prepend of table_name as ORACLE doesn't allow
                # duplicate CONSTRAINT names even for different tables
                # (ORA-02264)
                $name = mk_name( "${table_name}_$name", 'u' )
                  unless $name =~ /^$table_name/;
            }
            else {
                $name = mk_name( $table_name, 'u' );
            }
            $name = quote( $name, $qf );

            for my $f ( $c->fields ) {
                my $field_def = $table->get_field($f) or next;
                my $dtype     = $translate{
                    ref $field_def->data_type eq "ARRAY"
                    ? $field_def->data_type->[0]
                    : $field_def->data_type
                } or next;
                if ( $WARN && $dtype =~ /clob/i ) {
                    # FIX: the closing quote and period were transposed in
                    # the original message (".'\n").
                    warn "Oracle will not allow UNIQUE constraints on "
                      . "CLOB field '"
                      . $field_def->table->name . '.'
                      . $field_def->name . "'.\n";
                }
            }

            push @constraint_defs,
              "CONSTRAINT $name UNIQUE " . '('
              . join( ', ', @fields ) . ')';
        }
        elsif ( $c->type eq CHECK_C ) {
            $name ||= mk_name( $name || $table_name, 'ck' );
            $name = quote( $name, $qf );
            my $expression = $c->expression || '';
            push @constraint_defs, "CONSTRAINT $name CHECK ($expression)";
        }
        elsif ( $c->type eq FOREIGN_KEY ) {
            $name = mk_name( join( '_', $table_name, $c->fields ) . '_fk' );
            $name = quote( $name, $qf );
            my $on_delete = uc( $c->on_delete || '' );
            my $def       = "CONSTRAINT $name FOREIGN KEY ";

            if (@fields) {
                $def .= '(' . join( ', ', @fields ) . ')';
            }

            my $ref_table = quote( $c->reference_table, $qt );
            $def .= " REFERENCES $ref_table";

            if (@rfields) {
                $def .= ' (' . join( ', ', @rfields ) . ')';
            }

            if ( $c->match_type ) {
                # FIX: '.' binds tighter than '?:', so the original
                #   $def .= ' MATCH ' . ( ... =~ /full/i ) ? 'FULL' : 'PARTIAL'
                # evaluated the whole concatenation as the ternary condition
                # and always produced 'FULL'. Parenthesize the ternary.
                $def .= ' MATCH '
                  . ( ( $c->match_type =~ /full/i ) ? 'FULL' : 'PARTIAL' );
            }

            if ( $on_delete && $on_delete ne "RESTRICT" ) {
                $def .= ' ON DELETE ' . $c->on_delete;
            }

            # disabled by plu 2007-12-29 - doesn't exist for oracle
            #if ( $c->on_update ) {
            #    $def .= ' ON UPDATE '. $c->on_update;
            #}

            push @fk_defs,
              sprintf( "ALTER TABLE %s ADD %s", $table_name_q, $def );
        }
    }

    #
    # Index Declarations
    #
    my @index_defs = ();
    for my $index ( $table->get_indices ) {
        my $index_name = $index->name || '';
        my $index_type = $index->type || NORMAL;
        my @fields     = map { quote( $_, $qf ) } $index->fields;
        next unless @fields;

        my @index_options;
        for my $opt ( $index->options ) {
            if ( ref $opt eq 'HASH' ) {
                my ( $key, $value ) = each %$opt;
                if ( ref $value eq 'ARRAY' ) {
                    # FIX: the original pushed onto @table_options here
                    # (copy-paste from the table-options loop above), so
                    # structured index options leaked into the table DDL.
                    push @index_options, "$key\n(\n"
                      . join( "\n",
                        map { " $_->[0]\t$_->[1]" }
                        map { [ each %$_ ] } @$value )
                      . "\n)";
                }
                elsif ( !defined $value ) {
                    push @index_options, $key;
                }
                else {
                    push @index_options, "$key $value";
                }
            }
        }
        my $index_options =
          @index_options ? "\n" . join( "\n", @index_options ) : '';

        if ( $index_type eq PRIMARY_KEY ) {
            $index_name =
              $index_name ? mk_name($index_name) : mk_name( $table_name, 'pk' );
            $index_name = quote( $index_name, $qf );
            push @field_defs,
              'CONSTRAINT ' . $index_name . ' PRIMARY KEY ' . '('
              . join( ', ', @fields ) . ')';
        }
        elsif ( $index_type eq NORMAL ) {
            $index_name =
              $index_name
              ? mk_name($index_name)
              : mk_name( $table_name, $index_name || 'i' );
            $index_name = quote( $index_name, $qf );
            push @index_defs,
              "CREATE INDEX $index_name on $table_name_q ("
              . join( ', ', @fields ) . ")$index_options";
        }
        elsif ( $index_type eq UNIQUE ) {
            $index_name =
              $index_name
              ? mk_name($index_name)
              : mk_name( $table_name, $index_name || 'i' );
            $index_name = quote( $index_name, $qf );
            push @index_defs,
              "CREATE UNIQUE INDEX $index_name on $table_name_q ("
              . join( ', ', @fields ) . ")$index_options";
        }
        else {
            warn "Unknown index type ($index_type) on table $table_name.\n"
              if $WARN;
        }
    }

    if ( my @table_comments = $table->comments ) {
        for my $comment (@table_comments) {
            next unless $comment;
            $comment = __PACKAGE__->_quote_string($comment);
            push @field_comments,
              "COMMENT ON TABLE $table_name_q is\n $comment"
              unless $options->{no_comments};
        }
    }

    # NOTE(review): @field_comments is collected above but never returned or
    # appended to @create, so COMMENT ON statements are dropped — confirm
    # against upstream before changing the return signature.

    my $table_options =
      @table_options ? "\n" . join( "\n", @table_options ) : '';
    push @create, "CREATE TABLE $table_name_q (\n"
      . join( ",\n",
        map { " $_" } @field_defs,
        ( $options->{delay_constraints} ? () : @constraint_defs ) )
      . "\n)$table_options";

    @constraint_defs =
      map { "ALTER TABLE $table_name_q ADD $_" } @constraint_defs;

    if ($WARN) {
        if (%truncated) {
            warn "Truncated " . keys(%truncated) . " names:\n";
            warn "\t" . join( "\n\t", sort keys %truncated ) . "\n";
        }
    }

    return \@create, \@fk_defs, \@trigger_defs, \@index_defs,
      ( $options->{delay_constraints} ? \@constraint_defs : [] );
}

# Produce an ALTER TABLE ... MODIFY statement converting $from_field to
# $to_field. Dies when asked to drop a NOT NULL (ORA-01442 has no inverse
# here); strips the redundant NOT NULL when both sides already have it.
sub alter_field {
    my ( $from_field, $to_field, $options ) = @_;

    my $qt = $options->{quote_table_names};
    my ( $field_create, $field_defs, $trigger_defs, $field_comments ) =
      create_field( $to_field, $options, {} );

    # Fix ORA-01442
    if ( $to_field->is_nullable && !$from_field->is_nullable ) {
        die 'Cannot remove NOT NULL from table field';
    }
    elsif ( !$from_field->is_nullable && !$to_field->is_nullable ) {
        @$field_defs = map { s/ NOT NULL//; $_ } @$field_defs;
    }

    my $table_name = quote( $to_field->table->name, $qt );

    return 'ALTER TABLE ' . $table_name . ' MODIFY ( '
      . join( '', @$field_defs ) . ' )';
}

# Produce an ALTER TABLE ... ADD statement for a new field.
sub add_field {
    my ( $new_field, $options ) = @_;

    my $qt = $options->{quote_table_names};
    my ( $field_create, $field_defs, $trigger_defs, $field_comments ) =
      create_field( $new_field, $options, {} );

    my $table_name = quote( $new_field->table->name, $qt );

    my $out = sprintf( 'ALTER TABLE %s ADD ( %s )',
        $table_name, join( '', @$field_defs ) );
    return $out;
}

# Build the column definition for one field. Returns four array refs:
# (extra create statements e.g. sequences, column defs, trigger bodies,
# COMMENT ON COLUMN statements). Also handles the Oracle-specific fixups
# for ORA-02329/00907/00906 and auto-increment emulation via
# sequence + trigger. NOTE: renames the field in the schema object to the
# (possibly truncated) Oracle-safe name.
sub create_field {
    my ( $field, $options, $field_name_scope ) = @_;
    my $qf = $options->{quote_field_names};
    my $qt = $options->{quote_table_names};

    my ( @create, @field_defs, @trigger_defs, @field_comments );

    my $table_name   = $field->table->name;
    my $table_name_q = quote( $table_name, $qt );

    #
    # Field name
    #
    my $field_name   = mk_name( $field->name, '', $field_name_scope, 1 );
    my $field_name_q = quote( $field_name, $qf );
    my $field_def    = quote( $field_name, $qf );
    $field->name($field_name);

    #
    # Datatype
    #
    my $check;
    my $data_type = lc $field->data_type;
    my @size      = $field->size;
    my %extra     = $field->extra;
    my $list      = $extra{'list'} || [];
    my $commalist =
      join( ', ', map { __PACKAGE__->_quote_string($_) } @$list );

    if ( $data_type eq 'enum' ) {
        $check     = "CHECK ($field_name_q IN ($commalist))";
        $data_type = 'varchar2';
    }
    elsif ( $data_type eq 'set' ) {
        # XXX add a CHECK constraint maybe
        # (trickier and slower, than enum :)
        $data_type = 'varchar2';
    }
    else {
        if ( defined $translate{$data_type} ) {
            if ( ref $translate{$data_type} eq "ARRAY" ) {
                ( $data_type, $size[0] ) = @{ $translate{$data_type} };
            }
            else {
                $data_type = $translate{$data_type};
            }
        }
        $data_type ||= 'varchar2';
    }

    # ensure size is not bigger than max size oracle allows for data type
    if ( defined $max_size{$data_type} ) {
        for ( my $i = 0 ; $i < scalar @size ; $i++ ) {
            my $max =
              ref( $max_size{$data_type} ) eq 'ARRAY'
              ? $max_size{$data_type}->[$i]
              : $max_size{$data_type};
            $size[$i] = $max if $size[$i] > $max;
        }
    }

    #
    # Fixes ORA-02329: column of datatype LOB cannot be
    # unique or a primary key
    #
    if ( $data_type eq 'clob' && $field->is_primary_key ) {
        $data_type = 'varchar2';
        $size[0]   = 4000;
        warn "CLOB cannot be a primary key, changing to VARCHAR2\n"
          if $WARN;
    }

    if ( $data_type eq 'clob' && $field->is_unique ) {
        $data_type = 'varchar2';
        $size[0]   = 4000;
        warn "CLOB cannot be a unique key, changing to VARCHAR2\n"
          if $WARN;
    }

    #
    # Fixes ORA-00907: missing right parenthesis
    #
    if ( $data_type =~ /(date|clob)/i ) {
        undef @size;
    }

    #
    # Fixes ORA-00906: missing right parenthesis
    # if size is 0 or undefined
    #
    for (qw/varchar2/) {
        if ( $data_type =~ /^($_)$/i ) {
            $size[0] ||= $max_size{$_};
        }
    }

    $field_def .= " $data_type";
    if ( defined $size[0] && $size[0] > 0 ) {
        $field_def .= '(' . join( ',', @size ) . ')';
    }

    #
    # Default value
    #
    my $default = $field->default_value;
    if ( defined $default ) {
        #
        # Wherein we try to catch a string being used as
        # a default value for a numerical field. If "true/false,"
        # then sub "1/0," otherwise just test the truthity of the
        # argument and use that (naive?).
        #
        if ( ref $default and defined $$default ) {
            $default = $$default;
        }
        elsif ( ref $default ) {
            $default = 'NULL';
        }
        elsif ( $data_type =~ /^number$/i
            && $default !~ /^-?\d+$/
            && $default !~ m/null/i )
        {
            if ( $default =~ /^true$/i ) {
                $default = "'1'";
            }
            elsif ( $default =~ /^false$/i ) {
                $default = "'0'";
            }
            else {
                $default = $default ? "'1'" : "'0'";
            }
        }
        elsif ( $data_type =~ /date/
            && ( $default eq 'current_timestamp' || $default eq 'now()' ) )
        {
            $default = 'SYSDATE';
        }
        else {
            $default =
              $default =~ m/null/i
              ? 'NULL'
              : __PACKAGE__->_quote_string($default);
        }

        # FIX: the original statement was terminated with a comma instead of
        # a semicolon (harmless but misleading).
        $field_def .= " DEFAULT $default";
    }

    #
    # Not null constraint
    #
    unless ( $field->is_nullable ) {
        $field_def .= ' NOT NULL';
    }

    $field_def .= " $check" if $check;

    #
    # Auto_increment — emulated with a sequence plus a BEFORE INSERT trigger.
    #
    if ( $field->is_auto_increment ) {
        my $base_name    = $table_name . "_" . $field_name;
        my $seq_name     = quote( mk_name( $base_name, 'sq' ), $qt );
        my $trigger_name = quote( mk_name( $base_name, 'ai' ), $qt );

        push @create, qq[DROP SEQUENCE $seq_name]
          if $options->{add_drop_table};
        push @create, "CREATE SEQUENCE $seq_name";
        my $trigger =
            "CREATE OR REPLACE TRIGGER $trigger_name\n"
          . "BEFORE INSERT ON $table_name_q\n"
          . "FOR EACH ROW WHEN (\n"
          . " new.$field_name_q IS NULL"
          . " OR new.$field_name_q = 0\n"
          . ")\n"
          . "BEGIN\n"
          . " SELECT $seq_name.nextval\n"
          . " INTO :new."
          . $field_name_q . "\n"
          . " FROM dual;\n"
          . "END;\n";

        push @trigger_defs, $trigger;
    }

    # Timestamp columns get a default-to-sysdate trigger.
    if ( lc $field->data_type eq 'timestamp' ) {
        my $base_name = $table_name . "_" . $field_name;
        my $trig_name = quote( mk_name( $base_name, 'ts' ), $qt );
        my $trigger =
            "CREATE OR REPLACE TRIGGER $trig_name\n"
          . "BEFORE INSERT OR UPDATE ON $table_name_q\n"
          . "FOR EACH ROW WHEN (new.$field_name_q IS NULL)\n"
          . "BEGIN\n"
          . " SELECT sysdate INTO :new.$field_name_q FROM dual;\n"
          . "END;\n";

        push @trigger_defs, $trigger;
    }

    push @field_defs, $field_def;

    if ( my $comment = $field->comments ) {
        # FIX: the original used the binding operator ('=~') instead of
        # assignment, so the comment was never actually quoted/escaped.
        $comment = __PACKAGE__->_quote_string($comment);
        push @field_comments,
          "COMMENT ON COLUMN $table_name_q.$field_name_q is\n $comment;"
          unless $options->{no_comments};
    }

    return \@create, \@field_defs, \@trigger_defs, \@field_comments;
}

# Build DROP/CREATE statements for a view (or materialized view when the
# view's extra->{materialized} is set). Returns an array ref of statements.
sub create_view {
    my ( $view, $options ) = @_;
    my $qt        = $options->{quote_table_names};
    my $view_name = quote( $view->name, $qt );
    my $extra     = $view->extra;

    my $view_type    = 'VIEW';
    my $view_options = '';
    if ( my $materialized = $extra->{materialized} ) {
        $view_type = 'MATERIALIZED VIEW';
        $view_options .= ' ' . $materialized;
    }

    my @create;
    push @create, qq[DROP $view_type $view_name]
      if $options->{add_drop_view};

    push @create,
      sprintf( "CREATE %s %s%s AS\n%s",
        $view_type, $view_name, $view_options, $view->sql );

    return \@create;
}

# Generate an Oracle-safe identifier: optional "<type>_" prefix, truncated
# to $max_id_length, made unique within $scope (default: file-global) by
# appending a two-digit counter. Records truncations in %truncated for the
# summary warning.
sub mk_name {
    my $basename = shift || '';
    my $type     = shift || '';
    $type = '' if $type =~ /^\d/;
    my $scope         = shift || '';
    my $critical      = shift || '';
    my $basename_orig = $basename;
    my $max_name =
      $type ? $max_id_length - ( length($type) + 1 ) : $max_id_length;
    $basename = substr( $basename, 0, $max_name )
      if length($basename) > $max_name;
    my $name = $type ? "${type}_$basename" : $basename;

    if ( $basename ne $basename_orig and $critical ) {
        my $show_type = $type ? "+'$type'" : "";
        warn "Truncating '$basename_orig'$show_type to $max_id_length ",
          "character limit to make '$name'\n"
          if $WARN;
        $truncated{$basename_orig} = $name;
    }

    $scope ||= \%global_names;
    if ( my $prev = $scope->{$name} ) {
        my $name_orig = $name;
        substr( $name, $max_id_length - 2 ) = ""
          if length($name) >= $max_id_length - 1;
        $name .= sprintf( "%02d", $prev++ );
        warn "The name '$name_orig' has been changed to ",
          "'$name' to make it unique.\n"
          if $WARN;
        $scope->{$name_orig}++;
    }

    $scope->{$name}++;

    return $name;
}

1;

# Wrap $name in $quote_char (doubling any embedded quote chars) when
# quoting is enabled; otherwise return it unchanged.
sub quote {
    my ( $name, $q ) = @_;
    return $name unless $q && $name;
    $name =~ s/\Q$quote_char/$quote_char$quote_char/g;
    return "$quote_char$name$quote_char";
}

# -------------------------------------------------------------------
# All bad art is the result of good intentions.
# Oscar Wilde
# -------------------------------------------------------------------

=pod

=head1 CREDITS

Mad props to Tim Bunce for much of the logic stolen from his "mysql2ora"
script.

=head1 AUTHORS

Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>,
Alexander Hartmaier E<lt>abraxxa@cpan.orgE<gt>,
Fabien Wernli E<lt>faxmodem@cpan.orgE<gt>.

=head1 SEE ALSO

SQL::Translator, DDL::Oracle, mysql2ora.

=cut

SQL-Translator-1.60/lib/SQL/Translator/Producer/HTML.pm0000644000000000000000000002470113473550070022457 0ustar00rootroot00000000000000package SQL::Translator::Producer::HTML;

use strict;
use warnings;
use Data::Dumper;

our $VERSION = '1.60';
our $NAME = __PACKAGE__;
our $NOWRAP = 0 unless defined $NOWRAP;
our $NOLINKTABLE = 0 unless defined $NOLINKTABLE;

# Emit XHTML by default
$CGI::XHTML = $CGI::XHTML = 42;

use SQL::Translator::Schema::Constants;

# -------------------------------------------------------------------
# Main entry point. Returns a string containing HTML.
# -------------------------------------------------------------------
# Producer entry point: walks the schema and builds an HTML description.
# Returns a single string (all fragments joined with newlines).
#
# Recognised producer_args: title, nowrap, nolinktable, stylesheet, pretty.
#
# NOTE(review): several qq[...] string literals below appear to have lost
# their HTML tags during archive extraction (e.g. the link/anchor markup
# around table names) — compare against the pristine upstream file before
# relying on the emitted markup. The code is reproduced here unchanged.
sub produce {
    my $t           = shift;
    my $args        = $t->producer_args;
    my $schema      = $t->schema;
    my $schema_name = $schema->name || 'Schema';
    my $title       = $args->{'title'} || "Description of $schema_name";
    # nowrap/nolinktable: explicit producer args override the package
    # defaults ($NOWRAP/$NOLINKTABLE); note the leading '!' inverts the
    # flag ("nowrap" => do not wrap).
    my $wrap = ! (defined $args->{'nowrap'} ? $args->{'nowrap'} : $NOWRAP);
    my $linktable = ! (defined $args->{'nolinktable'} ? $args->{'nolinktable'} : $NOLINKTABLE);
    my %stylesheet = defined $args->{'stylesheet'}
        ? ( -style => { src => $args->{'stylesheet'} } )
        : ( );
    my @html;
    # CGI is loaded lazily; CGI::Pretty produces human-readable output.
    my $q = defined $args->{'pretty'}
        ? do { require CGI::Pretty; import CGI::Pretty; CGI::Pretty->new }
        : do { require CGI;         import CGI;         CGI->new };
    my ($table, @table_names);

    if ($wrap) {
        push @html,
            $q->start_html({
                -title => $title,
                %stylesheet,
                -meta => { generator => $NAME },
            }),
            $q->h1({ -class => 'SchemaDescription' }, $title),
            $q->hr;
    }

    # Only tables with a non-empty name are listed / linked.
    @table_names = grep { length $_->name } $schema->get_tables;

    if ($linktable) {
        # Generate top menu, with links to full table information
        my $count = scalar(@table_names);
        $count = sprintf "%d table%s", $count, $count == 1 ? '' : 's';

        # Leading table of links
        push @html,
            $q->comment("Table listing ($count)"),
            $q->a({ -name => 'top' }),
            $q->start_table({ -width => '100%', -class => 'LinkTable'}),

            # XXX This needs to be colspan="$#{$table->fields}" class="LinkTableHeader"
            $q->Tr(
                $q->td({ -class => 'LinkTableCell' },
                    $q->h2({ -class => 'LinkTableTitle' },
                        'Tables'
                    ),
                ),
            );

        for my $table (@table_names) {
            my $table_name = $table->name;
            push @html,
                $q->comment("Start link to table '$table_name'"),
                $q->Tr({ -class => 'LinkTableRow' },
                    $q->td({ -class => 'LinkTableCell' },
                        qq[$table_name]
                    )
                ),
                $q->comment("End link to table '$table_name'");
        }

        push @html, $q->end_table;
    }

    # One section per table: header, comments, field table, indices,
    # constraints (primary keys are shown inline with the fields).
    for my $table ($schema->get_tables) {
        my $table_name = $table->name or next;
        my @fields = $table->get_fields or next;
        push @html,
            $q->comment("Starting table '$table_name'"),
            $q->a({ -name => $table_name }),
            $q->table({ -class => 'TableHeader', -width => '100%' },
                $q->Tr({ -class => 'TableHeaderRow' },
                    $q->td({ -class => 'TableHeaderCell' }, $q->h3($table_name)),
                    qq[],
                    $q->td({ -class => 'TableHeaderCell', -align => 'right' },
                        qq[Top]
                    )
                )
            );

        if ( my @comments = map { $_ ? $_ : () } $table->comments ) {
            push @html,
                $q->b("Comments:"),
                $q->br,
                $q->em(map { $q->br, $_ } @comments);
        }

        #
        # Fields
        #
        push @html,
            $q->start_table({ -border => 1 }),
            $q->Tr(
                $q->th({ -class => 'FieldHeader' },
                    [
                        'Field Name',
                        'Data Type',
                        'Size',
                        'Default Value',
                        'Other',
                        'Foreign Key'
                    ]
                )
            );

        my $i = 0;  # row counter for alternating odd/even row classes
        for my $field ( @fields ) {
            my $name      = $field->name      || '';
               $name      = qq[$name];
            my $data_type = $field->data_type || '';
            my $size      = defined $field->size          ? $field->size          : '';
            my $default   = defined $field->default_value ? $field->default_value : '';
            my $comment   = $field->comments  || '';
            my $fk = '';

            if ($field->is_foreign_key) {
                my $c = $field->foreign_key_reference;
                my $ref_table = $c->reference_table       || '';
                my $ref_field = ($c->reference_fields)[0] || '';
                $fk = qq[$ref_table.$ref_field];
            }

            my @other = ();
            push @other, 'PRIMARY KEY' if $field->is_primary_key;
            push @other, 'UNIQUE'      if $field->is_unique;
            push @other, 'NOT NULL'    unless $field->is_nullable;
            push @other, $comment      if $comment;
            my $class = $i++ % 2 ? 'even' : 'odd';
            push @html,
                $q->Tr( { -class => "tr-$class" },
                    $q->td({ -class => "FieldCellName" },    $name),
                    $q->td({ -class => "FieldCellType" },    $data_type),
                    $q->td({ -class => "FieldCellSize" },    $size),
                    $q->td({ -class => "FieldCellDefault" }, $default),
                    $q->td({ -class => "FieldCellOther" },   join(', ', @other)),
                    $q->td({ -class => "FieldCellFK" },      $fk),
                );
        }
        push @html, $q->end_table;

        #
        # Indices
        #
        if ( my @indices = $table->get_indices ) {
            push @html,
                $q->h3('Indices'),
                $q->start_table({ -border => 1 }),
                $q->Tr({ -class => 'IndexRow' },
                    $q->th([ 'Name', 'Fields' ])
                );

            for my $index ( @indices ) {
                my $name   = $index->name || '';
                my $fields = join( ', ', $index->fields ) || '';

                push @html,
                    $q->Tr({ -class => 'IndexCell' },
                        $q->td( [ $name, $fields ] )
                    );
            }

            push @html, $q->end_table;
        }

        #
        # Constraints (all except the primary key, which is shown above)
        #
        my @constraints =
            grep { $_->type ne PRIMARY_KEY } $table->get_constraints;
        if ( @constraints ) {
            push @html,
                $q->h3('Constraints'),
                $q->start_table({ -border => 1 }),
                $q->Tr({ -class => 'IndexRow' },
                    $q->th([ 'Type', 'Fields' ])
                );

            for my $c ( @constraints ) {
                my $type   = $c->type || '';
                my $fields = join( ', ', $c->fields ) || '';

                push @html,
                    $q->Tr({ -class => 'IndexCell' },
                        $q->td( [ $type, $fields ] )
                    );
            }

            push @html, $q->end_table;
        }

        push @html, $q->hr;
    }

    my $sqlt_version = $t->version;
    if ($wrap) {
        push @html,
            qq[Created by ],
            qq[SQL::Translator $sqlt_version],
            $q->end_html;
    }

    return join "\n", @html;
}

1;

#
------------------------------------------------------------------- # Always be ready to speak your mind, # and a base man will avoid you. # William Blake # ------------------------------------------------------------------- =head1 NAME SQL::Translator::Producer::HTML - HTML producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator::Producer::HTML; =head1 DESCRIPTION Creates an HTML document describing the tables. The HTML produced is composed of a number of tables: =over 4 =item Links A link table sits at the top of the output, and contains anchored links to elements in the rest of the document. If the I producer arg is present, then this table is not produced. =item Tables Each table in the schema has its own HTML table. The top row is a row of EthE elements, with a class of B; these elements are I, I, I, I, I and I. Each successive row describes one field in the table, and has a class of B, where $item id corresponds to the label of the column. For example: id int 11 PRIMARY KEY, NOT NULL foo varchar 255 NOT NULL updated timestamp 0 =back Unless the I producer arg is present, the HTML will be enclosed in a basic HTML header and footer. If the I producer arg is present, the generated HTML will be nicely spaced and human-readable. Otherwise, it will have very little insignificant whitespace and be generally smaller. =head1 AUTHORS Ken Youens-Clark Ekclark@cpan.orgE, Darren Chamberlain Edarren@cpan.orgE. =cut SQL-Translator-1.60/lib/SQL/Translator/Producer/TT/0000755000000000000000000000000013473557372021714 5ustar00rootroot00000000000000SQL-Translator-1.60/lib/SQL/Translator/Producer/TT/Table.pm0000644000000000000000000002147013473550070023271 0ustar00rootroot00000000000000package SQL::Translator::Producer::TT::Table; =pod =head1 NAME SQL::Translator::Producer::TT::Table - Produces output using the Template Toolkit from a SQL schema, per table. 
=head1 SYNOPSIS

 # Normal STDOUT version
 #
 my $translator = SQL::Translator->new(
     from          => 'MySQL',
     filename      => 'foo_schema.sql',
     to            => 'TT::Table',
     producer_args => {
         tt_table => 'foo_table.tt',
     },
 );
 print $translator->translate;

 # To generate a file per table
 #
 my $translator = SQL::Translator->new(
     from          => 'MySQL',
     filename      => 'foo_schema.sql',
     to            => 'TT::Table',
     producer_args => {
         tt_table      => 'foo_table.tt.html',
         mk_files      => 1,
         mk_files_base => "./doc/tables",
         mk_file_ext   => ".html",
         on_exists     => "replace",
     },
 );
 #
 # ./doc/tables/ now contains the templated tables as $tablename.html
 #

=head1 DESCRIPTION

Produces schema output using a given Template Toolkit template,
processing that template for each table in the schema. Optionally
allows you to write the result for each table to a separate file.

It needs one additional producer_arg of C<tt_table> which is the file
name of the template to use. This template will be passed a template
var of C<table>, which is the current L<SQL::Translator::Schema::Table>
we are producing, which you can then use to walk the schema via the
methods documented in that module. You also get C<schema> as a shortcut
to the L<SQL::Translator::Schema> for the table and C<translator>, the
L<SQL::Translator> object for this parse in case you want to get access
to any of the options etc set here.

Here's a brief example of what the template could look like:

 [% table.name %]
 ================
 [% FOREACH field = table.get_fields %]
     [% field.name %] [% field.data_type %]([% field.size %])
 [% END -%]

See the example templates shipped with the distribution for a more
complete example.

You can also set any of the options used to initialize the Template
object by adding them to your producer_args. See Template Toolkit docs
for details of the options.

 $translator = SQL::Translator->new(
     to            => 'TT',
     producer_args => {
         ttfile       => 'foo_template.tt',
         INCLUDE_PATH => '/foo/templates/tt',
         INTERPOLATE  => 1,
     },
 );

If you set C<mk_files> and its additional options the producer will
write a separate file for each table in the schema.
This is useful for producing things like HTML documentation where every
table gets its own page (you could also use the TTSchema producer to add
an index page). It's also particularly good for code generation where
you want to produce a class file per table.

=head1 OPTIONS

=over 4

=item tt_table

File name of the template to run for each table.

=item mk_files

Set to true to output a file for each table in the schema (as well as
returning the whole lot back to the Translator and hence STDOUT). The
file will be named after the table, with the optional C<mk_file_ext>
added and placed in the directory C<mk_files_base>.

=item mk_file_ext

Extension (without the dot) to add to the filename when using mk_files.

=item mk_files_base = DIR

Dir to build the table files into when using mk_files. Defaults to the
current directory.

=item mk_file_dir

Set true and if the file needs to be written to a directory that doesn't
exist, it will be created first.

=item on_exists [Default: die]

What to do if we are running with mk_files and a file already exists
where we want to write our output. One of "skip", "die", "replace",
"insert". The default is die.

B<replace> - Over-write the existing file with the new one, clobbering
anything already there.

B<skip> - Leave the original file as it was and don't write the new
version anywhere.

B<die> - Die with an existing file error.

B<insert> - Insert the generated output into the file between a set of
special comments (defined by the following options.) Any code between
the comments will be overwritten (ie the results from a previous
produce) but the rest of the file is left alone (your custom code).
This is particularly useful for code generation as it allows you to
generate schema derived code and then add your own custom code to the
file. Then when the schema changes you just re-produce to insert the
new code.

=item insert_comment_start

The comment to look for in the file when on_exists is C<insert>. Default
is C<SQLF_INSERT_START>. Must appear on its own line, with only
whitespace either side, to be recognised.
=item insert_comment_end

The end comment to look for in the file when on_exists is C<insert>.
Default is C<SQLF_INSERT_END>. Must appear on its own line, with only
whitespace either side, to be recognised.

=back

=cut

use strict;
use warnings;

our ( $DEBUG, @EXPORT_OK );
our $VERSION = '1.60';
$DEBUG = 0 unless defined $DEBUG;

use File::Path;
use Template;
use Data::Dumper;
use Exporter;
use base qw(Exporter);
@EXPORT_OK = qw(produce);

use SQL::Translator::Utils 'debug';

# The current translator object, stashed at produce() time so the helper
# subs below can reach the producer args without threading them through
# every call.
my $Translator;

# Producer entry point: process the tt_table template once per table
# (in schema order) and return the concatenated output. When the
# mk_files producer arg is set, additionally write each table's output
# to its own file (see table_file/write_file).
sub produce {
    $Translator = shift;
    local $DEBUG = $Translator->debug;
    my $scma  = $Translator->schema;
    my $pargs = $Translator->producer_args;
    my $file  = $pargs->{'tt_table'} or die "No template file given!";
    $pargs->{on_exists} ||= "die";

    debug "Processing template $file\n";
    my $out = '';
    my $tt  = Template->new(
        DEBUG    => $DEBUG,
        ABSOLUTE => 1,  # Set so we can use from the command line sensibly
        RELATIVE => 1,  # Maybe the cmd line code should set it! Security!
        %$pargs,        # Allow any TT opts to be passed in the producer_args
    ) || die "Failed to initialize Template object: " . Template->error;

    for my $tbl ( sort { $a->order <=> $b->order } $scma->get_tables ) {
        my $outtmp;
        $tt->process(
            $file,
            {
                translator => $Translator,
                schema     => $scma,
                table      => $tbl,
            },
            \$outtmp
        )
          or die "Error processing template '$file' for table '"
          . $tbl->name . "': "
          . $tt->error;
        $out .= $outtmp;

        # Write out the file...
        write_file( table_file($tbl), $outtmp ) if $pargs->{mk_files};
    }

    return $out;
}

# Work out the filename for a given table: mk_files_base / table name,
# plus mk_file_ext. Relies on the table object stringifying to its name.
sub table_file {
    my ($tbl) = shift;
    my $pargs = $Translator->producer_args;
    my $root  = $pargs->{mk_files_base};
    my $ext   = $pargs->{mk_file_ext};
    return "$root/$tbl.$ext";
}

# Write the src given to the file given, handling the on_exists arg
# (skip / die / replace / insert). Creates the target directory first
# when mk_file_dir is set.
sub write_file {
    my ( $file, $src ) = @_;
    my $pargs = $Translator->producer_args;

    if ( -e $file ) {
        if ( $pargs->{on_exists} eq "skip" ) {
            warn "Skipping existing $file\n";
            return 1;
        }
        elsif ( $pargs->{on_exists} eq "die" ) {
            die "File $file already exists.\n";
        }
        elsif ( $pargs->{on_exists} eq "replace" ) {
            warn "Replacing $file.\n";
        }
        elsif ( $pargs->{on_exists} eq "insert" ) {
            warn "Inserting into $file.\n";
            $src = insert_code( $file, $src );
        }
        else {
            die "Unknown on_exists action: $pargs->{on_exists}\n";
        }
    }
    else {
        # Only chat about newly created files when run interactively.
        if ( -t STDIN && -t STDOUT ) {
            warn "Creating $file.\n";
        }
    }

    my ($dir) = $file =~ m!^(.*)/!;   # Want greedy, everything before the last /
    if ( $dir and not -d $dir and $pargs->{mk_file_dir} ) {
        mkpath($dir);
    }

    debug "Writing to $file\n";
    # FIX: three-arg open with a lexical filehandle (the original used a
    # bareword FILE handle with two-arg open), and check close so buffered
    # write errors are not silently dropped.
    open my $fh, '>', $file or die "Error opening file $file : $!\n";
    print {$fh} $src;
    close $fh or die "Error closing file $file : $!\n";
}

# Reads file and inserts code between the insert comments and returns the new
# source.
sub insert_code {
    my ( $file, $src ) = @_;
    my $pargs  = $Translator->producer_args;
    my $cstart = $pargs->{insert_comment_start} || "SQLF_INSERT_START";
    my $cend   = $pargs->{insert_comment_end}   || "SQLF_INSERT_END";

    # Slurp in the original file.
    # FIX: the readline was missing here ("my $orig = ;"), apparently a
    # lost "<FILE>" — restored with a lexical handle and a scoped slurp.
    open my $fh, '<', $file or die "Error opening file $file : $!\n";
    my $orig = do { local $/; <$fh> };
    close $fh;

    # Insert the new code between the insert comments.
    # NOTE(review): $cstart/$cend are interpolated into the regex verbatim,
    # so regex metacharacters in user-supplied comment markers will be
    # treated as pattern syntax — confirm whether \Q...\E is wanted.
    unless ( $orig =~
        s/^\s*?$cstart\s*?\n.*?^\s*?$cend\s*?\n/\n$cstart\n$src\n$cend\n/ms )
    {
        warn "No insert done\n";
    }

    return $orig;
}

1;

=pod

=head1 AUTHOR

Mark Addison E<lt>grommit@users.sourceforge.netE<gt>.

=head1 TODO

- Some tests for the various on exists options (they have been tested
implicitly through use in a project but need some proper tests).

- More docs on code generation strategies.

- Better hooks for filename generation.

- Integrate with L<TT::Base> and L<TTSchema>.

=head1 SEE ALSO

SQL::Translator.
=cut

SQL-Translator-1.60/lib/SQL/Translator/Producer/TT/Base.pm0000644000000000000000000002062413473550070023114 0ustar00rootroot00000000000000package SQL::Translator::Producer::TT::Base;

=pod

=head1 NAME

SQL::Translator::Producer::TT::Base - TT (Template Toolkit) based Producer base class.

=cut

use strict;
use warnings;

our @EXPORT_OK;
our $VERSION = '1.60';

use Template;
use Data::Dumper;
use IO::Handle;
use Exporter;
use base qw(Exporter);
@EXPORT_OK = qw(produce);

use SQL::Translator::Utils 'debug';

# Hack to convert the produce call into an object. ALL sub-classes need to do
# this so that the correct class gets created.
sub produce { return __PACKAGE__->new( translator => shift )->run; };

# Constructor. Works as a class or instance method; requires a
# 'translator' argument (the driving SQL::Translator object) and dies
# without one. Any other arguments are currently discarded.
sub new {
    my $proto = shift;
    my $class = ref $proto || $proto;
    my %args  = @_;

    my $me = bless {}, $class;
    $me->{translator} = delete $args{translator} || die "Need a translator.";

    return $me;
}

# Read-only accessor for the translator object passed to new().
sub translator { shift->{translator}; }

# Delegates to the translator's schema accessor.
sub schema { shift->{translator}->schema(@_); }

# Util args access method.
# No args - Return hashref (the actual hash in Translator) or hash of args.
# 1 arg - Return that named args value.
# Args - List of names. Return values of the given arg names in list context
# or return as hashref in scalar context. Any names given that don't
# exist in the args are returned as undef.
sub args {
    my $me = shift;

    # No args
    unless (@_) {
        return wantarray
            ? %{ $me->{translator}->producer_args }
            : $me->{translator}->producer_args
        ;
    }

    # 1 arg. Return the value whatever the context.
    return $me->{translator}->producer_args->{$_[0]} if @_ == 1;

    # More args so return values list or hash ref
    my %args = %{ $me->{translator}->producer_args };
    return wantarray ? @args{@_} : { map { ($_=>$args{$_}) } @_ };
}

# Run the produce and return the result.
# Builds the Template object (letting sub-classes inject config via
# tt_config and producer_args override anything) and processes the
# template returned by tt_schema with the tt_default_vars/tt_vars vars.
sub run {
    my $me   = shift;
    my $scma = $me->schema;
    my %args = %{$me->args};
    my $tmpl = $me->tt_schema or die "No template!";

    debug "Processing template $tmpl\n";
    my $out;
    my $tt = Template->new(
        #DEBUG => $me->translator->debug,
        ABSOLUTE => 1,  # Set so we can use from the command line sensibly
        RELATIVE => 1,  # Maybe the cmd line code should set it! Security!
        $me->tt_config, # Hook for sub-classes to add config
        %args,          # Allow any TT opts to be passed in the producer_args
    ) || die "Failed to initialize Template object: ".Template->error;

    $tt->process( $tmpl, {
        $me->tt_default_vars,
        $me->tt_vars,   # Sub-class hook for adding vars
    }, \$out )
        or die "Error processing template '$tmpl': ".$tt->error;

    return $out;
}

# Sub class hooks
#-----------------------------------------------------------------------------

# Hook: extra Template->new() config from sub-classes. Default: none.
sub tt_config { () };

# Template source: the 'ttfile' producer arg when given; otherwise the
# calling class's __DATA__ section, slurped and returned as a string ref
# (Template accepts either a filename or a ref to the template text).
sub tt_schema {
    my $me    = shift;
    my $class = ref $me;

    my $file = $me->args("ttfile");
    return $file if $file;

    # Reach into the sub-class's symbol table for its DATA filehandle.
    no strict 'refs';
    my $ref = *{"$class\:\:DATA"}{IO};
    if ( $ref->opened ) {
        local $/ = undef; # Slurp mode
        return \<$ref>;
    }

    undef;
};

# Vars always made available to the template: the translator itself and
# the (optionally pre-processed) schema.
sub tt_default_vars {
    my $me = shift;
    return (
        translator => $me->translator,
        schema     => $me->pre_process_schema($me->translator->schema),
    );
}

# Hook: munge the schema before it reaches the template. Default: identity.
sub pre_process_schema { $_[1] }

# Hook: extra template vars from sub-classes. Default: none.
sub tt_vars { () };

1;

=pod

=head1 SYNOPSIS

 # Create a producer using a template in the __DATA__ section.
 package SQL::Translator::Producer::Foo;

 use base qw/SQL::Translator::Producer::TT::Base/;

 # Convert produce call into a method call on our new class
 sub produce { return __PACKAGE__->new( translator => shift )->run; };

 # Configure the Template object.
 sub tt_config { ( INTERPOLATE => 1 ); }

 # Extra vars to add to the template
 sub tt_vars { ( foo => "bar" ); }

 # Put template in DATA section (or use file with ttfile producer arg)
 __DATA__
 Schema

 Database: [% schema.database %]
 Foo: $foo
 ...
=head1 DESCRIPTION A base class producer designed to be sub-classed to create new TT based producers cheaply - by simply giving the template to use and sprinkling in some extra template variables and config. You can find an introduction to this module in L. The 1st thing the module does is convert the produce sub routine call we get from SQL::Translator into a method call on an object, which we can then sub-class. This is done with the following code which needs to appear in B sub classes. # Convert produce call into an object method call sub produce { return __PACKAGE__->new( translator => shift )->run; }; See L below for details. The upshot of this is we can make new template producers by sub classing this base class, adding the above snippet and a template. The module also provides a number of hooks into the templating process, see L for details. See the L above for an example of creating a simple producer using a single template stored in the producers DATA section. =head1 SUB CLASS HOOKS Sub-classes can override these methods to control the templating by giving the template source, adding variables and giving config to the Tempate object. =head2 tt_config sub tt_config { ( INTERPOLATE => 1 ); } Return hash of Template config to add to that given to the L