SQL-Translator-1.62/0000755000000000000000000000000013727667056014251 5ustar00rootroot00000000000000SQL-Translator-1.62/META.json0000644000000000000000000000562213727667056015677 0ustar00rootroot00000000000000{ "abstract" : "SQL DDL transformations and more", "author" : [ "Ken Youens-Clark " ], "dynamic_config" : 0, "generated_by" : "ExtUtils::MakeMaker version 7.44, CPAN::Meta::Converter version 2.150010", "license" : [ "perl_5" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : 2 }, "name" : "SQL-Translator", "no_index" : { "directory" : [ "t", "inc", "maint", "share", "xt" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : "6.54", "File::ShareDir::Install" : "0" } }, "develop" : { "requires" : { "GD" : "0", "Graph::Directed" : "0", "GraphViz" : "0", "Software::LicenseUtils" : "0", "Spreadsheet::ParseExcel" : "0.41", "Template" : "2.20", "Test::EOL" : "1.1", "Test::NoTabs" : "1.1", "Text::RecordParser" : "0.02", "XML::LibXML" : "1.69" } }, "runtime" : { "recommends" : { "GD" : "0", "Graph::Directed" : "0", "GraphViz" : "0", "Spreadsheet::ParseExcel" : "0.41", "Template" : "2.20", "Text::RecordParser" : "0.02", "XML::LibXML" : "1.69" }, "requires" : { "Carp::Clan" : "0", "DBI" : "1.54", "Digest::SHA" : "0", "File::ShareDir" : "1.0", "Moo" : "1.000003", "Package::Variant" : "1.001001", "Parse::RecDescent" : "1.967009", "Scalar::Util" : "0", "Sub::Quote" : "0", "Try::Tiny" : "0.04", "perl" : "5.008001" } }, "test" : { "requires" : { "JSON::MaybeXS" : "1.003003", "Test::Differences" : "0", "Test::Exception" : "0.31", "Test::More" : "0.88", "Text::ParseWords" : "0", "XML::Writer" : "0.500", "YAML" : "0.66" } } }, "release_status" : "stable", "resources" : { "bugtracker" : { "mailto" : "bug-SQL-Translator@rt.cpan.org", "web" : "https://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Translator" }, "license" : [ "http://dev.perl.org/licenses/" ], "repository" : { "type" : "git", "web" : "https://github.com/dbsrgits/sql-translator/" }, "x_IRC" : "irc://irc.perl.org/#sql-translator", "x_Ratings" : "http://cpanratings.perl.org/d/SQL-Translator" }, "version" : "1.62", "x_authority" : "cpan:JROBINSON", "x_serialization_backend" : "JSON::PP version 4.04" } SQL-Translator-1.62/META.yml0000644000000000000000000000271013727667056015522 0ustar00rootroot00000000000000--- abstract: 'SQL DDL transformations and more' author: - 'Ken Youens-Clark ' build_requires: ExtUtils::MakeMaker: '0' JSON::MaybeXS: '1.003003' Test::Differences: '0' Test::Exception: '0.31' Test::More: '0.88' Text::ParseWords: '0' XML::Writer: '0.500' YAML: '0.66' configure_requires: ExtUtils::MakeMaker: '6.54' File::ShareDir::Install: '0' dynamic_config: 0 generated_by: 'ExtUtils::MakeMaker version 7.44, CPAN::Meta::Converter version 2.150010' license: perl meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: SQL-Translator no_index: directory: - t - inc - maint - share - xt recommends: GD: '0' Graph::Directed: '0' GraphViz: '0' Spreadsheet::ParseExcel: '0.41' Template: '2.20' Text::RecordParser: '0.02' XML::LibXML: '1.69' requires: Carp::Clan: '0' DBI: '1.54' Digest::SHA: '0' File::ShareDir: '1.0' Moo: '1.000003' Package::Variant: '1.001001' Parse::RecDescent: '1.967009' Scalar::Util: '0' Sub::Quote: '0' Try::Tiny: '0.04' perl: '5.008001' resources: IRC: irc://irc.perl.org/#sql-translator Ratings: http://cpanratings.perl.org/d/SQL-Translator bugtracker: 
https://rt.cpan.org/NoAuth/Bugs.html?Dist=SQL-Translator license: http://dev.perl.org/licenses/ repository: https://github.com/dbsrgits/sql-translator/ version: '1.62' x_authority: cpan:JROBINSON x_serialization_backend: 'CPAN::Meta::YAML version 0.018' SQL-Translator-1.62/AUTHORS0000644000000000000000000000514413647112552015310 0ustar00rootroot00000000000000The following people have contributed to the SQLFairy project: - Aaron Schrab - Adam Strzelecki - Alexander Hartmaier - Allen Day - Amiri Barksdale - Anders Nor Berle - André Walker - Andreas 'ac0v' Specht - Andrew Moore - Andrew Gregory - Andrew Pam - Arthur Axel "fREW" Schmidt - Ben Faga - Cedric Carree - Chris Hilton - Chris Mungall - Chris To - Colin Newell - Dagfinn Ilmari Mannsåker - Daniel Ruoso - Darren Chamberlain - Dave Cash - Fabien Wernli - Fabrice Gabolde - Geoff Cant - Gudmundur A. Thorisson - Guillermo Roditi - Ivan Baidakou (basiliscos) - Jaime Soriano Pastor - Jason Williams - Johan Viklund - John Goulah - John Napiorkowski - Jonathan Yu - Karen Etheridge - Ken Youens-Clark - Kevin McClellan - Lukas 'mauke' Mai - Lukas Thiemeier - Mark Addison - Maximilian Gass - H.Merijn Brand - Michal Jurosz - Mikey Melillo - Moritz Onken - Nick Wellnhofer - Paul Harrington - Peter Rabbitson - Robert Bohne - Ross Smith II - Ryan D Johnson - Salvatore Bonaccorso - Sam Angiuoli - Sebastian Knapp - Stephen Bennett - Stephen Clouse - SymKat - Tina Müller - Vincent Bachelier - Wallace Reis - William Wolf - Ying Zhang - Zefram SQL-Translator-1.62/maint/0000755000000000000000000000000013727667056015361 5ustar00rootroot00000000000000SQL-Translator-1.62/maint/Makefile.PL.include0000644000000000000000000000641413466754536020762 0ustar00rootroot00000000000000BEGIN { -e 'Distar' or system("git clone git://git.shadowcat.co.uk/p5sagit/Distar.git") } use lib 'Distar/lib'; use Distar 0.001; author 'Ken Youens-Clark '; manifest_include 'script' => qr/.+/; manifest_include 't/data' => qr/.+/; manifest_include 'share' => qr/.+/; manifest_include '' => qr/\A(?:AUTHORS)\z/; # eval so can generate deps for cpanm --installdeps . 
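# A deliberate design choice: if the optional tooling these helpers need is
# missing, the eval below reports the failure but does not abort Makefile.PL,
# so plain dependency resolution (cpanm --installdeps .) keeps working.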
eval { _recompile_grammars(); _recreate_rt_source(); }; print "Got errors:\n\n$@" if $@; sub _recompile_grammars { return; # disabled until RT#74593 is resolved require File::Spec; my $compiled_parser_dir = File::Spec->catdir(qw/ share PrecompiledParsers Parse RecDescent DDL SQLT /); # Currently consider only single-name parsers containing a grammar marker # This is somewhat fragile, but better than loading all kinds of parsers # to some of which we may not even have the deps my $parser_libdir = 'lib/SQL/Translator/Parser'; for my $parser_fn (glob "$parser_libdir/*.pm") { die "$parser_fn does not look like a readable file\n" unless ( -f $parser_fn and -r $parser_fn ); my ($type) = $parser_fn =~ /^\Q$parser_libdir\E\/(.+)\.pm$/i or die "$parser_fn not named in expected format\n"; my $parser_source = do { local (@ARGV, $/) = $parser_fn; <> }; next unless $parser_source =~ /\$GRAMMAR.+?END_OF_GRAMMAR/s; my $precomp_parser_fn = File::Spec->catfile($compiled_parser_dir, "$type.pm"); next if ( -f $precomp_parser_fn and (stat($parser_fn))[9] <= (stat($precomp_parser_fn))[9] ); print "Precompiling parser for $type\n"; require $parser_fn; require Parse::RecDescent; Parse::RecDescent->Precompile( do { no strict 'refs'; ${"SQL::Translator::Parser::${type}::GRAMMAR"} || die "No \$GRAMMAR global found in SQL::Translator::Parser::$type ($parser_fn)\n" }, "Parse::RecDescent::DDL::SQLT::$type" ); rename( "$type.pm", $precomp_parser_fn ) or die "Unable to move $type.pm to $compiled_parser_dir: $!\n"; } } sub _recreate_rt_source { my $base_xml = "t/data/roundtrip.xml"; my $autogen_yaml = "t/data/roundtrip_autogen.yaml"; print "Updating $autogen_yaml\n"; unlink $autogen_yaml; eval { use lib 'lib'; require SQL::Translator; require SQL::Translator::Parser::XML; open (my $fh, '>', $autogen_yaml) or die "$autogen_yaml: $!\n"; my $tr = SQL::Translator->new; my $yaml = $tr->translate ( parser => 'XML', file => $base_xml, producer => 'YAML', ) or die sprintf ("Unable to translate %s to YAML: %s\n", $base_xml, $tr->error || 'error unknown' ); print $fh $yaml; close $fh; }; if ($@) { die < } = { => , ... 
} my %ATTRIBUTES = ( field => { name => undef, data_type => '', default_value => undef, size => '0', is_primary_key => 0, is_unique => 0, is_nullable => 1, is_foreign_key => 0, is_auto_increment => 0, comments => '', extra => {}, # foreign_key_reference, is_valid => 1, # order }, constraint => { name => '', type => '', deferrable => 1, expression => '', is_valid => 1, fields => [], match_type => '', options => [], on_delete => '', on_update => '', reference_fields => [], reference_table => '', extra => {}, }, index => { fields => [], is_valid => 1, name => "", options => [], type => NORMAL, extra => {}, }, view => { name => "", sql => "", fields => [], is_valid => 1, extra => {}, }, trigger => { name => '', perform_action_when => undef, database_events => undef, on_table => undef, action => undef, is_valid => 1, extra => {}, }, procedure => { name => '', sql => '', parameters => [], owner => '', comments => '', extra => {}, }, table => { comments => undef, name => '', #primary_key => undef, # pkey constraint options => [], #order => 0, fields => undef, constraints => undef, indices => undef, is_valid => 1, extra => {}, }, schema => { name => '', database => '', procedures => undef, # [] when set tables => undef, # [] when set triggers => undef, # [] when set views => undef, # [] when set is_valid => 1, extra => {}, } ); # Given a test hash and schema object name set any attribute keys not present in # the test hash to their default value for that schema object type. # e.g. default_attribs( $test, "field" ); sub default_attribs { my ($hashref, $object_type) = @_; if ( !exists $ATTRIBUTES{ $object_type } ) { die "Can't add default attribs for unknown Schema " . "object type '$object_type'."; } for my $attr ( grep { !exists $hashref->{ $_ } } keys %{ $ATTRIBUTES{ $object_type } } ) { $hashref->{ $attr } = $ATTRIBUTES{ $object_type }{ $attr } } return $hashref; } # Format test name so it will prepend the test names used below. sub t_name { my $name = shift; $name ||= ""; $name = "$name - " if $name; return $name; } sub field_ok { my ($f1,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"field"); unless ($f1) { fail " Field '$test->{name}' doesn't exist!"; # TODO Do a skip on the following tests. Currently the test counts wont # match at the end. So at least it fails. return; } my $full_name = $f1->table->name.".".$test->{name}; is( $f1->name, $test->{name}, "${t_name}Field '$full_name'" ); is( $f1->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? '' : 'not ').'valid' ); is( $f1->data_type, $test->{data_type}, "$t_name type is '$test->{data_type}'" ); is( $f1->size, $test->{size}, "$t_name size is '$test->{size}'" ); is( $f1->default_value, $test->{default_value}, "$t_name default value is " .(defined($test->{default_value}) ? "'$test->{default_value}'" : "UNDEF" ) ); is( $f1->is_nullable, $test->{is_nullable}, "$t_name ".($test->{is_nullable} ? 'can' : 'cannot').' be null' ); is( $f1->is_unique, $test->{is_unique}, "$t_name ".($test->{is_unique} ? 'can' : 'cannot').' be unique' ); is( $f1->is_primary_key, $test->{is_primary_key}, "$t_name is ".($test->{is_primary_key} ? '' : 'not ').'a primary_key' ); is( $f1->is_foreign_key, $test->{is_foreign_key}, "$t_name is ".($test->{is_foreign_key} ? '' : 'not').' a foreign_key' ); is( $f1->is_auto_increment, $test->{is_auto_increment}, "$t_name is " .($test->{is_auto_increment} ? 
'' : 'not ').'an auto_increment' ); is( $f1->comments, $test->{comments}, "$t_name comments" ); is_deeply( { $f1->extra }, $test->{extra}, "$t_name extra" ); } sub constraint_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"constraint"); is( $obj->name, $test->{name}, "${t_name}Constraint '$test->{name}'" ); is( $obj->type, $test->{type}, "$t_name type is '$test->{type}'" ); is( $obj->deferrable, $test->{deferrable}, "$t_name ".($test->{deferrable} ? 'can' : 'cannot').' be deferred' ); is( $obj->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? '' : 'not ').'valid' ); is($obj->table->name,$test->{table},"$t_name table is '$test->{table}'" ); is( $obj->expression, $test->{expression}, "$t_name expression is '$test->{expression}'" ); is_deeply( [$obj->fields], $test->{fields}, "$t_name fields are '".join(",",@{$test->{fields}})."'" ); is( $obj->reference_table, $test->{reference_table}, "$t_name reference_table is '$test->{reference_table}'" ); is_deeply( [$obj->reference_fields], $test->{reference_fields}, "$t_name reference_fields are '".join(",",@{$test->{reference_fields}})."'" ); is( $obj->match_type, $test->{match_type}, "$t_name match_type is '$test->{match_type}'" ); is( $obj->on_delete, $test->{on_delete}, "$t_name on_delete is '$test->{on_delete}'" ); is( $obj->on_update, $test->{on_update}, "$t_name on_update is '$test->{on_update}'" ); is_deeply( [$obj->options], $test->{options}, "$t_name options are '".join(",",@{$test->{options}})."'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); } sub index_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"index"); is( $obj->name, $test->{name}, "${t_name}Index '$test->{name}'" ); is( $obj->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? '' : 'not ').'valid' ); is( $obj->type, $test->{type}, "$t_name type is '$test->{type}'" ); is_deeply( [$obj->fields], $test->{fields}, "$t_name fields are '".join(",",@{$test->{fields}})."'" ); is_deeply( [$obj->options], $test->{options}, "$t_name options are '".join(",",@{$test->{options}})."'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); } sub trigger_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"index"); is( $obj->name, $test->{name}, "${t_name}Trigger '$test->{name}'" ); is( $obj->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? '' : 'not ').'valid' ); is( $obj->perform_action_when, $test->{perform_action_when}, "$t_name perform_action_when is '$test->{perform_action_when}'" ); is( join(',', $obj->database_events), $test->{database_events}, sprintf("%s database_events is '%s'", $t_name, $test->{'database_events'}, ) ); is( $obj->on_table, $test->{on_table}, "$t_name on_table is '$test->{on_table}'" ); is( $obj->scope, $test->{scope}, "$t_name scope is '$test->{scope}'" ) if exists $test->{scope}; is( $obj->action, $test->{action}, "$t_name action is '$test->{action}'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); } sub view_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"index"); #isa_ok( $v, 'SQL::Translator::Schema::View', 'View' ); is( $obj->name, $test->{name}, "${t_name}View '$test->{name}'" ); is( $obj->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? 
'' : 'not ').'valid' ); is( $obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'" ); is_deeply( [$obj->fields], $test->{fields}, "$t_name fields are '".join(",",@{$test->{fields}})."'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); } sub procedure_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"index"); #isa_ok( $v, 'SQL::Translator::Schema::View', 'View' ); is( $obj->name, $test->{name}, "${t_name}Procedure '$test->{name}'" ); is( $obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'" ); is_deeply( [$obj->parameters], $test->{parameters}, "$t_name parameters are '".join(",",@{$test->{parameters}})."'" ); is( $obj->comments, $test->{comments}, "$t_name comments is '$test->{comments}'" ); is( $obj->owner, $test->{owner}, "$t_name owner is '$test->{owner}'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); } sub table_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"table"); my %arg = %$test; my $tbl_name = $arg{name} || die "Need a table name to test."; is( $obj->{name}, $arg{name}, "${t_name}Table '$arg{name}'" ); is_deeply( [$obj->options], $test->{options}, "$t_name options are '".join(",",@{$test->{options}})."'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); # Fields if ( $arg{fields} ) { my @fldnames = map {$_->{name}} @{$arg{fields}}; is_deeply( [ map {$_->name} $obj->get_fields ], [ @fldnames ], "${t_name} field names are ".join(", ",@fldnames) ); foreach ( @{$arg{fields}} ) { my $f_name = $_->{name} || die "Need a field name to test."; next unless my $fld = $obj->get_field($f_name); field_ok( $fld, $_, $name ); } } else { is(scalar($obj->get_fields), undef, "${t_name} has no fields."); } # Constraints and Indices _test_kids($obj, $test, $name, { constraint => 'constraints', index => 'indices', }); } sub _test_kids { my ( $obj, $test, $name, $kids ) = @_; my $t_name = t_name($name); my $obj_name = ref $obj; ($obj_name) = $obj_name =~ m/^.*::(.*)$/; while ( my ( $object_type, $plural ) = each %$kids ) { next unless defined $test->{ $plural }; if ( my @tests = @{ $test->{ $plural } } ) { my $meth = "get_$plural"; my @objects = $obj->$meth; is( scalar(@objects), scalar(@tests), "${t_name}$obj_name has " . scalar(@tests) . " $plural" ); for my $object (@objects) { my $ans = { lc($obj_name) => $obj->name, %{ shift @tests } }; my $meth = "${object_type}_ok"; { no strict 'refs'; $meth->( $object, $ans, $name ); } } } } } sub schema_ok { my ($obj,$test,$name) = @_; my $t_name = t_name($name); default_attribs($test,"schema"); is( $obj->name, $test->{name}, "${t_name}Schema name is '$test->{name}'" ); is( $obj->database, $test->{database}, "$t_name database is '$test->{database}'" ); is_deeply( { $obj->extra }, $test->{extra}, "$t_name extra" ); is( $obj->is_valid, $test->{is_valid}, "$t_name is ".($test->{is_valid} ? 
'' : 'not ').'valid' ); # Tables if ( $test->{tables} ) { is_deeply( [ map {$_->name} $obj->get_tables ], [ map {$_->{name}} @{$test->{tables}} ], "${t_name} table names match" ); foreach ( @{$test->{tables}} ) { my $t_name = $_->{name} || die "Need a table name to test."; table_ok( $obj->get_table($t_name), $_, $name ); } } else { is(scalar($obj->get_tables), undef, "${t_name} has no tables."); } # Procedures, Triggers, Views _test_kids($obj, $test, $name, { procedure => 'procedures', trigger => 'triggers', view => 'views', }); } # maybe_plan($ntests, @modules) # # Calls plan $ntests if @modules can all be loaded; otherwise, # calls skip_all with an explanation of why the tests were skipped. sub maybe_plan { my ($ntests, @modules) = @_; my @errors; for my $module (@modules) { eval "use $module;"; next if !$@; if ($@ =~ /Can't locate (\S+)/) { my $mod = $1; $mod =~ s/\.pm$//; $mod =~ s#/#::#g; push @errors, $mod; } elsif ($@ =~ /([\w\:]+ version [\d\.]+) required.+?this is only version/) { push @errors, $1; } elsif ($@ =~ /Can't load .+? for module .+?DynaLoader\.pm/i ) { push @errors, $module; } else { (my $err = $@) =~ s/\n+/\\n/g; # Can't have newlines in the skip message push @errors, "$module: $err"; } } if (@errors) { my $msg = sprintf "Missing dependenc%s: %s", @errors == 1 ? 'y' : 'ies', join ", ", @errors; plan skip_all => $msg; } return unless defined $ntests; if ($ntests ne 'no_plan') { plan tests => $ntests; } else { plan 'no_plan'; } } 1; # compile please =========================================================== __END__ =pod =head1 SYNOPSIS # t/magic.t use FindBin '$Bin'; use Test::More; use Test::SQL::Translator; # Run parse my $sqlt = SQL::Translator->new( parser => "Magic", filename => "$Bin/data/magic/test.magic", ... ); ... my $schema = $sqlt->schema; # Test the table it produced. table_ok( $schema->get_table("Customer"), { name => "Customer", fields => [ { name => "CustomerID", data_type => "INT", size => 12, default_value => undef, is_nullable => 0, is_primary_key => 1, }, { name => "bar", data_type => "VARCHAR", size => 255, is_nullable => 0, }, ], constraints => [ { type => "PRIMARY KEY", fields => "CustomerID", }, ], indices => [ { name => "barindex", fields => ["bar"], }, ], }); =head1 DESCRIPTION Provides a set of Test::More tests for Schema objects. Testing a parsed schema is then as easy as writing a perl data structure describing how you expect the schema to look. Also provides C for conditionally running tests based on their dependencies. The data structures given to the test subs don't have to include all the possible values, only the ones you expect to have changed. Any left out will be tested to make sure they are still at their default value. This is a useful check that you your parser hasn't accidentally set schema values you didn't expect it to. For an example of the output run the F test. =head1 Tests All the tests take a first arg of the schema object to test, followed by a hash ref describing how you expect that object to look (you only need give the attributes you expect to have changed from the default). The 3rd arg is an optional test name to prepend to all the generated test names. =head2 table_ok =head2 field_ok =head2 constraint_ok =head2 index_ok =head2 view_ok =head2 trigger_ok =head2 procedure_ok =head1 CONDITIONAL TESTS The C function handles conditionally running an individual test. 
It is here to enable running the test suite even when dependencies are missing; not having (for example) GraphViz installed should not keep the test suite from passing. C takes the number of tests to (maybe) run, and a list of modules on which test execution depends: maybe_plan(180, 'SQL::Translator::Parser::MySQL'); If one of C's dependencies does not exist, then the test will be skipped. Instead of a number of tests, you can pass C if you're using C, or C<'no_plan'> if you don't want a plan at all. =head1 EXPORTS table_ok, field_ok, constraint_ok, index_ok, view_ok, trigger_ok, procedure_ok, maybe_plan =head1 TODO =over 4 =item Test the tests! =item Test Count Constants Constants to give the number of tests each C<*_ok> sub uses. e.g. How many tests does C run? Can then use these to set up the test plan easily. =item Test skipping As the test subs wrap up lots of tests in one call you can't skip individual tests only whole sets e.g. a whole table or field. We could add C items to the test hashes to allow per test skips. e.g. skip_is_primary_key => "Need to fix primary key parsing.", =item yaml test specs Maybe have the test subs also accept yaml for the test hash ref as it is much nicer for writing big data structures. We can then define tests as in input schema file and test yaml file to compare it against. =back =head1 AUTHOR Mark D. Addison Emark.addison@itn.co.ukE, Darren Chamberlain . Thanks to Ken Y. Clark for the original table and field test code taken from his mysql test. =head1 SEE ALSO perl(1), SQL::Translator, SQL::Translator::Schema, Test::More. =cut SQL-Translator-1.62/lib/SQL/0000755000000000000000000000000013727667056015456 5ustar00rootroot00000000000000SQL-Translator-1.62/lib/SQL/Translator.pm0000644000000000000000000010326313727666430020145 0ustar00rootroot00000000000000package SQL::Translator; use Moo; our ( $DEFAULT_SUB, $DEBUG, $ERROR ); our $VERSION = '1.62'; $VERSION =~ tr/_//d; $DEBUG = 0 unless defined $DEBUG; $ERROR = ""; use Carp qw(carp croak); use Data::Dumper; use File::Find; use File::Spec::Functions qw(catfile); use File::Basename qw(dirname); use IO::Dir; use Sub::Quote qw(quote_sub); use SQL::Translator::Producer; use SQL::Translator::Schema; use SQL::Translator::Utils qw(throw ex2err carp_ro normalize_quote_options); $DEFAULT_SUB = sub { $_[0]->schema } unless defined $DEFAULT_SUB; with qw( SQL::Translator::Role::Debug SQL::Translator::Role::Error SQL::Translator::Role::BuildArgs ); around BUILDARGS => sub { my $orig = shift; my $self = shift; my $config = $self->$orig(@_); # If a 'parser' or 'from' parameter is passed in, use that as the # parser; if a 'producer' or 'to' parameter is passed in, use that # as the producer; both default to $DEFAULT_SUB. $config->{parser} ||= $config->{from} if defined $config->{from}; $config->{producer} ||= $config->{to} if defined $config->{to}; $config->{filename} ||= $config->{file} if defined $config->{file}; my $quote = normalize_quote_options($config); $config->{quote_identifiers} = $quote if defined $quote; return $config; }; sub BUILD { my ($self) = @_; # Make sure all the tool-related stuff is set up foreach my $tool (qw(producer parser)) { $self->$tool($self->$tool); } } has $_ => ( is => 'rw', default => quote_sub(q{ 0 }), coerce => quote_sub(q{ $_[0] ? 
1 : 0 }), ) foreach qw(add_drop_table no_comments show_warnings trace validate); # quote_identifiers is on by default, use a 0-but-true as indicator # so we can allow individual producers to change the default has quote_identifiers => ( is => 'rw', default => quote_sub(q{ '0E0' }), coerce => quote_sub(q{ $_[0] || 0 }), ); sub quote_table_names { (@_ > 1 and ($_[1] xor $_[0]->quote_identifiers) ) ? croak 'Using quote_table_names as a setter is no longer supported' : $_[0]->quote_identifiers; } sub quote_field_names { (@_ > 1 and ($_[1] xor $_[0]->quote_identifiers) ) ? croak 'Using quote_field_names as a setter is no longer supported' : $_[0]->quote_identifiers; } after quote_identifiers => sub { if (@_ > 1) { # synchronize for old code reaching directly into guts $_[0]->{quote_table_names} = $_[0]->{quote_field_names} = $_[1] ? 1 : 0; } }; has producer => ( is => 'rw', default => sub { $DEFAULT_SUB } ); around producer => sub { my $orig = shift; shift->_tool({ orig => $orig, name => 'producer', path => "SQL::Translator::Producer", default_sub => "produce", }, @_); }; has producer_type => ( is => 'rwp', init_arg => undef ); around producer_type => carp_ro('producer_type'); has producer_args => ( is => 'rw', default => quote_sub(q{ +{} }) ); around producer_args => sub { my $orig = shift; shift->_args($orig, @_); }; has parser => ( is => 'rw', default => sub { $DEFAULT_SUB } ); around parser => sub { my $orig = shift; shift->_tool({ orig => $orig, name => 'parser', path => "SQL::Translator::Parser", default_sub => "parse", }, @_); }; has parser_type => ( is => 'rwp', init_arg => undef ); around parser_type => carp_ro('parser_type'); has parser_args => ( is => 'rw', default => quote_sub(q{ +{} }) ); around parser_args => sub { my $orig = shift; shift->_args($orig, @_); }; has filters => ( is => 'rw', default => quote_sub(q{ [] }), coerce => sub { my @filters; # Set. Convert args to list of [\&code,@args] foreach (@{$_[0]||[]}) { my ($filt,@args) = ref($_) eq "ARRAY" ? @$_ : $_; if ( isa($filt,"CODE") ) { push @filters, [$filt,@args]; next; } else { __PACKAGE__->debug("Adding $filt filter. Args:".Dumper(\@args)."\n") if __PACKAGE__->debugging; $filt = _load_sub("$filt\::filter", "SQL::Translator::Filter") || throw(__PACKAGE__->error); push @filters, [$filt,@args]; } } return \@filters; }, ); around filters => sub { my $orig = shift; my $self = shift; return @{$self->$orig([@{$self->$orig}, @_])} if @_; return @{$self->$orig}; }; has filename => ( is => 'rw', isa => sub { foreach my $filename (ref($_[0]) eq 'ARRAY' ? @{$_[0]} : $_[0]) { if (-d $filename) { throw("Cannot use directory '$filename' as input source"); } elsif (not -f _ && -r _) { throw("Cannot use '$filename' as input source: ". "file does not exist or is not readable."); } } }, ); around filename => \&ex2err; has data => ( is => 'rw', builder => 1, lazy => 1, coerce => sub { # Set $self->data based on what was passed in. We will # accept a number of things; do our best to get it right. my $data = shift; if (isa($data, 'ARRAY')) { $data = join '', @$data; } elsif (isa($data, 'GLOB')) { seek ($data, 0, 0) if eof ($data); local $/; $data = <$data>; } return isa($data, 'SCALAR') ? $data : \$data; }, ); around data => sub { my $orig = shift; my $self = shift; if (@_ > 1 && !ref $_[0]) { return $self->$orig(\join('', @_)); } elsif (@_) { return $self->$orig(@_); } return ex2err($orig, $self); }; sub _build_data { my $self = shift; # If we have a filename but no data yet, populate. 
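# Note that filename may be either a single path or an arrayref of paths;
# the contents of multiple files are concatenated, in order, into one string.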
if (my $filename = $self->filename) { $self->debug("Opening '$filename' to get contents.\n"); local $/; my $data; my @files = ref($filename) eq 'ARRAY' ? @$filename : ($filename); foreach my $file (@files) { open my $fh, '<', $file or throw("Can't read file '$file': $!"); $data .= <$fh>; close $fh or throw("Can't close file '$file': $!"); } return \$data; } } has schema => ( is => 'lazy', init_arg => undef, clearer => 'reset', predicate => '_has_schema', ); around schema => carp_ro('schema'); around reset => sub { my $orig = shift; my $self = shift; $self->$orig(@_); return 1 }; sub _build_schema { SQL::Translator::Schema->new(translator => shift) } sub translate { my $self = shift; my ($args, $parser, $parser_type, $producer, $producer_type); my ($parser_output, $producer_output, @producer_output); # Parse arguments if (@_ == 1) { # Passed a reference to a hash? if (isa($_[0], 'HASH')) { # yep, a hashref $self->debug("translate: Got a hashref\n"); $args = $_[0]; } # Passed a GLOB reference, i.e., filehandle elsif (isa($_[0], 'GLOB')) { $self->debug("translate: Got a GLOB reference\n"); $self->data($_[0]); } # Passed a reference to a string containing the data elsif (isa($_[0], 'SCALAR')) { # passed a ref to a string $self->debug("translate: Got a SCALAR reference (string)\n"); $self->data($_[0]); } # Not a reference; treat it as a filename elsif (! ref $_[0]) { # Not a ref, it's a filename $self->debug("translate: Got a filename\n"); $self->filename($_[0]); } # Passed something else entirely. else { # We're not impressed. Take your empty string and leave. # return ""; # Actually, if data, parser, and producer are set, then we # can continue. Too bad, because I like my comment # (above)... return "" unless ($self->data && $self->producer && $self->parser); } } else { # You must pass in a hash, or you get nothing. return "" if @_ % 2; $args = { @_ }; } # ---------------------------------------------------------------------- # Can specify the data to be transformed using "filename", "file", # "data", or "datasource". # ---------------------------------------------------------------------- if (my $filename = ($args->{'filename'} || $args->{'file'})) { $self->filename($filename); } if (my $data = ($args->{'data'} || $args->{'datasource'})) { $self->data($data); } # ---------------------------------------------------------------- # Get the data. # ---------------------------------------------------------------- my $data = $self->data; # ---------------------------------------------------------------- # Local reference to the parser subroutine # ---------------------------------------------------------------- if ($parser = ($args->{'parser'} || $args->{'from'})) { $self->parser($parser); } $parser = $self->parser; $parser_type = $self->parser_type; # ---------------------------------------------------------------- # Local reference to the producer subroutine # ---------------------------------------------------------------- if ($producer = ($args->{'producer'} || $args->{'to'})) { $self->producer($producer); } $producer = $self->producer; $producer_type = $self->producer_type; # ---------------------------------------------------------------- # Execute the parser, the filters and then execute the producer. # Allowances are made for each piece to die, or fail to compile, # since the referenced subroutines could be almost anything. In # the future, each of these might happen in a Safe environment, # depending on how paranoid we want to be. 
# ---------------------------------------------------------------- # Run parser unless ( $self->_has_schema ) { eval { $parser_output = $parser->($self, $$data) }; if ($@ || ! $parser_output) { my $msg = sprintf "translate: Error with parser '%s': %s", $parser_type, ($@) ? $@ : " no results"; return $self->error($msg); } } $self->debug("Schema =\n", Dumper($self->schema), "\n") if $self->debugging;; # Validate the schema if asked to. if ($self->validate) { my $schema = $self->schema; return $self->error('Invalid schema') unless $schema->is_valid; } # Run filters my $filt_num = 0; foreach ($self->filters) { $filt_num++; my ($code,@args) = @$_; eval { $code->($self->schema, @args) }; my $err = $@ || $self->error || 0; return $self->error("Error with filter $filt_num : $err") if $err; } # Run producer # Calling wantarray in the eval no work, wrong scope. my $wantarray = wantarray ? 1 : 0; eval { if ($wantarray) { @producer_output = $producer->($self); } else { $producer_output = $producer->($self); } }; if ($@ || !( $producer_output || @producer_output)) { my $err = $@ || $self->error || "no results"; my $msg = "translate: Error with producer '$producer_type': $err"; return $self->error($msg); } return wantarray ? @producer_output : $producer_output; } sub list_parsers { return shift->_list("parser"); } sub list_producers { return shift->_list("producer"); } # ====================================================================== # Private Methods # ====================================================================== # ---------------------------------------------------------------------- # _args($type, \%args); # # Gets or sets ${type}_args. Called by parser_args and producer_args. # ---------------------------------------------------------------------- sub _args { my $self = shift; my $orig = shift; if (@_) { # If the first argument is an explicit undef (remember, we # don't get here unless there is stuff in @_), then we clear # out the producer_args hash. if (! defined $_[0]) { shift @_; $self->$orig({}); } my $args = isa($_[0], 'HASH') ? shift : { @_ }; return $self->$orig({ %{$self->$orig}, %$args }); } return $self->$orig; } # ---------------------------------------------------------------------- # Does the get/set work for parser and producer. e.g. # return $self->_tool({ # name => 'producer', # path => "SQL::Translator::Producer", # default_sub => "produce", # }, @_); # ---------------------------------------------------------------------- sub _tool { my ($self,$args) = (shift, shift); my $name = $args->{name}; my $orig = $args->{orig}; return $self->{$name} unless @_; # get accessor my $path = $args->{path}; my $default_sub = $args->{default_sub}; my $tool = shift; # passed an anonymous subroutine reference if (isa($tool, 'CODE')) { $self->$orig($tool); $self->${\"_set_${name}_type"}("CODE"); $self->debug("Got $name: code ref\n"); } # Module name was passed directly # We try to load the name; if it doesn't load, there's a # possibility that it has a function name attached to it, # so we give it a go. 
else { $tool =~ s/-/::/g if $tool !~ /::/; my ($code,$sub); ($code,$sub) = _load_sub("$tool\::$default_sub", $path); unless ($code) { if ( __PACKAGE__->error =~ m/Can't find module/ ) { # Mod not found so try sub ($code,$sub) = _load_sub("$tool", $path) unless $code; die "Can't load $name subroutine '$tool' : ".__PACKAGE__->error unless $code; } else { die "Can't load $name '$tool' : ".__PACKAGE__->error; } } # get code reference and assign my (undef,$module,undef) = $sub =~ m/((.*)::)?(\w+)$/; $self->$orig($code); $self->${\"_set_$name\_type"}($sub eq "CODE" ? "CODE" : $module); $self->debug("Got $name: $sub\n"); } # At this point, $self->{$name} contains a subroutine # reference that is ready to run # Anything left? If so, it's args my $meth = "$name\_args"; $self->$meth(@_) if (@_); return $self->{$name}; } # ---------------------------------------------------------------------- # _list($type) # ---------------------------------------------------------------------- sub _list { my $self = shift; my $type = shift || return (); my $uctype = ucfirst lc $type; # # First find all the directories where SQL::Translator # parsers or producers (the "type") appear to live. # load("SQL::Translator::$uctype") or return (); my $path = catfile "SQL", "Translator", $uctype; my @dirs; for (@INC) { my $dir = catfile $_, $path; $self->debug("_list_${type}s searching $dir\n"); next unless -d $dir; push @dirs, $dir; } # # Now use File::File::find to look recursively in those # directories for all the *.pm files, then present them # with the slashes turned into dashes. # my %found; find( sub { if ( -f && m/\.pm$/ ) { my $mod = $_; $mod =~ s/\.pm$//; my $cur_dir = $File::Find::dir; my $base_dir = quotemeta catfile 'SQL', 'Translator', $uctype; # # See if the current directory is below the base directory. # if ( $cur_dir =~ m/$base_dir(.*)/ ) { $cur_dir = $1; $cur_dir =~ s!^/!!; # kill leading slash $cur_dir =~ s!/!-!g; # turn other slashes into dashes } else { $cur_dir = ''; } $found{ join '-', map { $_ || () } $cur_dir, $mod } = 1; } }, @dirs ); return sort { lc $a cmp lc $b } keys %found; } # ---------------------------------------------------------------------- # load(MODULE [,PATH[,PATH]...]) # # Loads a Perl module. Short circuits if a module is already loaded. # # MODULE - is the name of the module to load. # # PATH - optional list of 'package paths' to look for the module in. e.g # If you called load('Super::Foo' => 'My', 'Other') it will # try to load the mod Super::Foo then My::Super::Foo then Other::Super::Foo. # # Returns package name of the module actually loaded or false and sets error. # # Note, you can't load a name from the root namespace (ie one without '::' in # it), therefore a single word name without a path fails. # ---------------------------------------------------------------------- sub load { my $name = shift; my @path; push @path, "" if $name =~ /::/; # Empty path to check name on its own first push @path, @_ if @_; foreach (@path) { my $module = $_ ? "$_\::$name" : $name; my $file = $module; $file =~ s[::][/]g; $file .= ".pm"; __PACKAGE__->debug("Loading $name as $file\n"); return $module if $INC{$file}; # Already loaded eval { require $file }; next if $@ =~ /Can't locate $file in \@INC/; eval { $module->import() } unless $@; return __PACKAGE__->error("Error loading $name as $module : $@") if $@ && $@ !~ /"SQL::Translator::Producer" is not exported/; return $module; # Module loaded ok } return __PACKAGE__->error("Can't find module $name. 
Path:".join(",",@path)); } # ---------------------------------------------------------------------- # Load the sub name given (including package), optionally using a base package # path. Returns code ref and name of sub loaded, including its package. # (\&code, $sub) = load_sub( 'MySQL::produce', "SQL::Translator::Producer" ); # (\&code, $sub) = load_sub( 'MySQL::produce', @path ); # ---------------------------------------------------------------------- sub _load_sub { my ($tool, @path) = @_; my (undef,$module,$func_name) = $tool =~ m/((.*)::)?(\w+)$/; if ( my $module = load($module => @path) ) { my $sub = "$module\::$func_name"; return wantarray ? ( \&{ $sub }, $sub ) : \&$sub; } return undef; } sub format_table_name { return shift->_format_name('_format_table_name', @_); } sub format_package_name { return shift->_format_name('_format_package_name', @_); } sub format_fk_name { return shift->_format_name('_format_fk_name', @_); } sub format_pk_name { return shift->_format_name('_format_pk_name', @_); } # ---------------------------------------------------------------------- # The other format_*_name methods rely on this one. It optionally # accepts a subroutine ref as the first argument (or uses an identity # sub if one isn't provided or it doesn't already exist), and applies # it to the rest of the arguments (if any). # ---------------------------------------------------------------------- sub _format_name { my $self = shift; my $field = shift; my @args = @_; if (ref($args[0]) eq 'CODE') { $self->{$field} = shift @args; } elsif (! exists $self->{$field}) { $self->{$field} = sub { return shift }; } return @args ? $self->{$field}->(@args) : $self->{$field}; } sub isa($$) { my ($ref, $type) = @_; return UNIVERSAL::isa($ref, $type); } sub version { my $self = shift; return $VERSION; } # Must come after all 'has' declarations around new => \&ex2err; 1; # ---------------------------------------------------------------------- # Who killed the pork chops? # What price bananas? # Are you my Angel? # Allen Ginsberg # ---------------------------------------------------------------------- =pod =head1 NAME SQL::Translator - manipulate structured data definitions (SQL and more) =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( # Print debug info debug => 1, # Print Parse::RecDescent trace trace => 0, # Don't include comments in output no_comments => 0, # Print name mutations, conflicts show_warnings => 0, # Add "drop table" statements add_drop_table => 1, # to quote or not to quote, thats the question quote_identifiers => 1, # Validate schema object validate => 1, # Make all table names CAPS in producers which support this option format_table_name => sub {my $tablename = shift; return uc($tablename)}, # Null-op formatting, only here for documentation's sake format_package_name => sub {return shift}, format_fk_name => sub {return shift}, format_pk_name => sub {return shift}, ); my $output = $translator->translate( from => 'MySQL', to => 'Oracle', # Or an arrayref of filenames, i.e. [ $file1, $file2, $file3 ] filename => $file, ) or die $translator->error; print $output; =head1 DESCRIPTION This documentation covers the API for SQL::Translator. For a more general discussion of how to use the modules and scripts, please see L. SQL::Translator is a group of Perl modules that converts vendor-specific SQL table definitions into other formats, such as other vendor-specific SQL, ER diagrams, documentation (POD and HTML), XML, and Class::DBI classes. 
The main focus of SQL::Translator is SQL, but parsers exist for other structured data formats, including Excel spreadsheets and arbitrarily delimited text files. Through the separation of the code into parsers and producers with an object model in between, it's possible to combine any parser with any producer, to plug in custom parsers or producers, or to manipulate the parsed data via the built-in object model. Presently only the definition parts of SQL are handled (CREATE, ALTER), not the manipulation of data (INSERT, UPDATE, DELETE). =head1 CONSTRUCTOR =head2 new The constructor is called C, and accepts a optional hash of options. Valid options are: =over 4 =item * parser / from =item * parser_args =item * producer / to =item * producer_args =item * filters =item * filename / file =item * data =item * debug =item * add_drop_table =item * quote_identifiers =item * quote_table_names (DEPRECATED) =item * quote_field_names (DEPRECATED) =item * no_comments =item * trace =item * validate =back All options are, well, optional; these attributes can be set via instance methods. Internally, they are; no (non-syntactical) advantage is gained by passing options to the constructor. =head1 METHODS =head2 add_drop_table Toggles whether or not to add "DROP TABLE" statements just before the create definitions. =head2 quote_identifiers Toggles whether or not to quote identifiers (table, column, constraint, etc.) with a quoting mechanism suitable for the chosen Producer. The default (true) is to quote them. =head2 quote_table_names DEPRECATED - A legacy proxy to L =head2 quote_field_names DEPRECATED - A legacy proxy to L =head2 no_comments Toggles whether to print comments in the output. Accepts a true or false value, returns the current value. =head2 producer The C method is an accessor/mutator, used to retrieve or define what subroutine is called to produce the output. A subroutine defined as a producer will be invoked as a function (I) and passed its container C instance, which it should call the C method on, to get the C generated by the parser. It is expected that the function transform the schema structure to a string. The C instance is also useful for informational purposes; for example, the type of the parser can be retrieved using the C method, and the C and C methods can be called when needed. When defining a producer, one of several things can be passed in: A module name (e.g., C), a module name relative to the C namespace (e.g., C), a module name and function combination (C), or a reference to an anonymous subroutine. If a full module name is passed in (for the purposes of this method, a string containing "::" is considered to be a module name), it is treated as a package, and a function called "produce" will be invoked: C<$modulename::produce>. If $modulename cannot be loaded, the final portion is stripped off and treated as a function. In other words, if there is no file named F, C will attempt to load F and use C as the name of the function, instead of the default C. my $tr = SQL::Translator->new; # This will invoke My::Groovy::Producer::produce($tr, $data) $tr->producer("My::Groovy::Producer"); # This will invoke SQL::Translator::Producer::Sybase::produce($tr, $data) $tr->producer("Sybase"); # This will invoke My::Groovy::Producer::transmogrify($tr, $data), # assuming that My::Groovy::Producer::transmogrify is not a module # on disk. 
$tr->producer("My::Groovy::Producer::transmogrify"); # This will invoke the referenced subroutine directly, as # $subref->($tr, $data); $tr->producer(\&my_producer); There is also a method named C, which is a string containing the classname to which the above C function belongs. In the case of anonymous subroutines, this method returns the string "CODE". Finally, there is a method named C, which is both an accessor and a mutator. Arbitrary data may be stored in name => value pairs for the producer subroutine to access: sub My::Random::producer { my ($tr, $data) = @_; my $pr_args = $tr->producer_args(); # $pr_args is a hashref. Extra data passed to the C method is passed to C: $tr->producer("xSV", delimiter => ',\s*'); # In SQL::Translator::Producer::xSV: my $args = $tr->producer_args; my $delimiter = $args->{'delimiter'}; # value is ,\s* =head2 parser The C method defines or retrieves a subroutine that will be called to perform the parsing. The basic idea is the same as that of C (see above), except the default subroutine name is "parse", and will be invoked as C<$module_name::parse($tr, $data)>. Also, the parser subroutine will be passed a string containing the entirety of the data to be parsed. # Invokes SQL::Translator::Parser::MySQL::parse() $tr->parser("MySQL"); # Invokes My::Groovy::Parser::parse() $tr->parser("My::Groovy::Parser"); # Invoke an anonymous subroutine directly $tr->parser(sub { my $dumper = Data::Dumper->new([ $_[1] ], [ "SQL" ]); $dumper->Purity(1)->Terse(1)->Deepcopy(1); return $dumper->Dump; }); There is also C and C, which perform analogously to C and C =head2 filters Set or retrieve the filters to run over the schema during the translation, before the producer creates its output. Filters are sub routines called, in order, with the schema object to filter as the 1st arg and a hash of options (passed as a list) for the rest of the args. They are free to do whatever they want to the schema object, which will be handed to any following filters, then used by the producer. Filters are set as an array, which gives the order they run in. Like parsers and producers, they can be defined by a module name, a module name relative to the SQL::Translator::Filter namespace, a module name and function name together or a reference to an anonymous subroutine. When using a module name a function called C will be invoked in that package to do the work. To pass args to the filter set it as an array ref with the 1st value giving the filter (name or sub) and the rest its args. e.g. $tr->filters( sub { my $schema = shift; # Do stuff to schema here! }, DropFKeys, [ "Names", table => 'lc' ], [ "Foo", foo => "bar", hello => "world" ], [ "Filter5" ], ); Although you normally set them in the constructor, which calls through to filters. i.e. my $translator = SQL::Translator->new( ... filters => [ sub { ... }, [ "Names", table => 'lc' ], ], ... ); See F for more examples. Multiple set calls to filters are cumulative with new filters added to the end of the current list. Returns the filters as a list of array refs, the 1st value being a reference to the filter sub and the rest its args. =head2 show_warnings Toggles whether to print warnings of name conflicts, identifier mutations, etc. Probably only generated by producers to let the user know when something won't translate very smoothly (e.g., MySQL "enum" fields into Oracle). Accepts a true or false value, returns the current value. 
=head2 translate The C method calls the subroutine referenced by the C data member, then calls any C and finally calls the C sub routine (these members are described above). It accepts as arguments a number of things, in key => value format, including (potentially) a parser and a producer (they are passed directly to the C and C methods). Here is how the parameter list to C is parsed: =over =item * 1 argument means it's the data to be parsed; which could be a string (filename) or a reference to a scalar (a string stored in memory), or a reference to a hash, which is parsed as being more than one argument (see next section). # Parse the file /path/to/datafile my $output = $tr->translate("/path/to/datafile"); # Parse the data contained in the string $data my $output = $tr->translate(\$data); =item * More than 1 argument means its a hash of things, and it might be setting a parser, producer, or datasource (this key is named "filename" or "file" if it's a file, or "data" for a SCALAR reference. # As above, parse /path/to/datafile, but with different producers for my $prod ("MySQL", "XML", "Sybase") { print $tr->translate( producer => $prod, filename => "/path/to/datafile", ); } # The filename hash key could also be: datasource => \$data, You get the idea. =back =head2 filename, data Using the C method, the filename of the data to be parsed can be set. This method can be used in conjunction with the C method, below. If both the C and C methods are invoked as mutators, the data set in the C method is used. $tr->filename("/my/data/files/create.sql"); or: my $create_script = do { local $/; open CREATE, "/my/data/files/create.sql" or die $!; ; }; $tr->data(\$create_script); C takes a string, which is interpreted as a filename. C takes a reference to a string, which is used as the data to be parsed. If a filename is set, then that file is opened and read when the C method is called, as long as the data instance variable is not set. =head2 schema Returns the SQL::Translator::Schema object. =head2 trace Turns on/off the tracing option of Parse::RecDescent. =head2 validate Whether or not to validate the schema object after parsing and before producing. =head2 version Returns the version of the SQL::Translator release. =head1 AUTHORS See the included AUTHORS file: L =head1 GETTING HELP/SUPPORT If you are stuck with a problem or have doubts about a particular approach do not hesitate to contact us via any of the following options (the list is sorted by "fastest response time"): =over =item * IRC: irc.perl.org#sql-translator =for html (click for instant chatroom login) =item * Mailing list: L =item * RT Bug Tracker: L =back =head1 HOW TO CONTRIBUTE Contributions are always welcome, in all usable forms (we especially welcome documentation improvements). The delivery methods include git- or unified-diff formatted patches, GitHub pull requests, or plain bug reports either via RT or the Mailing list. Contributors are generally granted access to the official repository after their first several patches pass successful review. Don't hesitate to L us with any further questions you may have. This project is maintained in a git repository. The code and related tools are accessible at the following locations: =over =item * Official repo: L =item * Official gitweb: L =item * GitHub mirror: L =item * Authorized committers: L =item * Travis-CI log: L =for html ↪ Stable branch CI status: =back =head1 COPYRIGHT Copyright 2012 the SQL::Translator authors, as listed in L. 
=head1 LICENSE This library is free software and may be distributed under the same terms as Perl 5 itself. =head1 PRAISE If you find this module useful, please use L to rate it. =head1 SEE ALSO L, L, L, L, L, L, L, L, L. SQL-Translator-1.62/lib/SQL/Translator/0000755000000000000000000000000013727667056017607 5ustar00rootroot00000000000000SQL-Translator-1.62/lib/SQL/Translator/Producer/0000755000000000000000000000000013727667056021372 5ustar00rootroot00000000000000SQL-Translator-1.62/lib/SQL/Translator/Producer/ClassDBI.pm0000644000000000000000000003366413727666430023323 0ustar00rootroot00000000000000package SQL::Translator::Producer::ClassDBI; use strict; use warnings; our $DEBUG; our $VERSION = '1.62'; $DEBUG = 1 unless defined $DEBUG; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); use Data::Dumper; my %CDBI_auto_pkgs = ( MySQL => 'mysql', PostgreSQL => 'Pg', Oracle => 'Oracle', ); sub produce { my $t = shift; local $DEBUG = $t->debug; my $no_comments = $t->no_comments; my $schema = $t->schema; my $args = $t->producer_args; my @create; if ( my $fmt = $args->{'format_pkg_name'} ) { $t->format_package_name( $fmt ); } if ( my $fmt = $args->{'format_fk_name'} ) { $t->format_fk_name( $fmt ); } my $db_user = $args->{'db_user'} || ''; my $db_pass = $args->{'db_password'} || ''; my $main_pkg_name = $args->{'package_name'} || # $args->{'main_pkg_name'} || # keep this? undocumented $t->format_package_name('DBI'); my $header = header_comment( __PACKAGE__, "# " ); my $parser_type = ( split /::/, $t->parser_type )[-1]; my $from = $CDBI_auto_pkgs{$parser_type} || ''; my $dsn = $args->{'dsn'} || sprintf( 'dbi:%s:_', $CDBI_auto_pkgs{ $parser_type } ? $CDBI_auto_pkgs{ $parser_type } : $parser_type ); my $sep = '# ' . '-' x 67; # # Identify "link tables" (have only PK and FK fields). 
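# (A "link table" here is one whose every field is a primary or foreign key.
#  %linkable maps each pair of data tables, in both directions, to the link
#  table object joining them; %linktable records the link tables by name.)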
# my %linkable; my %linktable; for my $table ( $schema->get_tables ) { debug("PKG: Table = ", $table->name, "\n"); my $is_link = 1; for my $field ( $table->get_fields ) { unless ( $field->is_primary_key or $field->is_foreign_key ) { $is_link = 0; last; } } next unless $is_link; foreach my $left ( $table->get_fields ) { next unless $left->is_foreign_key; my $lfk = $left->foreign_key_reference or next; my $lr_table = $schema->get_table( $lfk->reference_table ) or next; my $lr_field_name = ( $lfk->reference_fields )[0]; my $lr_field = $lr_table->get_field($lr_field_name); next unless $lr_field->is_primary_key; foreach my $right ( $table->get_fields ) { next if $left->name eq $right->name; my $rfk = $right->foreign_key_reference or next; my $rr_table = $schema->get_table( $rfk->reference_table ) or next; my $rr_field_name = ( $rfk->reference_fields )[0]; my $rr_field = $rr_table->get_field($rr_field_name); next unless $rr_field->is_primary_key; $linkable{ $lr_table->name }{ $rr_table->name } = $table; $linkable{ $rr_table->name }{ $lr_table->name } = $table; $linktable{ $table->name } = $table; } } } # # Iterate over all tables # my ( %packages, $order ); for my $table ( $schema->get_tables ) { my $table_name = $table->name or next; my $table_pkg_name = join '::', $main_pkg_name, $t->format_package_name($table_name); $packages{ $table_pkg_name } = { order => ++$order, pkg_name => $table_pkg_name, base => $main_pkg_name, table => $table_name, }; # # Primary key may have a different accessor method name # # if ( my $constraint = $table->primary_key ) { # my $field = ( $constraint->fields )[0]; # $packages{ $table_pkg_name }{'_columns_primary'} = $field; # # if ( my $pk_xform = $t->format_pk_name ) { # my $pk_name = $pk_xform->( $table_pkg_name, $field ); # # $packages{$table_pkg_name}{'pk_accessor'} = # "#\n# Primary key accessor\n#\n" # . "sub $pk_name {\n shift->$field\n}\n\n"; # } # } my $is_data = 0; foreach my $field ( $table->get_fields ) { if ( !$field->is_foreign_key and !$field->is_primary_key ) { push @{ $packages{$table_pkg_name}{'_columns_essential'} }, $field->name; $is_data++; } elsif ( !$field->is_primary_key ) { push @{ $packages{$table_pkg_name}{'_columns_others'} }, $field->name; } } my %linked; if ($is_data) { foreach my $link ( keys %{ $linkable{$table_name} } ) { my $linkmethodname; if ( my $fk_xform = $t->format_fk_name ) { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE $linkmethodname = $fk_xform->( $linkable{ $table_name }{ $link }->name, ( $schema->get_table( $link )->primary_key->fields )[0] ) . 's'; } else { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE $linkmethodname = $linkable{ $table_name }{ $link }->name . '_' . ( $schema->get_table( $link )->primary_key->fields )[0] . 's'; } my @rk_fields = (); my @lk_fields = (); foreach my $field ( $linkable{$table_name}{$link}->get_fields ) { next unless $field->is_foreign_key; next unless ( $field->foreign_key_reference->reference_table eq $table_name || $field->foreign_key_reference->reference_table eq $link ); push @lk_fields, ( $field->foreign_key_reference->reference_fields )[0] if $field->foreign_key_reference->reference_table eq $link; push @rk_fields, $field->name if $field->foreign_key_reference->reference_table eq $table_name; } # # If one possible traversal via link table. # if ( scalar(@rk_fields) == 1 and scalar(@lk_fields) == 1 ) { foreach my $rk_field (@rk_fields) { push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkmethodname . " { my \$self = shift; " . "return map \$_->" . 
( $schema->get_table($link)->primary_key->fields ) [0] . ", \$self->" . $linkable{$table_name}{$link}->name . "_" . $rk_field . " }\n\n"; } # # Else there is more than one way to traverse it. # ack! Let's treat these types of link tables as # a many-to-one (easier) # # NOTE: we need to rethink the link method name, # as the cardinality has shifted on us. # } elsif ( scalar(@rk_fields) == 1 ) { foreach my $rk_field (@rk_fields) { # # ADD CALLBACK FOR PLURALIZATION MANGLING HERE # push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkable{$table_name}{$link}->name . "s { my \$self = shift; return \$self->" . $linkable{$table_name}{$link}->name . "_" . $rk_field . "(\@_) }\n\n"; } } elsif ( scalar(@lk_fields) == 1 ) { # # These will be taken care of on the other end... # } else { # # Many many many. Need multiple iterations here, # data structure revision to handle N FK sources. # This code has not been tested and likely doesn't # work here. # foreach my $rk_field (@rk_fields) { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkable{$table_name}{$link}->name . "_" . $rk_field . "s { my \$self = shift; return \$self->" . $linkable{$table_name}{$link}->name . "_" . $rk_field . "(\@_) }\n\n"; } } } } # # Use foreign keys to set up "has_a/has_many" relationships. # foreach my $field ( $table->get_fields ) { if ( $field->is_foreign_key ) { my $table_name = $table->name; my $field_name = $field->name; # my $fk_method = $t->format_fk_name( $table_name, $field_name ); my $fk_method = join('::', $table_pkg_name, $t->format_fk_name( $table_name, $field_name ) ); my $fk = $field->foreign_key_reference; my $ref_table = $fk->reference_table; my $ref_pkg = $t->format_package_name($ref_table); my $ref_field = ( $fk->reference_fields )[0]; # my $fk_method = join('::', # $table_pkg_name, $t->format_fk_name( $ref_table ) # ); push @{ $packages{$table_pkg_name}{'has_a'} }, "$table_pkg_name->has_a(\n" . " $field_name => '$ref_pkg'\n);\n\n" . "sub $fk_method {\n" . " return shift->$field_name\n}\n\n" ; # if there weren't M-M relationships via the has_many # being set up here, create nice pluralized method alias # rather for user as alt. to ugly tablename_fieldname name # # if ( !$packages{$ref_pkg}{'has_many'}{$table_name} ) { # # # # ADD CALLBACK FOR PLURALIZATION MANGLING HERE # # # push @{ $packages{$ref_pkg}{'has_many'}{$table_name} }, # "sub ${table_name}s {\n " . # "return shift->$table_name\_$field_name\n}\n\n"; # # else ugly # } # else { # } push @{ $packages{$ref_pkg}{'has_many'}{$table_name} }, "$ref_pkg->has_many(\n '${table_name}_${field_name}', " . "'$table_pkg_name' => '$field_name'\n);\n\n"; } } } # # Now build up text of package. # my $base_pkg = sprintf( 'Class::DBI%s', $from ? "::$from" : '' ); push @create, join ( "\n", "package $main_pkg_name;\n", $header, "use strict;", "use base '$base_pkg';\n", "$main_pkg_name->set_db('Main', '$dsn', '$db_user', '$db_pass');\n\n", ); for my $pkg_name ( sort { $packages{ $a }{'order'} <=> $packages{ $b }{'order'} } keys %packages ) { my $pkg = $packages{$pkg_name} or next; next unless $pkg->{'pkg_name'}; push @create, join ( "\n", $sep, "package " . $pkg->{'pkg_name'} . ";", "use base '" . $pkg->{'base'} . 
"';", "use Class::DBI::Pager;\n\n", ); if ( $from ) { push @create, join('', $pkg->{'pkg_name'}, "->set_up_table('", $pkg->{'table'}, "');\n\n" ); } else { my $table = $schema->get_table( $pkg->{'table'} ); my @field_names = map { $_->name } $table->get_fields; push @create, join("\n", $pkg_name."->table('".$pkg->{'table'}."');\n", $pkg_name."->columns(All => qw/". join(' ', @field_names)."/);\n\n", ); } push @create, "\n"; if ( my $pk = $pkg->{'pk_accessor'} ) { push @create, $pk; } if ( my @has_a = @{ $pkg->{'has_a'} || [] } ) { push @create, $_ for @has_a; } foreach my $has_many_key ( keys %{ $pkg->{'has_many'} } ) { if ( my @has_many = @{ $pkg->{'has_many'}{$has_many_key} || [] } ) { push @create, $_ for @has_many; } } } push @create, "1;\n"; return wantarray ? @create : join('', @create); } 1; =pod =head1 NAME SQL::Translator::Producer::ClassDBI - create Class::DBI classes from schema =head1 SYNOPSIS Use this producer as you would any other from SQL::Translator. See L for details. This package uses SQL::Translator's formatting methods format_package_name(), format_pk_name(), format_fk_name(), and format_table_name() as it creates classes, one per table in the schema provided. An additional base class is also created for database connectivity configuration. See L for details on how this works. =head1 AUTHORS Allen Day Eallenday@ucla.eduE, Ying Zhang Ezyolive@yahoo.comE, Ken Youens-Clark Ekclark@cpan.orgE. SQL-Translator-1.62/lib/SQL/Translator/Producer/Diagram.pm0000644000000000000000000004775113727666430023305 0ustar00rootroot00000000000000package SQL::Translator::Producer::Diagram; =head1 NAME SQL::Translator::Producer::Diagram - ER diagram producer for SQL::Translator =head1 SYNOPSIS Use via SQL::Translator: use SQL::Translator; my $t = SQL::Translator->new( from => 'MySQL', to => 'Diagram', producer_args => { # All args are optional out_file => 'schema.png',# if not provided will return from translate() output_type => 'png', # is default or 'jpeg' title => 'My Schema', # default is filename font_size => 'medium', # is default or 'small,' 'large' imap_file => '', # filename to write image map coords imap_url => '', # base URL for image map gutter => 30 # is default, px distance b/w cols num_columns => 5, # the number of columns no_lines => 1, # do not draw lines to show FKs add_color => 1, # give it some color show_fk_only => 1, # show only fields used in FKs join_pk_only => 1, # use only primary keys to figure PKs natural_join => 1, # intuit FKs if not defined skip_fields => [...], # list* of field names to exclude skip_tables => [...], # list* of table names to exclude skip_tables_like => [...], # list* of regexen to exclude tables } ) or die SQL::Translator->error; $t->translate; * "list" can be either an array-ref or a comma-separated string =cut use strict; use warnings; use GD; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug); our $DEBUG; our $VERSION = '1.62'; $DEBUG = 0 unless defined $DEBUG; use constant VALID_FONT_SIZE => { small => 1, medium => 1, large => 1, huge => 1, }; use constant VALID_IMAGE_TYPE => { png => 1, jpeg => 1, }; sub produce { my $t = shift; my $schema = $t->schema; my $args = $t->producer_args; local $DEBUG = $t->debug; debug("Schema =\n", Dumper( $schema )) if $DEBUG; debug("Producer args =\n", Dumper( $args )) if $DEBUG; my $out_file = $args->{'out_file'} || ''; my $output_type = $args->{'output_type'} || 'png'; my $title = $args->{'title'} || $t->filename; my $font_size = $args->{'font_size'} || 'medium'; 
my $imap_file = $args->{'imap_file'} || ''; my $imap_url = $args->{'imap_url'} || ''; my $gutter = $args->{'gutter'} || 30; # distance b/w columns my $num_columns = $args->{'num_columns'} || $args->{'no_columns'} || ''; my $no_lines = $args->{'no_lines'}; my $add_color = $args->{'add_color'}; my $show_fk_only = $args->{'show_fk_only'}; my $join_pk_only = $args->{'join_pk_only'}; my $natural_join = $args->{'natural_join'} || $join_pk_only; my %skip_field = map { $_, 1 } ( ref $args->{'skip_fields'} eq 'ARRAY' ? @{ $args->{'skip_fields'} } : split ( /\s*,\s*/, $args->{'skip_fields'}||'' ) ); my %skip_table = map { $_, 1 } ( ref $args->{'skip_tables'} eq 'ARRAY' ? @{ $args->{'skip_tables'} } : split ( /\s*,\s*/, $args->{'skip_tables'}||'' ) ); my @skip_tables_like = map { qr/$_/ } ( ref $args->{'skip_tables_like'} eq 'ARRAY' ? @{ $args->{'skip_tables_like'} } : split ( /\s*,\s*/, $args->{'skip_tables_like'}||'' ) ); my @table_names; if ( $natural_join ) { $schema->make_natural_joins( join_pk_only => $join_pk_only, skip_fields => $args->{'skip_fields'}, ); my $g = $schema->as_graph_pm; my $d = Graph::Traversal::DFS->new( $g, next_alphabetic => 1 ); $d->preorder; @table_names = $d->dfs; } else { @table_names = map { $_->name } $schema->get_tables; } die "Invalid image type '$output_type'" unless VALID_IMAGE_TYPE->{ $output_type }; die "Invalid font size '$font_size'" unless VALID_FONT_SIZE->{ $font_size }; # # Layout the image. # my $font = $font_size eq 'small' ? gdTinyFont : $font_size eq 'medium' ? gdSmallFont : $font_size eq 'large' ? gdLargeFont : gdGiantFont; my $num_tables = scalar @table_names; $num_columns = 0 unless $num_columns =~ /^\d+$/; $num_columns ||= sprintf( "%.0f", sqrt( $num_tables ) + .5 ); $num_columns ||= .5; my $no_per_col = sprintf( "%.0f", $num_tables/$num_columns + .5 ); my @shapes; my ( $max_x, $max_y ); # the furthest x and y used my $orig_y = 40; # used to reset y for each column my ( $x, $y ) = (30,$orig_y); # where to start my $cur_col = 1; # the current column my $no_this_col = 0; # number of tables in current column my $this_col_x = $x; # current column's x my %nj_registry; # for locations of fields for natural joins my @fk_registry; # for locations of fields for foreign keys my %table_x; # for max x of each table my $field_no; # counter to give distinct no. to each field my %coords; # holds fields coordinates my @imap_coords; # for making clickable image map my %legend; TABLE: for my $table_name ( @table_names ) { my $table = $schema->get_table( $table_name ); if ( @skip_tables_like or keys %skip_table ) { next TABLE if $skip_table{ $table_name }; for my $regex ( @skip_tables_like ) { next TABLE if $table_name =~ $regex; } } my $top = $y; push @shapes, [ 'string', $font, $this_col_x, $y, $table_name, 'black' ]; $y += $font->height + 2; my $below_table_name = $y; $y += 2; my $this_max_x = $this_col_x + ($font->width * length($table_name)); debug("Processing table '$table_name'"); my @fields = $table->get_fields; debug("Fields = ", join(', ', map { $_->name } @fields)) if $DEBUG; my ( @fld_desc, $max_name, $max_desc ); for my $f ( @fields ) { my $name = $f->name or next; my $is_pk = $f->is_primary_key; my @attr; # # Decide if we should skip this field. 
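            # With show_fk_only set, only primary-key and foreign-key fields
            # are listed in the table box; all other fields are omitted.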
# if ( $show_fk_only ) { next unless $is_pk || $f->is_foreign_key; } if ( $is_pk ) { push @attr, 'PK'; $legend{'Primary key'} = '[PK]'; } if ( $f->is_unique ) { push @attr, 'U'; $legend{'Unique constraint'} = '[U]'; } if ( $f->is_foreign_key ) { push @attr, 'FK'; $legend{'Foreign Key'} = '[FK]'; } my $attr = ''; if ( @attr ) { $attr .= '[' . join(', ', @attr) . ']'; } my $desc = $f->data_type; $desc .= '('.$f->size.')' if $f->size && $f->data_type =~ /^(VAR)?CHAR2?$/i; my $nlen = length $name; my $dlen = length $desc; $max_name = $nlen if $nlen > ($max_name||0); $max_desc = $dlen if $dlen > ($max_desc||0); push @fld_desc, [ $name, $desc, $f->{'name'}, $is_pk, $attr ]; } $max_name += 2; $max_desc += 2; for my $fld_desc ( @fld_desc ) { my ( $name, $desc, $orig_name, $is_pk, $attr ) = @$fld_desc; my $diff1 = $max_name - length $name; my $diff2 = $max_desc - length $desc; $name .= ' ' x $diff1; $desc .= ' ' x $diff2; $desc = $name . $desc . $attr; push @shapes, [ 'string', $font, $this_col_x, $y, $desc, 'black' ]; $y += $font->height + 2; my $length = $this_col_x + ( $font->width * length( $desc ) ); $this_max_x = $length if $length > $this_max_x; my $constraints = $table->{'fields'}{ $orig_name }{'constraints'}; if ( $natural_join && !$skip_field{ $orig_name } ) { push @{ $nj_registry{ $orig_name } }, $table_name; } my $y_link = $y - $font->height/2; $coords{ $table_name }{ $orig_name }{'coords'} = { left => [ $this_col_x - 6, $y_link ], right => [ $length + 2 , $y_link ], table => $table_name, field_no => ++$field_no, is_pk => $is_pk, fld_name => $orig_name, }; push @imap_coords, [ $imap_url."#$table_name-$orig_name", $this_col_x, $y - $font->height, $length, $y_link, ]; } unless ( $natural_join ) { for my $c ( $table->get_constraints ) { next unless $c->type eq FOREIGN_KEY; my $fk_table = $c->reference_table or next; for my $field_name ( $c->fields ) { for my $fk_field ( $c->reference_fields ) { next unless defined $schema->get_table( $fk_table ); push @fk_registry, [ [ $fk_table , $fk_field ], [ $table_name, $field_name ], ]; } } } } $this_max_x += 5; $table_x{ $table_name } = $this_max_x + 5; push @shapes, [ 'line', $this_col_x - 5, $below_table_name, $this_max_x, $below_table_name, 'black' ]; my @bounds = ( $this_col_x - 5, $top - 5, $this_max_x, $y + 5 ); if ( $add_color ) { unshift @shapes, [ 'filledRectangle', $bounds[0], $bounds[1], $this_max_x, $below_table_name, 'khaki' ]; unshift @shapes, [ 'filledRectangle', @bounds, 'white' ]; } push @imap_coords, [ $imap_url."#$table_name", $bounds[0], $bounds[1], $this_max_x, $below_table_name, ]; push @shapes, [ 'rectangle', @bounds, 'black' ]; $max_x = $this_max_x if $this_max_x > ($max_x||0); $y += 25; if ( ++$no_this_col == $no_per_col ) {# if we've filled up this column $cur_col++; # up the column number $no_this_col = 0; # reset the number of tables $max_x += $gutter; # push the x over for next column $this_col_x = $max_x; # remember the max x for this col $max_y = $y if $y > ($max_y||0); # note the max y $y = $orig_y; # reset the y for next column } } # # Connect the lines. 
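    # Unless no_lines is set: for natural joins, fields sharing a name across
    # tables (collected in %nj_registry) are connected; otherwise each foreign
    # key recorded in @fk_registry gets a line.  Of the four possible
    # left/right endpoint pairings the shortest is drawn, and foreign-key
    # (directed) lines are finished with a small arrowhead.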
# my %horz_taken; my %done; unless ( $no_lines ) { my @position_bunches; if ( $natural_join ) { for my $field_name ( keys %nj_registry ) { my @positions; my @table_names = @{ $nj_registry{ $field_name } || [] } or next; next if scalar @table_names == 1; for my $table_name ( @table_names ) { push @positions, $coords{ $table_name }{ $field_name }{'coords'}; } push @position_bunches, [ @positions ]; } } else { for my $pair ( @fk_registry ) { push @position_bunches, [ $coords{$pair->[0][0]}{ $pair->[0][1] }{'coords'}, $coords{$pair->[1][0]}{ $pair->[1][1] }{'coords'}, ]; } } my $is_directed = $natural_join ? 0 : 1; for my $bunch ( @position_bunches ) { my @positions = @$bunch; for my $i ( 0 .. $#positions ) { my $pos1 = $positions[ $i ]; my ( $ax, $ay ) = @{ $pos1->{'left'} || [] } or next; my ( $bx, $by ) = @{ $pos1->{'right'} || [] } or next; my $table1 = $pos1->{'table'}; my $fno1 = $pos1->{'field_no'}; my $is_pk = $pos1->{'is_pk'}; next if $join_pk_only and !$is_pk; for my $j ( 0 .. $#positions ) { my $pos2 = $positions[ $j ]; my ( $cx, $cy ) = @{ $pos2->{'left'} || [] } or next; my ( $dx, $dy ) = @{ $pos2->{'right'} || [] } or next; my $table2 = $pos2->{'table'}; my $fno2 = $pos2->{'field_no'}; next if $table1 eq $table2; next if $done{ $fno1 }{ $fno2 }; next if $fno1 == $fno2; my @distances = (); push @distances, [ abs ( $ax - $cx ) + abs ( $ay - $cy ), [ $ax, $ay, $cx, $cy ], [ 'left', 'left' ] ]; push @distances, [ abs ( $ax - $dx ) + abs ( $ay - $dy ), [ $ax, $ay, $dx, $dy ], [ 'left', 'right' ], ]; push @distances, [ abs ( $bx - $cx ) + abs ( $by - $cy ), [ $bx, $by, $cx, $cy ], [ 'right', 'left' ], ]; push @distances, [ abs ( $bx - $dx ) + abs ( $by - $dy ), [ $bx, $by, $dx, $dy ], [ 'right', 'right' ], ]; @distances = sort { $a->[0] <=> $b->[0] } @distances; my $shortest = $distances[0]; my ( $x1, $y1, $x2, $y2 ) = @{ $shortest->[1] }; my ( $side1, $side2 ) = @{ $shortest->[2] }; my ( $start, $end ); my $offset = 9; my $col1_right = $table_x{ $table1 }; my $col2_right = $table_x{ $table2 }; my $diff = 0; if ( $x1 == $x2 ) { while ( $horz_taken{ $x1 + $diff } ) { $diff = $side1 eq 'left' ? $diff - 2 : $diff + 2; } $horz_taken{ $x1 + $diff } = 1; } if ( $side1 eq 'left' ) { $start = $x1 - $offset + $diff; } else { $start = $col1_right + $diff; } if ( $side2 eq 'left' ) { $end = $x2 - $offset + $diff; } else { $end = $col2_right + $diff; } push @shapes, [ 'line', $x1, $y1, $start, $y1, 'cadetblue' ]; push @shapes, [ 'line', $start, $y1, $end, $y2, 'cadetblue' ]; push @shapes, [ 'line', $end, $y2, $x2, $y2, 'cadetblue' ]; if ( $is_directed ) { if ( $side1 eq 'right' && $side2 eq 'left' || $side1 eq 'left' && $side2 eq 'left' ) { push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 - 3, $y2 + 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2 - 3, $y2 +3, 'cadetblue' ]; } else { push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 + 3, $y2 + 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2 + 3, $y2 +3, 'cadetblue' ]; } } $done{ $fno1 }{ $fno2 } = 1; $done{ $fno2 }{ $fno1 } = 1; } } } } # # Add the title, legend and signature. 
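    # The title is centred along the top edge, the legend lists only the
    # [PK]/[U]/[FK] markers actually used in this diagram, and a
    # "Created by SQL::Translator <version>" signature goes bottom right.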
# my $large_font = gdLargeFont; my $title_len = $large_font->width * length $title; push @shapes, [ 'string', $large_font, $max_x/2 - $title_len/2, 10, $title, 'black' ]; if ( %legend ) { $max_y += 5; push @shapes, [ 'string', $font, $x, $max_y - $font->height - 4, 'Legend', 'black' ]; $max_y += $font->height + 4; my $longest; for my $len ( map { length $_ } values %legend ) { $longest = $len if $len > ($longest||0); } $longest += 2; while ( my ( $key, $shape ) = each %legend ) { my $space = $longest - length $shape; push @shapes, [ 'string', $font, $x, $max_y - $font->height - 4, join( '', $shape, ' ' x $space, $key ), 'black' ]; $max_y += $font->height + 4; } } my $sig = 'Created by SQL::Translator ' . $t->version; my $sig_len = $font->width * length $sig; push @shapes, [ 'string', $font, $max_x - $sig_len, $max_y - $font->height - 4, $sig, 'black' ]; # # Render the image. # my $gd = GD::Image->new( $max_x + 30, $max_y ); unless ( $gd->can( $output_type ) ) { die "GD can't create images of type '$output_type'\n"; } my %colors = map { $_->[0], $gd->colorAllocate( @{$_->[1]} ) } ( [ white => [ 255, 255, 255 ] ], [ beige => [ 245, 245, 220 ] ], [ black => [ 0, 0, 0 ] ], [ lightblue => [ 173, 216, 230 ] ], [ cadetblue => [ 95, 158, 160 ] ], [ lightgoldenrodyellow => [ 250, 250, 210 ] ], [ khaki => [ 240, 230, 140 ] ], [ red => [ 255, 0, 0 ] ], ); $gd->interlaced( 'true' ); my $background_color = $add_color ? 'lightgoldenrodyellow' : 'white'; $gd->fill( 0, 0, $colors{ $background_color } ); for my $shape ( @shapes ) { my $method = shift @$shape; my $color = pop @$shape; $gd->$method( @$shape, $colors{ $color } ); } # # Make image map. # debug("imap file = '$imap_file'"); if ( $imap_file && @imap_coords ) { open my $fh, '>', $imap_file or die "Can't write '$imap_file': $!\n"; print $fh qq[\n]. qq[\n]; for my $rec ( @imap_coords ) { my $href = shift @$rec; print $fh q[\n]; } print $fh qq[]; close $fh; } # # Print the image. # if ( $out_file ) { open my $fh, '>', $out_file or die "Can't write '$out_file': $!\n"; binmode $fh; print $fh $gd->$output_type; close $fh; } else { return $gd->$output_type; } } 1; =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.62/lib/SQL/Translator/Producer/Oracle.pm0000644000000000000000000006372613727666430023146 0ustar00rootroot00000000000000package SQL::Translator::Producer::Oracle; =head1 NAME SQL::Translator::Producer::Oracle - Oracle SQL producer =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'Oracle' ); print $translator->translate( $file ); =head1 DESCRIPTION Creates an SQL DDL suitable for Oracle. =head1 producer_args =over =item delay_constraints This option remove the primary key and other key constraints from the CREATE TABLE statement and adds ALTER TABLEs at the end with it. =item quote_field_names Controls whether quotes are being used around column names in generated DDL. =item quote_table_names Controls whether quotes are being used around table, sequence and trigger names in generated DDL. =back =head1 NOTES =head2 Autoincremental primary keys This producer uses sequences and triggers to autoincrement primary key columns, if necessary. SQLPlus and DBI expect a slightly different syntax of CREATE TRIGGER statement. You might have noticed that this producer returns a scalar containing all statements concatenated by newlines or an array of single statements depending on the context (scalar, array) it has been called in. 
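A minimal sketch of the difference (the C<$t> object and the schema file
name below are illustrative only):

  my $t = SQL::Translator->new( parser => 'MySQL', producer => 'Oracle' );

  # List context: one DDL statement per element, triggers without the
  # trailing slash, ready to be run one at a time via DBI's do().
  my @statements = $t->translate( filename => 'schema.sql' );

  # Scalar context: all statements joined together, each trigger followed
  # by a "/" line, suitable for feeding to SQLPlus.
  my $ddl = $t->translate( filename => 'schema.sql' );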
SQLPlus expects following trigger syntax: CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; / Whereas if you want to create the same trigger using L, you need to omit the last slash: my $dbh = DBI->connect('dbi:Oracle:mysid', 'scott', 'tiger'); $dbh->do(" CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; "); If you call this producer in array context, we expect you want to process the returned array of statements using L like L does. To get this working we removed the slash in those statements in version 0.09002 of L when called in array context. In scalar context the slash will be still there to ensure compatibility with SQLPlus. =cut use strict; use warnings; our ( $DEBUG, $WARN ); our $VERSION = '1.62'; $DEBUG = 0 unless defined $DEBUG; use base 'SQL::Translator::Producer'; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(header_comment); my %translate = ( # # MySQL types # bigint => 'number', double => 'float', decimal => 'number', float => 'float', int => 'number', integer => 'number', mediumint => 'number', smallint => 'number', tinyint => 'number', char => 'char', varchar => 'varchar2', tinyblob => 'blob', blob => 'blob', mediumblob => 'blob', longblob => 'blob', tinytext => 'varchar2', text => 'clob', longtext => 'clob', mediumtext => 'clob', enum => 'varchar2', set => 'varchar2', date => 'date', datetime => 'date', time => 'date', timestamp => 'date', year => 'date', # # PostgreSQL types # numeric => 'number', 'double precision' => 'number', serial => 'number', bigserial => 'number', money => 'number', character => 'char', 'character varying' => 'varchar2', bytea => 'BLOB', interval => 'number', boolean => 'number', point => 'number', line => 'number', lseg => 'number', box => 'number', path => 'number', polygon => 'number', circle => 'number', cidr => 'number', inet => 'varchar2', macaddr => 'varchar2', bit => 'number', 'bit varying' => 'number', # # Oracle types # number => 'number', varchar2 => 'varchar2', long => 'clob', ); # # Oracle 8/9 max size of data types from: # http://www.ss64.com/orasyntax/datatypes.html # my %max_size = ( char => 2000, float => 126, nchar => 2000, nvarchar2 => 4000, number => [ 38, 127 ], raw => 2000, varchar => 4000, # only synonym for varchar2 varchar2 => 4000, ); my $max_id_length = 30; my %used_identifiers = (); my %global_names; my %truncated; # Quote used to escape table, field, sequence and trigger names my $quote_char = '"'; sub produce { my $translator = shift; $DEBUG = $translator->debug; $WARN = $translator->show_warnings || 0; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $oracle_version = $translator->producer_args->{oracle_version} || 0; my $delay_constraints = $translator->producer_args->{delay_constraints}; my ($output, $create, @table_defs, @fk_defs, @trigger_defs, @index_defs, @constraint_defs); $create .= header_comment unless ($no_comments); my $qt = 1 if $translator->quote_table_names; my $qf = 1 if $translator->quote_field_names; if ( $translator->parser_type =~ /mysql/i ) { $create .= "-- We assume that default NLS_DATE_FORMAT has been changed\n". 
"-- but we set it here anyway to be self-consistent.\n" unless $no_comments; $create .= "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\n\n"; } for my $table ( $schema->get_tables ) { my ( $table_def, $fk_def, $trigger_def, $index_def, $constraint_def ) = create_table( $table, { add_drop_table => $add_drop_table, show_warnings => $WARN, no_comments => $no_comments, delay_constraints => $delay_constraints, quote_table_names => $qt, quote_field_names => $qf, } ); push @table_defs, @$table_def; push @fk_defs, @$fk_def; push @trigger_defs, @$trigger_def; push @index_defs, @$index_def; push @constraint_defs, @$constraint_def; } my (@view_defs); foreach my $view ( $schema->get_views ) { my ( $view_def ) = create_view( $view, { add_drop_view => $add_drop_table, quote_table_names => $qt, } ); push @view_defs, @$view_def; } if (wantarray) { return defined $create ? $create : (), @table_defs, @view_defs, @fk_defs, @trigger_defs, @index_defs, @constraint_defs; } else { $create .= join (";\n\n", @table_defs, @view_defs, @fk_defs, @index_defs, @constraint_defs); $create .= ";\n\n"; # If wantarray is not set we have to add "/" in this statement # DBI->do() needs them omitted # triggers may NOT end with a semicolon but a "/" instead $create .= "$_/\n\n" for @trigger_defs; return $create; } } sub create_table { my ($table, $options) = @_; my $qt = $options->{quote_table_names}; my $qf = $options->{quote_field_names}; my $table_name = $table->name; my $table_name_q = quote($table_name,$qt); my $item = ''; my $drop; my (@create, @field_defs, @constraint_defs, @fk_defs, @trigger_defs); push @create, "--\n-- Table: $table_name\n--" unless $options->{no_comments}; push @create, qq[DROP TABLE $table_name_q CASCADE CONSTRAINTS] if $options->{add_drop_table}; my ( %field_name_scope, @field_comments ); for my $field ( $table->get_fields ) { my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($field, $options, \%field_name_scope); push @create, @$field_create if ref $field_create; push @field_defs, @$field_defs if ref $field_defs; push @trigger_defs, @$trigger_defs if ref $trigger_defs; push @field_comments, @$field_comments if ref $field_comments; } # # Table options # my @table_options; for my $opt ( $table->options ) { if ( ref $opt eq 'HASH' ) { my ( $key, $value ) = each %$opt; if ( ref $value eq 'ARRAY' ) { push @table_options, "$key\n(\n". join ("\n", map { " $_->[0]\t$_->[1]" } map { [ each %$_ ] } @$value )."\n)"; } elsif ( !defined $value ) { push @table_options, $key; } else { push @table_options, "$key $value"; } } } # # Table constraints # for my $c ( $table->get_constraints ) { my $name = $c->name || ''; my @fields = map { quote($_,$qf) } $c->fields; my @rfields = map { quote($_,$qf) } $c->reference_fields; next if !@fields && $c->type ne CHECK_C; if ( $c->type eq PRIMARY_KEY ) { # create a name if delay_constraints $name ||= mk_name( $table_name, 'pk' ) if $options->{delay_constraints}; $name = quote($name,$qf); push @constraint_defs, ($name ? "CONSTRAINT $name " : '') . 'PRIMARY KEY (' . join( ', ', @fields ) . 
')'; } elsif ( $c->type eq UNIQUE ) { # Don't create UNIQUE constraints identical to the primary key if ( my $pk = $table->primary_key ) { my $u_fields = join(":", @fields); my $pk_fields = join(":", $pk->fields); next if $u_fields eq $pk_fields; } if ($name) { # Force prepend of table_name as ORACLE doesn't allow duplicate # CONSTRAINT names even for different tables (ORA-02264) $name = mk_name( "${table_name}_$name", 'u' ) unless $name =~ /^$table_name/; } else { $name = mk_name( $table_name, 'u' ); } $name = quote($name, $qf); for my $f ( $c->fields ) { my $field_def = $table->get_field( $f ) or next; my $dtype = $translate{ ref $field_def->data_type eq "ARRAY" ? $field_def->data_type->[0] : $field_def->data_type} or next; if ( $WARN && $dtype =~ /clob/i ) { warn "Oracle will not allow UNIQUE constraints on " . "CLOB field '" . $field_def->table->name . '.' . $field_def->name . ".'\n" } } push @constraint_defs, "CONSTRAINT $name UNIQUE " . '(' . join( ', ', @fields ) . ')'; } elsif ( $c->type eq CHECK_C ) { $name ||= mk_name( $name || $table_name, 'ck' ); $name = quote($name, $qf); my $expression = $c->expression || ''; push @constraint_defs, "CONSTRAINT $name CHECK ($expression)"; } elsif ( $c->type eq FOREIGN_KEY ) { $name = mk_name( join('_', $table_name, $c->fields). '_fk' ); $name = quote($name, $qf); my $on_delete = uc ($c->on_delete || ''); my $def = "CONSTRAINT $name FOREIGN KEY "; if ( @fields ) { $def .= '(' . join( ', ', @fields ) . ')'; } my $ref_table = quote($c->reference_table,$qt); $def .= " REFERENCES $ref_table"; if ( @rfields ) { $def .= ' (' . join( ', ', @rfields ) . ')'; } if ( $c->match_type ) { $def .= ' MATCH ' . ( $c->match_type =~ /full/i ) ? 'FULL' : 'PARTIAL'; } if ( $on_delete && $on_delete ne "RESTRICT") { $def .= ' ON DELETE '.$c->on_delete; } # disabled by plu 2007-12-29 - doesn't exist for oracle #if ( $c->on_update ) { # $def .= ' ON UPDATE '. $c->on_update; #} push @fk_defs, sprintf("ALTER TABLE %s ADD %s", $table_name_q, $def); } } # # Index Declarations # my @index_defs = (); for my $index ( $table->get_indices ) { my $index_name = $index->name || ''; my $index_type = $index->type || NORMAL; my @fields = map { quote($_, $qf) } $index->fields; next unless @fields; my @index_options; for my $opt ( $index->options ) { if ( ref $opt eq 'HASH' ) { my ( $key, $value ) = each %$opt; if ( ref $value eq 'ARRAY' ) { push @table_options, "$key\n(\n". join ("\n", map { " $_->[0]\t$_->[1]" } map { [ each %$_ ] } @$value )."\n)"; } elsif ( !defined $value ) { push @index_options, $key; } else { push @index_options, "$key $value"; } } } my $index_options = @index_options ? "\n".join("\n", @index_options) : ''; if ( $index_type eq PRIMARY_KEY ) { $index_name = $index_name ? mk_name( $index_name ) : mk_name( $table_name, 'pk' ); $index_name = quote($index_name, $qf); push @field_defs, 'CONSTRAINT '.$index_name.' PRIMARY KEY '. '(' . join( ', ', @fields ) . ')'; } elsif ( $index_type eq NORMAL ) { $index_name = $index_name ? mk_name( $index_name ) : mk_name( $table_name, $index_name || 'i' ); $index_name = quote($index_name, $qf); push @index_defs, "CREATE INDEX $index_name on $table_name_q (". join( ', ', @fields ). ")$index_options"; } elsif ( $index_type eq UNIQUE ) { $index_name = $index_name ? mk_name( $index_name ) : mk_name( $table_name, $index_name || 'i' ); $index_name = quote($index_name, $qf); push @index_defs, "CREATE UNIQUE INDEX $index_name on $table_name_q (". join( ', ', @fields ). 
")$index_options"; } else { warn "Unknown index type ($index_type) on table $table_name.\n" if $WARN; } } if ( my @table_comments = $table->comments ) { for my $comment ( @table_comments ) { next unless $comment; $comment = __PACKAGE__->_quote_string($comment); push @field_comments, "COMMENT ON TABLE $table_name_q is\n $comment" unless $options->{no_comments}; } } my $table_options = @table_options ? "\n".join("\n", @table_options) : ''; push @create, "CREATE TABLE $table_name_q (\n" . join( ",\n", map { " $_" } @field_defs, ($options->{delay_constraints} ? () : @constraint_defs) ) . "\n)$table_options"; @constraint_defs = map { "ALTER TABLE $table_name_q ADD $_" } @constraint_defs; if ( $WARN ) { if ( %truncated ) { warn "Truncated " . keys( %truncated ) . " names:\n"; warn "\t" . join( "\n\t", sort keys %truncated ) . "\n"; } } return \@create, \@fk_defs, \@trigger_defs, \@index_defs, ($options->{delay_constraints} ? \@constraint_defs : []); } sub alter_field { my ($from_field, $to_field, $options) = @_; my $qt = $options->{quote_table_names}; my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($to_field, $options, {}); # Fix ORA-01442 if ($to_field->is_nullable && !$from_field->is_nullable) { die 'Cannot remove NOT NULL from table field'; } elsif (!$from_field->is_nullable && !$to_field->is_nullable) { @$field_defs = map { s/ NOT NULL//; $_} @$field_defs; } my $table_name = quote($to_field->table->name,$qt); return 'ALTER TABLE '.$table_name.' MODIFY ( '.join('', @$field_defs).' )'; } sub add_field { my ($new_field, $options) = @_; my $qt = $options->{quote_table_names}; my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($new_field, $options, {}); my $table_name = quote($new_field->table->name,$qt); my $out = sprintf('ALTER TABLE %s ADD ( %s )', $table_name, join('', @$field_defs)); return $out; } sub create_field { my ($field, $options, $field_name_scope) = @_; my $qf = $options->{quote_field_names}; my $qt = $options->{quote_table_names}; my (@create, @field_defs, @trigger_defs, @field_comments); my $table_name = $field->table->name; my $table_name_q = quote($table_name, $qt); # # Field name # my $field_name = mk_name( $field->name, '', $field_name_scope, 1 ); my $field_name_q = quote($field_name, $qf); my $field_def = quote($field_name, $qf); $field->name( $field_name ); # # Datatype # my $check; my $data_type = lc $field->data_type; my @size = $field->size; my %extra = $field->extra; my $list = $extra{'list'} || []; my $commalist = join( ', ', map { __PACKAGE__->_quote_string($_) } @$list ); if ( $data_type eq 'enum' ) { $check = "CHECK ($field_name_q IN ($commalist))"; $data_type = 'varchar2'; } elsif ( $data_type eq 'set' ) { # XXX add a CHECK constraint maybe # (trickier and slower, than enum :) $data_type = 'varchar2'; } else { if (defined $translate{ $data_type }) { if (ref $translate{ $data_type } eq "ARRAY") { ($data_type,$size[0]) = @{$translate{ $data_type }}; } else { $data_type = $translate{ $data_type }; } } $data_type ||= 'varchar2'; } # ensure size is not bigger than max size oracle allows for data type if ( defined $max_size{$data_type} ) { for ( my $i = 0 ; $i < scalar @size ; $i++ ) { my $max = ref( $max_size{$data_type} ) eq 'ARRAY' ? 
$max_size{$data_type}->[$i] : $max_size{$data_type}; $size[$i] = $max if $size[$i] > $max; } } # # Fixes ORA-02329: column of datatype LOB cannot be # unique or a primary key # if ( $data_type eq 'clob' && $field->is_primary_key ) { $data_type = 'varchar2'; $size[0] = 4000; warn "CLOB cannot be a primary key, changing to VARCHAR2\n" if $WARN; } if ( $data_type eq 'clob' && $field->is_unique ) { $data_type = 'varchar2'; $size[0] = 4000; warn "CLOB cannot be a unique key, changing to VARCHAR2\n" if $WARN; } # # Fixes ORA-00907: missing right parenthesis # if ( $data_type =~ /(date|clob)/i ) { undef @size; } # # Fixes ORA-00906: missing right parenthesis # if size is 0 or undefined # for (qw/varchar2/) { if ( $data_type =~ /^($_)$/i ) { $size[0] ||= $max_size{$_}; } } $field_def .= " $data_type"; if ( defined $size[0] && $size[0] > 0 ) { $field_def .= '(' . join( ',', @size ) . ')'; } # # Default value # my $default = $field->default_value; if ( defined $default ) { # # Wherein we try to catch a string being used as # a default value for a numerical field. If "true/false," # then sub "1/0," otherwise just test the truthity of the # argument and use that (naive?). # if (ref $default and defined $$default) { $default = $$default; } elsif (ref $default) { $default = 'NULL'; } elsif ( $data_type =~ /^number$/i && $default !~ /^-?\d+$/ && $default !~ m/null/i ) { if ( $default =~ /^true$/i ) { $default = "'1'"; } elsif ( $default =~ /^false$/i ) { $default = "'0'"; } else { $default = $default ? "'1'" : "'0'"; } } elsif ( $data_type =~ /date/ && ( $default eq 'current_timestamp' || $default eq 'now()' ) ) { $default = 'SYSDATE'; } else { $default = $default =~ m/null/i ? 'NULL' : __PACKAGE__->_quote_string($default); } $field_def .= " DEFAULT $default", } # # Not null constraint # unless ( $field->is_nullable ) { $field_def .= ' NOT NULL'; } $field_def .= " $check" if $check; # # Auto_increment # if ( $field->is_auto_increment ) { my $base_name = $table_name . "_". $field_name; my $seq_name = quote(mk_name( $base_name, 'sq' ),$qt); my $trigger_name = quote(mk_name( $base_name, 'ai' ),$qt); push @create, qq[DROP SEQUENCE $seq_name] if $options->{add_drop_table}; push @create, "CREATE SEQUENCE $seq_name"; my $trigger = "CREATE OR REPLACE TRIGGER $trigger_name\n" . "BEFORE INSERT ON $table_name_q\n" . "FOR EACH ROW WHEN (\n" . " new.$field_name_q IS NULL". " OR new.$field_name_q = 0\n". ")\n". "BEGIN\n" . " SELECT $seq_name.nextval\n" . " INTO :new." . $field_name_q."\n" . " FROM dual;\n" . "END;\n"; push @trigger_defs, $trigger; } if ( lc $field->data_type eq 'timestamp' ) { my $base_name = $table_name . "_". $field_name; my $trig_name = quote(mk_name( $base_name, 'ts' ), $qt); my $trigger = "CREATE OR REPLACE TRIGGER $trig_name\n". "BEFORE INSERT OR UPDATE ON $table_name_q\n". "FOR EACH ROW WHEN (new.$field_name_q IS NULL)\n". "BEGIN\n". " SELECT sysdate INTO :new.$field_name_q FROM dual;\n". 
"END;\n"; push @trigger_defs, $trigger; } push @field_defs, $field_def; if ( my $comment = $field->comments ) { $comment =~ __PACKAGE__->_quote_string($comment); push @field_comments, "COMMENT ON COLUMN $table_name_q.$field_name_q is\n $comment;" unless $options->{no_comments}; } return \@create, \@field_defs, \@trigger_defs, \@field_comments; } sub create_view { my ($view, $options) = @_; my $qt = $options->{quote_table_names}; my $view_name = quote($view->name,$qt); my $extra = $view->extra; my $view_type = 'VIEW'; my $view_options = ''; if ( my $materialized = $extra->{materialized} ) { $view_type = 'MATERIALIZED VIEW'; $view_options .= ' '.$materialized; } my @create; push @create, qq[DROP $view_type $view_name] if $options->{add_drop_view}; push @create, sprintf("CREATE %s %s%s AS\n%s", $view_type, $view_name, $view_options, $view->sql); return \@create; } sub mk_name { my $basename = shift || ''; my $type = shift || ''; $type = '' if $type =~ /^\d/; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? $max_id_length - (length($type) + 1) : $max_id_length; $basename = substr( $basename, 0, $max_name ) if length( $basename ) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ( $basename ne $basename_orig and $critical ) { my $show_type = $type ? "+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to $max_id_length ", "character limit to make '$name'\n" if $WARN; $truncated{ $basename_orig } = $name; } $scope ||= \%global_names; if ( my $prev = $scope->{ $name } ) { my $name_orig = $name; substr($name, $max_id_length - 2) = "" if length( $name ) >= $max_id_length - 1; $name .= sprintf( "%02d", $prev++ ); warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{ $name_orig }++; } $scope->{ $name }++; return $name; } 1; sub quote { my ($name, $q) = @_; return $name unless $q && $name; $name =~ s/\Q$quote_char/$quote_char$quote_char/g; return "$quote_char$name$quote_char"; } # ------------------------------------------------------------------- # All bad art is the result of good intentions. # Oscar Wilde # ------------------------------------------------------------------- =pod =head1 CREDITS Mad props to Tim Bunce for much of the logic stolen from his "mysql2ora" script. =head1 AUTHORS Ken Youens-Clark Ekclark@cpan.orgE, Alexander Hartmaier Eabraxxa@cpan.orgE, Fabien Wernli Efaxmodem@cpan.orgE. =head1 SEE ALSO SQL::Translator, DDL::Oracle, mysql2ora. =cut SQL-Translator-1.62/lib/SQL/Translator/Producer/TT/0000755000000000000000000000000013727667056021721 5ustar00rootroot00000000000000SQL-Translator-1.62/lib/SQL/Translator/Producer/TT/Table.pm0000644000000000000000000002147013727666430023305 0ustar00rootroot00000000000000package SQL::Translator::Producer::TT::Table; =pod =head1 NAME SQL::Translator::Producer::TT::Table - Produces output using the Template Toolkit from a SQL schema, per table. 
=head1 SYNOPSIS # Normal STDOUT version # my $translator = SQL::Translator->new( from => 'MySQL', filename => 'foo_schema.sql', to => 'TT::Table', producer_args => { tt_table => 'foo_table.tt', }, ); print $translator->translate; # To generate a file per table # my $translator = SQL::Translator->new( from => 'MySQL', filename => 'foo_schema.sql', to => 'TT::Table', producer_args => { tt_table => 'foo_table.tt.html', mk_files => 1, mk_files_base => "./doc/tables", mk_file_ext => ".html", on_exists => "replace", }, ); # # ./doc/tables/ now contains the templated tables as $tablename.html # =head1 DESCRIPTION Produces schema output using a given Template Tookit template, processing that template for each table in the schema. Optionally allows you to write the result for each table to a separate file. It needs one additional producer_arg of C which is the file name of the template to use. This template will be passed a template var of C, which is the current L table we are producing, which you can then use to walk the schema via the methods documented in that module. You also get C as a shortcut to the L for the table and C, the L object for this parse in case you want to get access to any of the options etc set here. Here's a brief example of what the template could look like: [% table.name %] ================ [% FOREACH field = table.get_fields %] [% field.name %] [% field.data_type %]([% field.size %]) [% END -%] See F for a more complete example. You can also set any of the options used to initialize the Template object by adding them to your producer_args. See Template Toolkit docs for details of the options. $translator = SQL::Translator->new( to => 'TT', producer_args => { ttfile => 'foo_template.tt', INCLUDE_PATH => '/foo/templates/tt', INTERPOLATE => 1, }, ); If you set C and its additional options the producer will write a separate file for each table in the schema. This is useful for producing things like HTML documentation where every table gets its own page (you could also use TTSchema producer to add an index page). It's also particularly good for code generation where you want to produce a class file per table. =head1 OPTIONS =over 4 =item tt_table File name of the template to run for each table. =item mk_files Set to true to output a file for each table in the schema (as well as returning the whole lot back to the Translalor and hence STDOUT). The file will be named after the table, with the optional C added and placed in the directory C. =item mk_files_ext Extension (without the dot) to add to the filename when using mk_files. =item mk_files_base = DIR Dir to build the table files into when using mk_files. Defaults to the current directory. =item mk_file_dir Set true and if the file needs to written to a directory that doesn't exist, it will be created first. =item on_exists [Default:replace] What to do if we are running with mk_files and a file already exists where we want to write our output. One of "skip", "die", "replace", "insert". The default is die. B - Over-write the existing file with the new one, clobbering anything already there. B - Leave the original file as it was and don't write the new version anywhere. B - Die with an existing file error. B - Insert the generated output into the file between a set of special comments (defined by the following options.) Any code between the comments will be overwritten (ie the results from a previous produce) but the rest of the file is left alone (your custom code). 
This is particularly useful for code generation as it allows you to generate schema derived code and then add your own custom code to the file. Then when the schema changes you just re-produce to insert the new code. =item insert_comment_start The comment to look for in the file when on_exists is C. Default is C. Must appear on it own line, with only whitespace either side, to be recognised. =item insert_comment_end The end comment to look for in the file when on_exists is C. Default is C. Must appear on it own line, with only whitespace either side, to be recognised. =back =cut use strict; use warnings; our ( $DEBUG, @EXPORT_OK ); our $VERSION = '1.62'; $DEBUG = 0 unless defined $DEBUG; use File::Path; use Template; use Data::Dumper; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); use SQL::Translator::Utils 'debug'; my $Translator; sub produce { $Translator = shift; local $DEBUG = $Translator->debug; my $scma = $Translator->schema; my $pargs = $Translator->producer_args; my $file = $pargs->{'tt_table'} or die "No template file given!"; $pargs->{on_exists} ||= "die"; debug "Processing template $file\n"; my $out; my $tt = Template->new( DEBUG => $DEBUG, ABSOLUTE => 1, # Set so we can use from the command line sensibly RELATIVE => 1, # Maybe the cmd line code should set it! Security! %$pargs, # Allow any TT opts to be passed in the producer_args ) || die "Failed to initialize Template object: ".Template->error; for my $tbl ( sort {$a->order <=> $b->order} $scma->get_tables ) { my $outtmp; $tt->process( $file, { translator => $Translator, schema => $scma, table => $tbl, }, \$outtmp ) or die "Error processing template '$file' for table '".$tbl->name ."': ".$tt->error; $out .= $outtmp; # Write out the file... write_file( table_file($tbl), $outtmp ) if $pargs->{mk_files}; } return $out; }; # Work out the filename for a given table. sub table_file { my ($tbl) = shift; my $pargs = $Translator->producer_args; my $root = $pargs->{mk_files_base}; my $ext = $pargs->{mk_file_ext}; return "$root/$tbl.$ext"; } # Write the src given to the file given, handling the on_exists arg. sub write_file { my ($file, $src) = @_; my $pargs = $Translator->producer_args; my $root = $pargs->{mk_files_base}; if ( -e $file ) { if ( $pargs->{on_exists} eq "skip" ) { warn "Skipping existing $file\n"; return 1; } elsif ( $pargs->{on_exists} eq "die" ) { die "File $file already exists.\n"; } elsif ( $pargs->{on_exists} eq "replace" ) { warn "Replacing $file.\n"; } elsif ( $pargs->{on_exists} eq "insert" ) { warn "Inserting into $file.\n"; $src = insert_code($file, $src); } else { die "Unknown on_exists action: $pargs->{on_exists}\n"; } } else { if ( my $interactive = -t STDIN && -t STDOUT ) { warn "Creating $file.\n"; } } my ($dir) = $file =~ m!^(.*)/!; # Want greedy, everything before the last / if ( $dir and not -d $dir and $pargs->{mk_file_dir} ) { mkpath($dir); } debug "Writing to $file\n"; open( FILE, ">$file") or die "Error opening file $file : $!\n"; print FILE $src; close(FILE); } # Reads file and inserts code between the insert comments and returns the new # source. 
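# The markers default to SQLF_INSERT_START and SQLF_INSERT_END (overridable
# via the insert_comment_start / insert_comment_end producer args) and must
# each sit on a line of their own.  Everything between them is replaced with
# the freshly generated output; the rest of the file is left untouched.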
sub insert_code { my ($file, $src) = @_; my $pargs = $Translator->producer_args; my $cstart = $pargs->{insert_comment_start} || "SQLF_INSERT_START"; my $cend = $pargs->{insert_comment_end} || "SQLF_INSERT_END"; # Slurp in the original file open ( FILE, "<", "$file") or die "Error opening file $file : $!\n"; local $/ = undef; my $orig = ; close(FILE); # Insert the new code between the insert comments unless ( $orig =~ s/^\s*?$cstart\s*?\n.*?^\s*?$cend\s*?\n/\n$cstart\n$src\n$cend\n/ms ) { warn "No insert done\n"; } return $orig; } 1; =pod =head1 AUTHOR Mark Addison Egrommit@users.sourceforge.netE. =head1 TODO - Some tests for the various on exists options (they have been tested implicitly through use in a project but need some proper tests). - More docs on code generation strategies. - Better hooks for filename generation. - Integrate with L and L. =head1 SEE ALSO SQL::Translator. =cut SQL-Translator-1.62/lib/SQL/Translator/Producer/TT/Base.pm0000644000000000000000000002062413727666430023130 0ustar00rootroot00000000000000package SQL::Translator::Producer::TT::Base; =pod =head1 NAME SQL::Translator::Producer::TT::Base - TT (Template Toolkit) based Producer base class. =cut use strict; use warnings; our @EXPORT_OK; our $VERSION = '1.62'; use Template; use Data::Dumper; use IO::Handle; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); use SQL::Translator::Utils 'debug'; # Hack to convert the produce call into an object. ALL sub-classes need todo # this so that the correct class gets created. sub produce { return __PACKAGE__->new( translator => shift )->run; }; sub new { my $proto = shift; my $class = ref $proto || $proto; my %args = @_; my $me = bless {}, $class; $me->{translator} = delete $args{translator} || die "Need a translator."; return $me; } sub translator { shift->{translator}; } sub schema { shift->{translator}->schema(@_); } # Util args access method. # No args - Return hashref (the actual hash in Translator) or hash of args. # 1 arg - Return that named args value. # Args - List of names. Return values of the given arg names in list context # or return as hashref in scalar context. Any names given that don't # exist in the args are returned as undef. sub args { my $me = shift; # No args unless (@_) { return wantarray ? %{ $me->{translator}->producer_args } : $me->{translator}->producer_args ; } # 1 arg. Return the value whatever the context. return $me->{translator}->producer_args->{$_[0]} if @_ == 1; # More args so return values list or hash ref my %args = %{ $me->{translator}->producer_args }; return wantarray ? @args{@_} : { map { ($_=>$args{$_}) } @_ }; } # Run the produce and return the result. sub run { my $me = shift; my $scma = $me->schema; my %args = %{$me->args}; my $tmpl = $me->tt_schema or die "No template!"; debug "Processing template $tmpl\n"; my $out; my $tt = Template->new( #DEBUG => $me->translator->debug, ABSOLUTE => 1, # Set so we can use from the command line sensibly RELATIVE => 1, # Maybe the cmd line code should set it! Security! 
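        # Note: %args (the producer_args) is merged in last below, so any
        # Template options given there override both these defaults and
        # whatever tt_config() returns.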
$me->tt_config, # Hook for sub-classes to add config %args, # Allow any TT opts to be passed in the producer_args ) || die "Failed to initialize Template object: ".Template->error; $tt->process( $tmpl, { $me->tt_default_vars, $me->tt_vars, # Sub-class hook for adding vars }, \$out ) or die "Error processing template '$tmpl': ".$tt->error; return $out; } # Sub class hooks #----------------------------------------------------------------------------- sub tt_config { () }; sub tt_schema { my $me = shift; my $class = ref $me; my $file = $me->args("ttfile"); return $file if $file; no strict 'refs'; my $ref = *{"$class\:\:DATA"}{IO}; if ( $ref->opened ) { local $/ = undef; # Slurp mode return \<$ref>; } undef; }; sub tt_default_vars { my $me = shift; return ( translator => $me->translator, schema => $me->pre_process_schema($me->translator->schema), ); } sub pre_process_schema { $_[1] } sub tt_vars { () }; 1; =pod =head1 SYNOPSIS # Create a producer using a template in the __DATA__ section. package SQL::Translator::Producer::Foo; use base qw/SQL::Translator::Producer::TT::Base/; # Convert produce call into a method call on our new class sub produce { return __PACKAGE__->new( translator => shift )->run; }; # Configure the Template object. sub tt_config { ( INTERPOLATE => 1 ); } # Extra vars to add to the template sub tt_vars { ( foo => "bar" ); } # Put template in DATA section (or use file with ttfile producer arg) __DATA__ Schema Database: [% schema.database %] Foo: $foo ... =head1 DESCRIPTION A base class producer designed to be sub-classed to create new TT based producers cheaply - by simply giving the template to use and sprinkling in some extra template variables and config. You can find an introduction to this module in L. The 1st thing the module does is convert the produce sub routine call we get from SQL::Translator into a method call on an object, which we can then sub-class. This is done with the following code which needs to appear in B sub classes. # Convert produce call into an object method call sub produce { return __PACKAGE__->new( translator => shift )->run; }; See L below for details. The upshot of this is we can make new template producers by sub classing this base class, adding the above snippet and a template. The module also provides a number of hooks into the templating process, see L for details. See the L above for an example of creating a simple producer using a single template stored in the producers DATA section. =head1 SUB CLASS HOOKS Sub-classes can override these methods to control the templating by giving the template source, adding variables and giving config to the Tempate object. =head2 tt_config sub tt_config { ( INTERPOLATE => 1 ); } Return hash of Template config to add to that given to the L