SQL-Translator-1.65/0000755000000000000000000000000014551164245014240 5ustar00rootroot00000000000000SQL-Translator-1.65/Makefile.PL0000644000000000000000000000755714551163537016233 0ustar00rootroot00000000000000use strict; use warnings; use ExtUtils::MakeMaker; use File::ShareDir::Install; do './maint/Makefile.PL.include' or die $@ unless -f 'META.yml'; my $eumm_version = eval $ExtUtils::MakeMaker::VERSION; my %eumm_args = ( NAME => 'SQL::Translator', ABSTRACT => 'SQL DDL transformations and more', VERSION_FROM => 'lib/SQL/Translator.pm', LICENSE => 'perl', MIN_PERL_VERSION => '5.008001', CONFIGURE_REQUIRES => { 'ExtUtils::MakeMaker' => '6.54', # to deal with x.y.z versions properly 'File::ShareDir::Install' => '0', }, TEST_REQUIRES => { 'JSON::MaybeXS' => '1.003003', 'YAML' => '0.66', 'XML::Writer' => '0.500', 'Test::More' => '0.88', 'Test::Differences' => '0', 'Test::Exception' => '0.42', 'Text::ParseWords' => '0', }, PREREQ_PM => { 'Digest::SHA' => '0', 'Carp::Clan' => '0', 'Parse::RecDescent' => '1.967009', 'DBI' => '1.54', 'File::ShareDir' => '1.0', 'Moo' => '1.000003', 'Package::Variant' => '1.001001', 'Sub::Quote' => '0', 'Try::Tiny' => '0.04', 'Scalar::Util' => '0', 'List::Util' => '1.33', # support for `any` }, realclean => { FILES => 't/data/roundtrip_autogen.yaml' }, EXE_FILES => [ qw| script/sqlt-diagram script/sqlt-diff script/sqlt-diff-old script/sqlt-dumper script/sqlt-graph script/sqlt | ], META_MERGE => { "meta-spec" => { version => 2 }, dynamic_config => 0, resources => { bugtracker => { web => 'https://github.com/dbsrgits/sql-translator/issues', }, repository => { type => 'git', url => 'git@github.com/dbsrgits/sql-translator.git', web => 'https://github.com/dbsrgits/sql-translator/', }, license => ['http://dev.perl.org/licenses/'], x_IRC => 'irc://irc.perl.org/#sql-translator', x_Ratings => 'http://cpanratings.perl.org/d/SQL-Translator', }, x_authority => 'cpan:JROBINSON', no_index => { directory => [qw(maint share xt)], }, prereqs => { runtime => { recommends => { 'Template' => '2.20', 'GD' => '0', 'GraphViz' => '0', 'Graph::Directed' => '0', 'Spreadsheet::ParseExcel' => '0.41', 'Text::RecordParser' => '0.02', 'XML::LibXML' => '1.69', }, }, develop => { requires => { 'Template' => '2.20', 'GD' => '0', 'DBD::SQLite' => '0', 'CGI' => '0', 'GraphViz' => '0', 'Graph::Directed' => '0', 'Spreadsheet::ParseExcel' => '0.41', 'Text::RecordParser' => '0.02', 'XML::LibXML' => '1.69', 'Test::EOL' => '1.1', 'Test::NoTabs' => '1.1', 'Software::LicenseUtils' => '0', # for Distar }, }, }, }, ); install_share 'share'; sub _move_to { my ($hash, $fromkey, $tokey) = @_; $hash->{$tokey} = { %{ $hash->{$tokey} || {} }, %{ delete($hash->{$fromkey}) || {} }, }; } delete $eumm_args{META_MERGE} if $eumm_version < 6.45_01; delete $eumm_args{CONFIGURE_REQUIRES} if $eumm_version < 6.51_03; # too late to use so just delete _move_to(\%eumm_args, 'TEST_REQUIRES', 'BUILD_REQUIRES') if $eumm_version < 6.63_03; _move_to(\%eumm_args, 'BUILD_REQUIRES', 'PREREQ_PM') if $eumm_version < 6.55_01; $eumm_args{NO_MYMETA} = 1 if $eumm_version >= 6.57_02 and $eumm_version < 6.57_07; WriteMakefile(%eumm_args); package MY; use File::ShareDir::Install qw(postamble); SQL-Translator-1.65/share/0000755000000000000000000000000014551164244015341 5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/0000755000000000000000000000000014551164244021144 5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/Parse/0000755000000000000000000000000014551164244022216 
5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/Parse/RecDescent/0000755000000000000000000000000014551164244024235 5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/Parse/RecDescent/DDL/0000755000000000000000000000000014551164244024640 5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/Parse/RecDescent/DDL/SQLT/0000755000000000000000000000000014551164244025423 5ustar00rootroot00000000000000SQL-Translator-1.65/share/PrecompiledParsers/Parse/RecDescent/DDL/SQLT/README0000644000000000000000000000014714316103442026275 0ustar00rootroot00000000000000The contents of this directory are automatically regenerated when invoking Makefile.PL in author mode. SQL-Translator-1.65/share/DiaUml/0000755000000000000000000000000014551164244016514 5ustar00rootroot00000000000000SQL-Translator-1.65/share/DiaUml/diagram.tt20000644000000000000000000000411014316103442020537 0ustar00rootroot00000000000000 #A4# [% content %] SQL-Translator-1.65/share/DiaUml/uml-class-start.tt20000644000000000000000000000673614316103442022206 0ustar00rootroot00000000000000[% # vim:ft=tt2 DEFAULT visible_operations='true' -%] #[% name %]# #[% stereotype %]# #[% comment %]# SQL-Translator-1.65/share/DiaUml/layer.tt20000644000000000000000000000021014316103442020244 0ustar00rootroot00000000000000[%- DEFAULT name="Layer1" visible="true" %] [% content %] SQL-Translator-1.65/share/DiaUml/uml-class-all.tt20000644000000000000000000000737514316103442021621 0ustar00rootroot00000000000000[%# vim:ft=tt2 -%] #[% name %]# #[% stereotype %]# #[% comment %]# [% FOREACH attributes %] [% INCLUDE "uml-attribute.tt2" %] [% END %] SQL-Translator-1.65/share/DiaUml/uml-class-end.tt20000644000000000000000000000004514316103442021602 0ustar00rootroot00000000000000[%# vim:ft=tt2 -%] SQL-Translator-1.65/share/DiaUml/schema.tt20000644000000000000000000000214414316103442020400 0ustar00rootroot00000000000000[%# vim:ft=tt2 -%] [% WRAPPER diagram.tt2 %] [% WRAPPER layer.tt2 name="Background" %] [% FOREACH table IN schema.get_tables %] [% INCLUDE 'uml-class-start.tt2' name = table.name stereotype = 'Table' visible_operations = 'false' %] [% FOREACH field IN table.get_fields; SET type = field.data_type; SET type = "$type($field.size)" IF field.size; INCLUDE "uml-attribute.tt2" name = field.name stereotype = 'Field' type = type value = field.default_value ; END %] [% INCLUDE 'uml-class-end.tt2' %] [% END %] [% END %] [% END %] SQL-Translator-1.65/share/DiaUml/uml-attribute.tt20000644000000000000000000000172714316103442021744 0ustar00rootroot00000000000000[%# vim:ft=tt2 -%] [%- DEFAULT visibility=0 abstract="false" class_scope="false" %] #[% name %]# #[% type %]# #[% value %]# #[% comment %]# SQL-Translator-1.65/share/DiaUml/uml-class.tt20000644000000000000000000000065114316103442021041 0ustar00rootroot00000000000000[%# vim:ft=tt2 -%] [% INCLUDE 'uml-class-start.tt2' %] [%- FOREACH attributes; INCLUDE "uml-attribute.tt2"; END %] [% INCLUDE 'uml-class-end.tt2' %] SQL-Translator-1.65/lib/0000755000000000000000000000000014551164244015005 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/Test/0000755000000000000000000000000014551164244015724 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/Test/SQL/0000755000000000000000000000000014551164244016363 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/Test/SQL/Translator.pm0000644000000000000000000004201114551163724021052 0ustar00rootroot00000000000000package Test::SQL::Translator; =pod =head1 NAME Test::SQL::Translator - Test::More test 
functions for the Schema objects. =cut use strict; use warnings; use Test::More; use SQL::Translator::Schema::Constants; use base qw(Exporter); our @EXPORT_OK; our $VERSION = '1.65'; our @EXPORT = qw( schema_ok table_ok field_ok constraint_ok index_ok view_ok trigger_ok procedure_ok maybe_plan ); # $ATTRIBUTES{ } = { => , ... } my %ATTRIBUTES = ( field => { name => undef, data_type => '', default_value => undef, size => '0', is_primary_key => 0, is_unique => 0, is_nullable => 1, is_foreign_key => 0, is_auto_increment => 0, comments => '', extra => {}, # foreign_key_reference, is_valid => 1, # order }, constraint => { name => '', type => '', deferrable => 1, expression => '', is_valid => 1, fields => [], match_type => '', options => [], on_delete => '', on_update => '', reference_fields => [], reference_table => '', extra => {}, }, index => { fields => [], is_valid => 1, name => "", options => [], type => NORMAL, extra => {}, }, view => { name => "", sql => "", fields => [], is_valid => 1, extra => {}, }, trigger => { name => '', perform_action_when => undef, database_events => undef, on_table => undef, action => undef, is_valid => 1, extra => {}, }, procedure => { name => '', sql => '', parameters => [], owner => '', comments => '', extra => {}, }, table => { comments => undef, name => '', #primary_key => undef, # pkey constraint options => [], #order => 0, fields => undef, constraints => undef, indices => undef, is_valid => 1, extra => {}, }, schema => { name => '', database => '', procedures => undef, # [] when set tables => undef, # [] when set triggers => undef, # [] when set views => undef, # [] when set is_valid => 1, extra => {}, } ); # Given a test hash and schema object name set any attribute keys not present in # the test hash to their default value for that schema object type. # e.g. default_attribs( $test, "field" ); sub default_attribs { my ($hashref, $object_type) = @_; if (!exists $ATTRIBUTES{$object_type}) { die "Can't add default attribs for unknown Schema " . "object type '$object_type'."; } for my $attr ( grep { !exists $hashref->{$_} } keys %{ $ATTRIBUTES{$object_type} } ) { $hashref->{$attr} = $ATTRIBUTES{$object_type}{$attr}; } return $hashref; } # Format test name so it will prepend the test names used below. sub t_name { my $name = shift; $name ||= ""; $name = "$name - " if $name; return $name; } sub field_ok { my ($f1, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "field"); unless ($f1) { fail " Field '$test->{name}' doesn't exist!"; # TODO Do a skip on the following tests. Currently the test counts wont # match at the end. So at least it fails. return; } my $full_name = $f1->table->name . "." . $test->{name}; is($f1->name, $test->{name}, "${t_name}Field '$full_name'"); is($f1->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid'); is($f1->data_type, $test->{data_type}, "$t_name type is '$test->{data_type}'"); is($f1->size, $test->{size}, "$t_name size is '$test->{size}'"); is( $f1->default_value, $test->{default_value}, "$t_name default value is " . ( defined($test->{default_value}) ? "'$test->{default_value}'" : "UNDEF" ) ); is($f1->is_nullable, $test->{is_nullable}, "$t_name " . ($test->{is_nullable} ? 'can' : 'cannot') . ' be null'); is($f1->is_unique, $test->{is_unique}, "$t_name " . ($test->{is_unique} ? 'can' : 'cannot') . ' be unique'); is( $f1->is_primary_key, $test->{is_primary_key}, "$t_name is " . ($test->{is_primary_key} ? '' : 'not ') . 
'a primary_key' ); is( $f1->is_foreign_key, $test->{is_foreign_key}, "$t_name is " . ($test->{is_foreign_key} ? '' : 'not') . ' a foreign_key' ); is( $f1->is_auto_increment, $test->{is_auto_increment}, "$t_name is " . ($test->{is_auto_increment} ? '' : 'not ') . 'an auto_increment' ); is($f1->comments, $test->{comments}, "$t_name comments"); is_deeply({ $f1->extra }, $test->{extra}, "$t_name extra"); } sub constraint_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "constraint"); is($obj->name, $test->{name}, "${t_name}Constraint '$test->{name}'"); is($obj->type, $test->{type}, "$t_name type is '$test->{type}'"); is($obj->deferrable, $test->{deferrable}, "$t_name " . ($test->{deferrable} ? 'can' : 'cannot') . ' be deferred'); is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid'); is($obj->table->name, $test->{table}, "$t_name table is '$test->{table}'"); is($obj->expression, $test->{expression}, "$t_name expression is '$test->{expression}'"); is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'"); is($obj->reference_table, $test->{reference_table}, "$t_name reference_table is '$test->{reference_table}'"); is_deeply( [ $obj->reference_fields ], $test->{reference_fields}, "$t_name reference_fields are '" . join(",", @{ $test->{reference_fields} }) . "'" ); is($obj->match_type, $test->{match_type}, "$t_name match_type is '$test->{match_type}'"); is($obj->on_delete, $test->{on_delete}, "$t_name on_delete is '$test->{on_delete}'"); is($obj->on_update, $test->{on_update}, "$t_name on_update is '$test->{on_update}'"); is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); } sub index_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "index"); is($obj->name, $test->{name}, "${t_name}Index '$test->{name}'"); is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid'); is($obj->type, $test->{type}, "$t_name type is '$test->{type}'"); is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'"); is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); } sub trigger_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "index"); is($obj->name, $test->{name}, "${t_name}Trigger '$test->{name}'"); is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 
'valid'); is( $obj->perform_action_when, $test->{perform_action_when}, "$t_name perform_action_when is '$test->{perform_action_when}'" ); is( join(',', $obj->database_events), $test->{database_events}, sprintf("%s database_events is '%s'", $t_name, $test->{'database_events'},) ); is($obj->on_table, $test->{on_table}, "$t_name on_table is '$test->{on_table}'"); is($obj->scope, $test->{scope}, "$t_name scope is '$test->{scope}'") if exists $test->{scope}; is($obj->action, $test->{action}, "$t_name action is '$test->{action}'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); } sub view_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "index"); #isa_ok( $v, 'SQL::Translator::Schema::View', 'View' ); is($obj->name, $test->{name}, "${t_name}View '$test->{name}'"); is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid'); is($obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'"); is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); } sub procedure_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "index"); #isa_ok( $v, 'SQL::Translator::Schema::View', 'View' ); is($obj->name, $test->{name}, "${t_name}Procedure '$test->{name}'"); is($obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'"); is_deeply([ $obj->parameters ], $test->{parameters}, "$t_name parameters are '" . join(",", @{ $test->{parameters} }) . "'"); is($obj->comments, $test->{comments}, "$t_name comments is '$test->{comments}'"); is($obj->owner, $test->{owner}, "$t_name owner is '$test->{owner}'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); } sub table_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "table"); my %arg = %$test; my $tbl_name = $arg{name} || die "Need a table name to test."; is($obj->{name}, $arg{name}, "${t_name}Table '$arg{name}'"); is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); # Fields if ($arg{fields}) { my @fldnames = map { $_->{name} } @{ $arg{fields} }; is_deeply([ map { $_->name } $obj->get_fields ], [@fldnames], "${t_name} field names are " . join(", ", @fldnames)); foreach (@{ $arg{fields} }) { my $f_name = $_->{name} || die "Need a field name to test."; next unless my $fld = $obj->get_field($f_name); field_ok($fld, $_, $name); } } else { is(scalar($obj->get_fields), undef, "${t_name} has no fields."); } # Constraints and Indices _test_kids( $obj, $test, $name, { constraint => 'constraints', index => 'indices', } ); } sub _test_kids { my ($obj, $test, $name, $kids) = @_; my $t_name = t_name($name); my $obj_name = ref $obj; ($obj_name) = $obj_name =~ m/^.*::(.*)$/; while (my ($object_type, $plural) = each %$kids) { next unless defined $test->{$plural}; if (my @tests = @{ $test->{$plural} }) { my $meth = "get_$plural"; my @objects = $obj->$meth; is(scalar(@objects), scalar(@tests), "${t_name}$obj_name has " . scalar(@tests) . 
" $plural"); for my $object (@objects) { my $ans = { lc($obj_name) => $obj->name, %{ shift @tests } }; my $meth = "${object_type}_ok"; { no strict 'refs'; $meth->($object, $ans, $name); } } } } } sub schema_ok { my ($obj, $test, $name) = @_; my $t_name = t_name($name); default_attribs($test, "schema"); is($obj->name, $test->{name}, "${t_name}Schema name is '$test->{name}'"); is($obj->database, $test->{database}, "$t_name database is '$test->{database}'"); is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra"); is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid'); # Tables if ($test->{tables}) { is_deeply( [ map { $_->name } $obj->get_tables ], [ map { $_->{name} } @{ $test->{tables} } ], "${t_name} table names match" ); foreach (@{ $test->{tables} }) { my $t_name = $_->{name} || die "Need a table name to test."; table_ok($obj->get_table($t_name), $_, $name); } } else { is(scalar($obj->get_tables), undef, "${t_name} has no tables."); } # Procedures, Triggers, Views _test_kids( $obj, $test, $name, { procedure => 'procedures', trigger => 'triggers', view => 'views', } ); } # maybe_plan($ntests, @modules) # # Calls plan $ntests if @modules can all be loaded; otherwise, # calls skip_all with an explanation of why the tests were skipped. sub maybe_plan { my ($ntests, @modules) = @_; my @errors; for my $module (@modules) { eval "use $module;"; next if !$@; if ($@ =~ /Can't locate (\S+)/) { my $mod = $1; $mod =~ s/\.pm$//; $mod =~ s#/#::#g; push @errors, $mod; } elsif ($@ =~ /([\w\:]+ version [\d\.]+) required.+?this is only version/) { push @errors, $1; } elsif ($@ =~ /Can't load .+? for module .+?DynaLoader\.pm/i) { push @errors, $module; } else { (my $err = $@) =~ s/\n+/\\n/g; # Can't have newlines in the skip message push @errors, "$module: $err"; } } if (@errors) { my $msg = sprintf "Missing dependenc%s: %s", @errors == 1 ? 'y' : 'ies', join ", ", @errors; plan skip_all => $msg; } return unless defined $ntests; if ($ntests ne 'no_plan') { plan tests => $ntests; } else { plan 'no_plan'; } } 1; # compile please =========================================================== __END__ =pod =head1 SYNOPSIS # t/magic.t use FindBin '$Bin'; use Test::More; use Test::SQL::Translator; # Run parse my $sqlt = SQL::Translator->new( parser => "Magic", filename => "$Bin/data/magic/test.magic", ... ); ... my $schema = $sqlt->schema; # Test the table it produced. table_ok( $schema->get_table("Customer"), { name => "Customer", fields => [ { name => "CustomerID", data_type => "INT", size => 12, default_value => undef, is_nullable => 0, is_primary_key => 1, }, { name => "bar", data_type => "VARCHAR", size => 255, is_nullable => 0, }, ], constraints => [ { type => "PRIMARY KEY", fields => "CustomerID", }, ], indices => [ { name => "barindex", fields => ["bar"], }, ], }); =head1 DESCRIPTION Provides a set of Test::More tests for Schema objects. Testing a parsed schema is then as easy as writing a perl data structure describing how you expect the schema to look. Also provides C for conditionally running tests based on their dependencies. The data structures given to the test subs don't have to include all the possible values, only the ones you expect to have changed. Any left out will be tested to make sure they are still at their default value. This is a useful check that you your parser hasn't accidentally set schema values you didn't expect it to. For an example of the output run the F test. 
=head1 Tests All the tests take a first arg of the schema object to test, followed by a hash ref describing how you expect that object to look (you only need give the attributes you expect to have changed from the default). The 3rd arg is an optional test name to prepend to all the generated test names. =head2 table_ok =head2 field_ok =head2 constraint_ok =head2 index_ok =head2 view_ok =head2 trigger_ok =head2 procedure_ok =head1 CONDITIONAL TESTS The C function handles conditionally running an individual test. It is here to enable running the test suite even when dependencies are missing; not having (for example) GraphViz installed should not keep the test suite from passing. C takes the number of tests to (maybe) run, and a list of modules on which test execution depends: maybe_plan(180, 'SQL::Translator::Parser::MySQL'); If one of C's dependencies does not exist, then the test will be skipped. Instead of a number of tests, you can pass C if you're using C, or C<'no_plan'> if you don't want a plan at all. =head1 EXPORTS table_ok, field_ok, constraint_ok, index_ok, view_ok, trigger_ok, procedure_ok, maybe_plan =head1 TODO =over 4 =item Test the tests! =item Test Count Constants Constants to give the number of tests each C<*_ok> sub uses. e.g. How many tests does C run? Can then use these to set up the test plan easily. =item Test skipping As the test subs wrap up lots of tests in one call you can't skip individual tests only whole sets e.g. a whole table or field. We could add C items to the test hashes to allow per test skips. e.g. skip_is_primary_key => "Need to fix primary key parsing.", =item yaml test specs Maybe have the test subs also accept yaml for the test hash ref as it is much nicer for writing big data structures. We can then define tests as in input schema file and test yaml file to compare it against. =back =head1 AUTHOR Mark D. Addison Emark.addison@itn.co.ukE, Darren Chamberlain . Thanks to Ken Y. Clark for the original table and field test code taken from his mysql test. =head1 SEE ALSO perl(1), SQL::Translator, SQL::Translator::Schema, Test::More. =cut SQL-Translator-1.65/lib/SQL/0000755000000000000000000000000014551164244015444 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/0000755000000000000000000000000014551164244017575 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Generator/0000755000000000000000000000000014551164244021523 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Generator/Role/0000755000000000000000000000000014551164244022424 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Generator/Role/Quote.pm0000644000000000000000000000265114541265164024065 0ustar00rootroot00000000000000package SQL::Translator::Generator::Role::Quote; use Moo::Role; =head1 NAME SQL::Translator::Generator::Role::Quote - Role for dealing with identifier quoting. 
=head1 DESCRIPTION I =cut requires qw(quote_chars name_sep); has escape_char => ( is => 'ro', lazy => 1, clearer => 1, default => sub { $_[0]->quote_chars->[-1] }, ); sub quote { my ($self, $label) = @_; return '' unless defined $label; return $$label if ref($label) eq 'SCALAR'; my @quote_chars = @{ $self->quote_chars }; return $label unless scalar @quote_chars; my ($l, $r); if (@quote_chars == 1) { ($l, $r) = (@quote_chars) x 2; } elsif (@quote_chars == 2) { ($l, $r) = @quote_chars; } else { die 'too many quote chars!'; } my $sep = $self->name_sep || ''; my $esc = $self->escape_char; # parts containing * are naturally unquoted join $sep, map { (my $n = $_) =~ s/\Q$r/$esc$r/g; "$l$n$r" } ($sep ? split(/\Q$sep\E/, $label) : $label); } sub quote_string { my ($self, $string) = @_; return $string unless defined $string; $string =~ s/'/''/g; return qq{'$string'}; } 1; =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Generator/Role/DDL.pm0000644000000000000000000000504714541265164023375 0ustar00rootroot00000000000000package SQL::Translator::Generator::Role::DDL; =head1 NAME SQL::Translator::Generator::Role::DDL - Role implementing common parts of DDL generation. =head1 DESCRIPTION I =cut use Moo::Role; use SQL::Translator::Utils qw(header_comment); use Scalar::Util; requires '_build_type_map'; requires '_build_numeric_types'; requires '_build_unquoted_defaults'; requires '_build_sizeless_types'; requires 'quote'; requires 'quote_string'; has type_map => (is => 'lazy',); has numeric_types => (is => 'lazy',); has sizeless_types => (is => 'lazy',); has unquoted_defaults => (is => 'lazy',); has add_comments => (is => 'ro',); has add_drop_table => (is => 'ro',); # would also be handy to have a required size set if there is such a thing sub field_name { $_[0]->quote($_[1]->name) } sub field_comments { ($_[1]->comments ? ('-- ' . $_[1]->comments . "\n ") : ()) } sub table_comments { my ($self, $table) = @_; if ($self->add_comments) { return ("", "--", "-- Table: " . $self->quote($table->name) . "", "--", map "-- $_", $table->comments); } else { return (); } } sub field_nullable { ($_[1]->is_nullable ? $_[0]->nullable : 'NOT NULL') } sub field_default { my ($self, $field, $exceptions) = @_; my $default = $field->default_value; return () if !defined $default; $default = \"$default" if $exceptions and !ref $default and $exceptions->{$default}; if (ref $default) { $default = $$default; } elsif (!($self->numeric_types->{ lc($field->data_type) } && Scalar::Util::looks_like_number($default))) { $default = $self->quote_string($default); } return ("DEFAULT $default"); } sub field_type { my ($self, $field) = @_; my $field_type = $field->data_type; ($self->type_map->{$field_type} || $field_type) . $self->field_type_size($field); } sub field_type_size { my ($self, $field) = @_; ( $field->size && !$self->sizeless_types->{ $field->data_type } ? '(' . $field->size . ')' : '' ); } sub fields { my ($self, $table) = @_; (map $self->field($_), $table->get_fields); } sub indices { my ($self, $table) = @_; (map $self->index($_), $table->get_indices); } sub nullable {'NULL'} sub header_comments { header_comment() . "\n" if $_[0]->add_comments } 1; =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. 
=head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/0000755000000000000000000000000014551164244022126 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/PostgreSQL.pm0000644000000000000000000000133014541265164024466 0ustar00rootroot00000000000000package SQL::Translator::Generator::DDL::PostgreSQL; =head1 NAME SQL::Translator::Generator::DDL::PostgreSQL - A Moo based PostgreSQL DDL generation engine. =head1 DESCRIPTION I =cut use Moo; has quote_chars => ( is => 'rw', default => sub { +[qw(" ")] }, trigger => sub { $_[0]->clear_escape_char }, ); with 'SQL::Translator::Generator::Role::Quote'; sub name_sep {q(.)} 1; =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/SQLite.pm0000644000000000000000000000501414541265164023627 0ustar00rootroot00000000000000package SQL::Translator::Generator::DDL::SQLite; =head1 NAME SQL::Translator::Generator::DDL::SQLite - A Moo based SQLite DDL generation engine. =head1 DESCRIPTION I =cut use Moo; has quote_chars => (is => 'ro', default => sub { +[qw(" ")] }); with 'SQL::Translator::Generator::Role::Quote'; with 'SQL::Translator::Generator::Role::DDL'; sub name_sep {q(.)} sub _build_type_map { +{ set => 'varchar', bytea => 'blob', }; } sub _build_sizeless_types { +{ text => 1, blob => 1, }; } sub _build_numeric_types { +{ int => 1, integer => 1, tinyint => 1, smallint => 1, mediumint => 1, bigint => 1, 'unsigned big int' => 1, int2 => 1, int8 => 1, numeric => 1, decimal => 1, boolean => 1, real => 1, double => 1, 'double precision' => 1, float => 1, }; } sub _build_unquoted_defaults { +{ NULL => 1, 'now()' => 1, CURRENT_TIMESTAMP => 1, }; } sub nullable { () } sub _ipk { my ($self, $field) = @_; my $pk = $field->table->primary_key; my @pk_fields = $pk ? $pk->fields : (); $field->is_primary_key && scalar @pk_fields == 1 && ($field->data_type =~ /int(eger)?$/i || ($field->data_type =~ /^number?$/i && $field->size !~ /,/)); } sub field_autoinc { my ($self, $field) = @_; return ( ( ($field->extra->{auto_increment_type} || '') eq 'monotonic' and $self->_ipk($field) and $field->is_auto_increment ) ? 'AUTOINCREMENT' : '' ); } sub field { my ($self, $field) = @_; return join ' ', $self->field_comments($field), $self->field_name($field), ( $self->_ipk($field) ? ('INTEGER PRIMARY KEY') : ($self->field_type($field)) ), ($self->field_autoinc($field) || ()), $self->field_nullable($field), $self->field_default( $field, { NULL => 1, 'now()' => 1, 'CURRENT_TIMESTAMP' => 1, } ), ; } 1; =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/MySQL.pm0000644000000000000000000000054314541265164023435 0ustar00rootroot00000000000000package SQL::Translator::Generator::DDL::MySQL; =head1 NAME SQL::Translator::Generator::DDL::MySQL - A Moo based MySQL DDL generation engine. 
=head1 DESCRIPTION I =cut use Moo; has quote_chars => (is => 'ro', default => sub { +[qw(` `)] }); with 'SQL::Translator::Generator::Role::Quote'; sub name_sep {q(.)} 1; SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/SQLServer.pm0000644000000000000000000001463414541265164024324 0ustar00rootroot00000000000000package SQL::Translator::Generator::DDL::SQLServer; =head1 NAME SQL::Translator::Generator::DDL::SQLServer - A Moo based MS SQL Server DDL generation engine. =head1 DESCRIPTION I =cut use Moo; use SQL::Translator::Schema::Constants; with 'SQL::Translator::Generator::Role::Quote'; with 'SQL::Translator::Generator::Role::DDL'; sub quote_chars { [qw([ ])] } sub name_sep {q(.)} sub _build_numeric_types { +{ int => 1, }; } sub _build_unquoted_defaults { +{ NULL => 1, }; } sub _build_type_map { +{ date => 'datetime', 'time' => 'datetime', }; } sub _build_sizeless_types { +{ map { $_ => 1 } qw( tinyint smallint int integer bigint text bit image datetime ) }; } sub field { my ($self, $field) = @_; return join ' ', $self->field_name($field), ($self->field_type($field) || die 'type is required'), $self->field_autoinc($field), $self->field_nullable($field), $self->field_default($field),; } sub field_autoinc { ($_[1]->is_auto_increment ? 'IDENTITY' : ()) } sub primary_key_constraint { 'CONSTRAINT ' . $_[0]->quote($_[1]->name || $_[1]->table->name . '_pk') . ' PRIMARY KEY (' . join(', ', map $_[0]->quote($_), $_[1]->fields) . ')'; } sub index { 'CREATE INDEX ' . $_[0]->quote($_[1]->name || $_[1]->table->name . '_idx') . ' ON ' . $_[0]->quote($_[1]->table->name) . ' (' . join(', ', map $_[0]->quote($_), $_[1]->fields) . ');'; } sub unique_constraint_single { my ($self, $constraint) = @_; 'CONSTRAINT ' . $self->unique_constraint_name($constraint) . ' UNIQUE (' . join(', ', map $self->quote($_), $constraint->fields) . ')'; } sub unique_constraint_name { my ($self, $constraint) = @_; $self->quote($constraint->name || $constraint->table->name . '_uc'); } sub unique_constraint_multiple { my ($self, $constraint) = @_; 'CREATE UNIQUE NONCLUSTERED INDEX ' . $self->unique_constraint_name($constraint) . ' ON ' . $self->quote($constraint->table->name) . ' (' . join(', ', map $self->quote($_), $constraint->fields) . ')' . ' WHERE ' . join(' AND ', map $self->quote($_->name) . ' IS NOT NULL', grep { $_->is_nullable } $constraint->fields) . ';'; } sub foreign_key_constraint { my ($self, $constraint) = @_; my $on_delete = uc($constraint->on_delete || ''); my $on_update = uc($constraint->on_update || ''); # The default implicit constraint action in MSSQL is RESTRICT # but you can not specify it explicitly. Go figure :) for (map uc $_ || '', $on_delete, $on_update) { undef $_ if $_ eq 'RESTRICT'; } 'ALTER TABLE ' . $self->quote($constraint->table->name) . ' ADD CONSTRAINT ' . $self->quote($constraint->name || $constraint->table->name . '_fk') . ' FOREIGN KEY' . ' (' . join(', ', map $self->quote($_), $constraint->fields) . ') REFERENCES ' . $self->quote($constraint->reference_table) . ' (' . join(', ', map $self->quote($_), $constraint->reference_fields) . ')' . ( $on_delete && $on_delete ne "NO ACTION" ? ' ON DELETE ' . $on_delete : '' ) . ( $on_update && $on_update ne "NO ACTION" ? ' ON UPDATE ' . $on_update : '' ) . ';'; } sub enum_constraint_name { my ($self, $field_name) = @_; $self->quote($field_name . '_chk'); } sub enum_constraint { my ($self, $field_name, $vals) = @_; return ('CONSTRAINT ' . $self->enum_constraint_name($field_name) . ' CHECK (' . $self->quote($field_name) . ' IN (' . 
join(',', map $self->quote_string($_), @$vals) . '))'); } sub constraints { my ($self, $table) = @_; ( map $self->enum_constraint($_->name, { $_->extra }->{list} || []), grep { 'enum' eq lc $_->data_type } $table->get_fields ), (map $self->primary_key_constraint($_), grep { $_->type eq PRIMARY_KEY } $table->get_constraints), ( map $self->unique_constraint_single($_), grep { $_->type eq UNIQUE && !grep { $_->is_nullable } $_->fields } $table->get_constraints ), ; } sub table { my ($self, $table) = @_; join("\n", $self->table_comments($table), '') . join("\n\n", 'CREATE TABLE ' . $self->quote($table->name) . " (\n" . join(",\n", map {" $_"} $self->fields($table), $self->constraints($table),) . "\n);", $self->unique_constraints_multiple($table), $self->indices($table),); } sub unique_constraints_multiple { my ($self, $table) = @_; ( map $self->unique_constraint_multiple($_), grep { $_->type eq UNIQUE && grep { $_->is_nullable } $_->fields } $table->get_constraints ); } sub drop_table { my ($self, $table) = @_; my $name = $table->name; my $q_name = $self->quote($name); "IF EXISTS (SELECT name FROM sysobjects WHERE name = '$name' AND type = 'U')" . " DROP TABLE $q_name;"; } sub remove_table_constraints { my ($self, $table) = @_; my $name = $table->name; my $q_name = $self->quote($name); "IF EXISTS (SELECT name FROM sysobjects WHERE name = '$name' AND type = 'U')" . " ALTER TABLE $q_name NOCHECK CONSTRAINT all;"; } sub drop_tables { my ($self, $schema) = @_; if ($self->add_drop_table) { my @tables = sort { $b->order <=> $a->order } $schema->get_tables; return join "\n", ( ( $self->add_comments ? ('--', '-- Turn off constraints', '--', '',) : () ), (map $self->remove_table_constraints($_), @tables), ($self->add_comments ? ('--', '-- Drop tables', '--', '',) : ()), (map $self->drop_table($_), @tables), ); } return ''; } sub foreign_key_constraints { my ($self, $schema) = @_; ( map $self->foreign_key_constraint($_), grep { $_->type eq FOREIGN_KEY } map $_->get_constraints, $schema->get_tables ); } sub schema { my ($self, $schema) = @_; $self->header_comments . $self->drop_tables($schema) . join("\n\n", map $self->table($_), grep { $_->name } $schema->get_tables) . "\n" . join "\n", $self->foreign_key_constraints($schema); } 1; =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Parser.pm0000644000000000000000000000273714551163724021402 0ustar00rootroot00000000000000package SQL::Translator::Parser; use strict; use warnings; our $VERSION = '1.65'; sub parse {""} 1; # ---------------------------------------------------------------------- # Enough! or Too much. # William Blake # ---------------------------------------------------------------------- =pod =head1 NAME SQL::Translator::Parser - describes how to write a parser =head1 DESCRIPTION Parser modules that get invoked by SQL::Translator need to implement a single function: B. This function will be called by the SQL::Translator instance as $class::parse($tr, $data_as_string), where $tr is a SQL::Translator instance. Other than that, the classes are free to define any helper functions, or use any design pattern internally that make the most sense. When the parser has determined what exists, it will communicate the structure to the producer through the SQL::Translator::Schema object. 
This object can be retrieved from the translator (the first argument pass to B) by calling the B method: my $schema = $tr->schema; The Schema object has methods for adding tables, fields, indices, etc. For more information, consult the docs for SQL::Translator::Schema and its related modules. For examples of how this works, examine the source code for existing SQL::Translator::Parser::* modules. =head1 AUTHORS Ken Youens-Clark, Ekclark@cpan.org, darren chamberlain Edarren@cpan.orgE. =head1 SEE ALSO perl(1), SQL::Translator, SQL::Translator::Schema. =cut SQL-Translator-1.65/lib/SQL/Translator/Manual.pod0000644000000000000000000004547614316103442021526 0ustar00rootroot00000000000000=head1 NAME SQL::Translator::Manual - sqlfairy user manual =head1 SYNOPSIS SQL::Translator (AKA "SQLFairy") is a collection of modules for transforming (mainly) SQL DDL files into a variety of other formats, including other SQL dialects, documentation, images, and code. In this manual, we will attempt to address how to use SQLFairy for common tasks. For a lower-level discussion of how the code works, please read the documentation for L. It may prove helpful to have a general understanding of the SQLFairy code before continuing. The code can be broken into three conceptual groupings: =over 4 =item * Parsers The parsers are responsible for reading the input files and describing them to the Schema object middleware. =item * Producers The producers create the output as described by the Schema middleware. =item * Schema objects The Schema objects bridge the communication between the Parsers and Producers by representing any parsed file through a standard set of generic objects to represent concepts like Tables, Fields (columns), Indices, Constraints, etc. =back It's not necessary to understand how to write or manipulate any of these for most common tasks, but you should aware of the concepts as they will be referenced later in this document. =head1 SQLFAIRY SCRIPTS Most common tasks can be accomplished through the use of the script interfaces to the SQL::Translator code. All SQLFairy scripts begin with "sqlt." Here are the scripts and a description of what they each do: =over 4 =item * sqlt This is the main interface for text-to-text translations, e.g., converting a MySQL schema to Oracle. =item * sqlt-diagram This is a tailored interface for the Diagram producer and its many myriad options. =item * sqlt-diff This script will examine two schemas and report the SQL commands (ALTER, CREATE) needed to turn the first schema into the second. =item * sqlt-dumper This script generates a Perl script that can be used to connect to a database and dump the data in each table in different formats, similar to the "mysqldump" program. =item * sqlt-graph This is an interface to the GraphViz visualization tool and its myriad options. =item * sqlt.cgi This is a CGI script that presents an HTML form for uploading or pasting a schema and choosing an output and the output options. =back To read the full documentation for each script, use "perldoc" (or execute any of the command-line scripts with the "--help" flag). =head1 CONVERTING SQL DIALECTS Probably the most common task SQLFairy is used for is to convert one dialect of SQL to another. If you have a text description of an SQL database (AKA a "DDL" -- "Data Definition Language"), then you should use the "sqlt" script with switches to indicate the parser and producer and the name of the text file as the final argument. 
For example, to convert the "foo.sql" MySQL schema to a version suitable for PostgreSQL, you would do the following: $ sqlt -f MySQL -t PostgreSQL foo.sql > foo-pg.sql The "from" and "to" options are case-sensitive and must match exactly the names of the Parser and Producer classes in SQL::Translator. For a complete listing of your options, execute "sqlt" with the "--list" flag. =head1 EXTRACT SQL SCHEMAS DIRECTLY FROM DATABASE It is possible to extract some schemas directly from the database without parsing a text file (the "foo.sql" in the above example). This can prove significantly faster than parsing a text file. To do this, use the "DBI" parser and provide the necessary arguments to connect to the database and indicate the producer class, like so: $ sqlt -f DBI --dsn dbi:mysql:FOO --db-user guest \ --db-password p4ssw0rd -t PostgreSQL > foo The "--list" option to "sqlt" will show the databases supported by DBI parsers. =head1 HANDLING NON-SQL DATA Certain structured document formats can be easily thought of as tables. SQLFairy can parse Microsoft Excel spreadsheets and arbitrarily delimited text files just as if they were schemas which contained only one table definition. The column names are normalized to something sane for most databases (whitespace is converted to underscores and non-word characters are removed), and the data in each field is scanned to determine the appropriate data type (character, integer, or float) and size. For instance, to convert a comma-separated file to an SQLite database, do the following: $ sqlt -f xSV --fs ',' -t SQLite foo.csv > foo-sqlite.sql Additionally, there is a non-SQL representation of relational schemas namely XML. Additionally, the only XML supported is our own version; however, it would be fairly easy to add an XML parser for something like the TorqueDB (http://db.apache.org/torque/) project. The actual parsing of XML should be trivial given the number of XML parsers available, so all that would be left would be to map the specific concepts in the source file to the Schema objects in SQLFairy. To convert a schema in SQLFairy's XML dialect to Oracle, do the following: $ sqlt -f XML-SQLFairy -t Oracle foo.xml > foo-oracle.sql =head1 SERIALIZING SCHEMAS Parsing a schema is generally the most computationally expensive operation performed by SQLFairy, so it may behoove you to serialize a parsed schema if you need to perform repeated conversions. For example, as part of a build process the author converts a MySQL schema first to YAML, then to PostgreSQL, Oracle, SQLite and Sybase. Additionally, a variety of documentation in HTML and images is produced. This can be accomplished like so: $ sqlt -f MySQL -t YAML schema-mysql.sql > schema.yaml $ sqlt -f YAML -t Oracle schema.yaml > schema-oracle.sql $ sqlt -f YAML -t PostgreSQL schema.yaml > schema-postgresql.sql $ ... SQLFairy has three serialization producers, none of which is superior to the other in their description of a schema. =over 4 =item * XML-SQLFairy This is the aforementioned XML format. It is essentially a direct mapping of the Schema objects into XML. This can also provide a very convenient bridge to describing a schema to a non-Perl application. Providing a producer argument to "sqlt" of just "XML" will default to using "XML-SQLFairy." =item * Storable This producer stores the Schema object using Perl's Storable.pm module available on CPAN. =item * YAML This producer serialized the Schema object with the very readable structured data format of YAML (http://www.yaml.org/). 
Earlier examples show serializing to YAML. =back =head1 VISUALIZING SQL SCHEMAS The visualization tools in SQLFairy can graphically represent the tables, fields, datatypes and sizes, constraints, and foreign key relationships in a very compact and intuitive format. This can be very beneficial in understanding and document large or small schemas. Two producers in SQLFairy will create pseudo-E/R (entity-relationship) diagrams: =over 4 =item * Diagram The first visualization tool in SQLFairy, this producer uses libgd to draw a picture of the schema. The tables are evenly distributed in definition order running in columns (i.e., no graphing algorithms are used), so the many of the lines showing the foreign key relationships may cross over each other and the table boxes. Please read the documentation of the "sqlt-diagram" script for all the options available to this producer. =item * GraphViz The layout of the GraphViz producer is far superior to the Diagram producer as it uses the Graphviz binary from Bell Labs to create very professional-looking graphs. There are several different layout algorithms and node shapes available. Please see the documentation of the "sqlt-graph" script for more information. =back =head1 AUTOMATED CODE-GENERATION Given that so many applications interact with SQL databases, it's no wonder that people have automated code to deal with this interaction. Class::DBI from CPAN is one such module that allows a developer to describe the relationships between tables and fields in class declarations and then generates all the SQL to interact (SELECT, UPDATE, DELETE, INSERT statements) at runtime. Obviously, the schema already describes itself, so it only makes sense that you should be able to generate this kind of code directly from the schema. The "ClassDBI" producer in SQLFairy does just this, creating a Perl module that inherits from Class::DBI and sets up most of the code needed to interact with the database. Here is an example of how to do this: $ sqlt -f MySQL -t ClassDBI foo.sql > Foo.pm Then simply edit Foo.pm as needed and include it in your code. =head1 CREATING A DATA DUMPER SCRIPT The Dumper producer creates a Perl script that can select the fields in each table and then create "INSERT" statements for each record in the database similar to the output generated by MySQL's "mysqldump" program: $ sqlt -f YAML -t Dumper --dumper-db-user guest \ > --dumper-db-pass p4ssw0rd --dumper-dsn dbi:mysql:FOO \ > foo.yaml > foo-dumper.pl And then execute the resulting script to dump the data: $ chmod +x foo-dumper.pl $ ./foo-dumper.pl > foo-data.sql The dumper script also has a number of options available. Execute the script with the "--help" flag to read about them. =head1 DOCUMENTING WITH SQL::TRANSLATOR SQLFairy offers two producers to help document schemas: =over 4 =item * HTML This producer creates a single HTML document which uses HTML formatting to describe the Schema objects and to create hyperlinks on foreign key relationships. This can be a surprisingly useful documentation aid as it creates a very readable format that allows one to jump easily to specific tables and fields. It's also possible to plugin your own CSS to further control the presentation of the HTML. =item * POD This is arguably not that useful of a producer by itself, but the number of POD-conversion tools could be used to further transform the POD into something more interesting. 
The schema is basically represented in POD sections where tables are broken down into fields, indices, constraints, foreign keys, etc. =back =head1 TEMPLATE-BASED MANIPULATION OF SCHEMA OBJECTS All of the producers which create text output could have been coded using a templating system to mix in the dynamic output with static text. CPAN offers several diverse templating systems, but few are as powerful as Template Toolkit (http://www.template-toolkit.org/). You can easily create your own producer without writing any Perl code at all simply by writing a template using Template Toolkit's syntax. The template will be passed a reference to the Schema object briefly described at the beginning of this document and mentioned many times throughout. For example, you could create a template that simply prints the name of each table and field that looks like this: # file: schema.tt [% FOREACH table IN schema.get_tables %] Table: [% table.name %] Fields: [% FOREACH field IN table.get_fields -%] [% field.name %] [% END -%] [% END %] And then process it like so: $ sqlt -f YAML -t TTSchema --template schema.tt foo.yaml To create output like this: Table: foo Fields: foo_id foo_name For more information on Template Toolkit, please install the "Template" module and read the POD. =head1 FINDING THE DIFFERENCES BETWEEN TWO SCHEMAS As mentioned above, the "sqlt-diff" schema examines two schemas and creates SQL schema modification statements that can be used to transform the first schema into the second. The flag syntax is somewhat quirky: $ sqlt-diff foo-v1.sql=MySQL foo-v2.sql=Oracle > diff.sql As demonstrated, the schemas need not even be from the same vendor, though this is likely to produce some spurious results as datatypes are not currently viewed equivalent unless they match exactly, even if they would be converted to the same. For example, MySQL's "integer" data type would be converted to Oracle's "number," but the differ isn't quite smart enough yet to figure this out. Also, as the SQL to ALTER a field definition varies from database vendor to vendor, these statements are made using just the keyword "CHANGE" and will likely need to be corrected for the target database. =head1 A UNIFIED GRAPHICAL INTERFACE Seeing all the above options and scripts, you may be pining for a single, graphical interface to handle all these transformations and choices. This is exactly what the "sqlt.cgi" script provides. Simply drop this script into your web server's CGI directory and enable the execute bit and you can point your web browser to an HTML form which provides a simple interface to all the SQLFairy parsers and producers. =head1 PLUGIN YOUR OWN PARSERS AND PRODUCERS Now that you have seen how the parsers and producers interact via the Schema objects, you may wish to create your own versions to plugin. Producers are probably the easier concept to grok, so let's cover that first. By far the easiest way to create custom output is to use the TTSchema producer in conjunction with a Template Toolkit template as described earlier. However, you can also easily pass a reference to a subroutine that SQL::Translator can call for the production of the output. This subroutine will be passed a single argument of the SQL::Translator object which you can use to access the Schema objects. Please read the POD for SQL::Translator and SQL::Translator::Schema to learn the methods you can call. 
Here is a very simple example: #!/usr/bin/perl use strict; use SQL::Translator; my $input = q[ create table foo ( foo_id int not null default '0' primary key, foo_name varchar(30) not null default '' ); create table bar ( bar_id int not null default '0' primary key, bar_value varchar(100) not null default '' ); ]; my $t = SQL::Translator->new; $t->parser('MySQL') or die $t->error; $t->producer( \&produce ) or die $t->error; my $output = $t->translate( \$input ) or die $t->error; print $output; sub produce { my $tr = shift; my $schema = $tr->schema; my $output = ''; for my $t ( $schema->get_tables ) { $output .= join('', "Table = ", $t->name, "\n"); } return $output; } Executing this script produces the following: $ ./my-producer.pl Table = foo Table = bar A custom parser will be passed two arguments: the SQL::Translator object and the data to be parsed. In this example, the schema will be represented in a simple text format. Each line is a table definition where the fields are separated by colons. The first field is the table name and the following fields are column definitions where the column name, data type and size are separated by spaces. The specifics of the example are unimportant -- what is being demonstrated is that you have to decide how to parse the incoming data and then map the concepts in the data to the Schema object. #!/usr/bin/perl use strict; use SQL::Translator; my $input = "foo:foo_id int 11:foo_name varchar 30\n" . "bar:bar_id int 11:bar_value varchar 30" ; my $t = SQL::Translator->new; $t->parser( \&parser ) or die $t->error; $t->producer('Oracle') or die $t->error; my $output = $t->translate( \$input ) or die $t->error; print $output; sub parser { my ( $tr, $data ) = @_; my $schema = $tr->schema; for my $line ( split( /\n/, $data ) ) { my ( $table_name, @fields ) = split( /:/, $line ); my $table = $schema->add_table( name => $table_name ) or die $schema->error; for ( @fields ) { my ( $f_name, $type, $size ) = split; $table->add_field( name => $f_name, data_type => $type, size => $size, ) or die $table->error; } } return 1; } And here is the output produced by this script: -- -- Created by SQL::Translator::Producer::Oracle -- Created on Wed Mar 31 15:43:30 2004 -- -- -- Table: foo -- CREATE TABLE foo ( foo_id number(11), foo_name varchar2(30) ); -- -- Table: bar -- CREATE TABLE bar ( bar_id number(11), bar_value varchar2(30) ); If you create a useful parser or producer, you are encouraged to submit your work to the SQLFairy project! =head1 PLUGIN TEMPLATE TOOLKIT PRODUCERS You may find that the TTSchema producer doesn't give you enough control over templating and you want to play with the Template config or add you own variables. Or maybe you just have a really good template you want to submit to SQLFairy :) If so, the SQL::Translator::Producer::TT::Base producer may be just for you! Instead of working like a normal producer it provides a base class so you can cheaply build new producer modules based on templates. It's simplest use is when we just want to put a single template in its own module. So to create a Foo producer we create a F file as follows, putting our template in the __DATA__ section. 
package Custom::Foo.pm; use base qw/SQL::Translator::Producer::TT::Base/; # Use our new class as the producer sub produce { return __PACKAGE__->new( translator => shift )->run; }; __DATA__ [% FOREACH table IN schema.get_tables %] Table: [% table.name %] Fields: [% FOREACH field IN table.get_fields -%] [% field.name %] [% END -%] [% END %] For that we get a producer called Custom::Foo that we can now call like a normal producer (as long as the directory with F is in our @INC path): $ sqlt -f YAML -t Custom-Foo foo.yaml The template gets variables of C and C to use in building its output. You also get a number of methods you can override to hook into the template generation. B Allows you to set the config options used by the Template object. The Template Toolkit provides a huge number of options which allow you to do all sorts of magic (See L for details). This method provides a hook into them by returning a hash of options for the Template. e.g. Say you want to use the INTERPOLATE option to save some typing in your template; sub tt_config { ( INTERPOLATE => 1 ); } Another common use for this is to add you own filters to the template: sub tt_config {( INTERPOLATE => 1, FILTERS => { foo_filter => \&foo_filter, } );} Another common extension is adding your own template variables. This is done with B: sub tt_vars { ( foo => "bar" ); } What about using template files instead of DATA sections? You can already - if you give a template on the command line your new producer will use that instead of reading the DATA section: $ sqlt -f YAML -t Custom-Foo --template foo.tt foo.yaml This is useful as you can set up a producer that adds a set of filters and variables that you can then use in templates given on the command line. (There is also a tt_schema method to over ride if you need even finer control over the source of your template). Note that if you leave out the DATA section all together then your producer will require a template file name to be given. See L for more details. =head1 AUTHOR Ken Y. Clark Ekclark@cpan.orgE. SQL-Translator-1.65/lib/SQL/Translator/Utils.pm0000644000000000000000000004042714551163724021244 0ustar00rootroot00000000000000package SQL::Translator::Utils; use strict; use warnings; use Digest::SHA qw( sha1_hex ); use File::Spec; use Scalar::Util qw(blessed); use Try::Tiny; use Carp qw(carp croak); use List::Util qw(any); our $VERSION = '1.65'; use base qw(Exporter); our @EXPORT_OK = qw( debug normalize_name header_comment parse_list_arg truncate_id_uniquely $DEFAULT_COMMENT parse_mysql_version parse_dbms_version ddl_parser_instance batch_alter_table_statements uniq throw ex2err carp_ro normalize_quote_options ); use constant COLLISION_TAG_LENGTH => 8; our $DEFAULT_COMMENT = '--'; sub debug { my ($pkg, $file, $line, $sub) = caller(0); { no strict qw(refs); return unless ${"$pkg\::DEBUG"}; } $sub =~ s/^$pkg\:://; while (@_) { my $x = shift; chomp $x; $x =~ s/\bPKG\b/$pkg/g; $x =~ s/\bLINE\b/$line/g; $x =~ s/\bSUB\b/$sub/g; #warn '[' . $x . "]\n"; print STDERR '[' . $x . "]\n"; } } sub normalize_name { my $name = shift or return ''; # The name can only begin with a-zA-Z_; if there's anything # else, prefix with _ $name =~ s/^([^a-zA-Z_])/_$1/; # anything other than a-zA-Z0-9_ in the non-first position # needs to be turned into _ $name =~ tr/[a-zA-Z0-9_]/_/c; # All duplicated _ need to be squashed into one. 
$name =~ tr/_/_/s; # Trim a trailing _ $name =~ s/_$//; return $name; } sub normalize_quote_options { my $config = shift; my $quote; if (defined $config->{quote_identifiers}) { $quote = $config->{quote_identifiers}; for (qw/quote_table_names quote_field_names/) { carp "Ignoring deprecated parameter '$_', since 'quote_identifiers' is supplied" if defined $config->{$_}; } } # Legacy one set the other is not elsif (defined $config->{'quote_table_names'} xor defined $config->{'quote_field_names'}) { if (defined $config->{'quote_table_names'}) { carp "Explicitly disabling the deprecated 'quote_table_names' implies disabling 'quote_identifiers' which in turn implies disabling 'quote_field_names'" unless $config->{'quote_table_names'}; $quote = $config->{'quote_table_names'} ? 1 : 0; } else { carp "Explicitly disabling the deprecated 'quote_field_names' implies disabling 'quote_identifiers' which in turn implies disabling 'quote_table_names'" unless $config->{'quote_field_names'}; $quote = $config->{'quote_field_names'} ? 1 : 0; } } # Legacy both are set elsif (defined $config->{'quote_table_names'}) { croak 'Setting quote_table_names and quote_field_names to conflicting values is no longer supported' if ($config->{'quote_table_names'} xor $config->{'quote_field_names'}); $quote = $config->{'quote_table_names'} ? 1 : 0; } return $quote; } sub header_comment { my $producer = shift || caller; my $comment_char = shift; my $now = scalar localtime; $comment_char = $DEFAULT_COMMENT unless defined $comment_char; my $header_comment = <<"HEADER_COMMENT"; ${comment_char} ${comment_char} Created by $producer ${comment_char} Created on $now ${comment_char} HEADER_COMMENT # Any additional stuff passed in for my $additional_comment (@_) { $header_comment .= "${comment_char} ${additional_comment}\n"; } return $header_comment; } sub parse_list_arg { my $list = UNIVERSAL::isa($_[0], 'ARRAY') ? shift : [@_]; # # This protects stringification of references. # if (any { ref $_ } @$list) { return $list; } # # This processes string-like arguments. # else { return [ map { s/^\s+|\s+$//g; $_ } map { split /,/ } grep { defined && length } @$list ]; } } sub truncate_id_uniquely { my ($desired_name, $max_symbol_length) = @_; return $desired_name unless defined $desired_name && length $desired_name > $max_symbol_length; my $truncated_name = substr $desired_name, 0, $max_symbol_length - COLLISION_TAG_LENGTH - 1; # Hex isn't the most space-efficient, but it skirts around allowed # charset issues my $digest = sha1_hex($desired_name); my $collision_tag = substr $digest, 0, COLLISION_TAG_LENGTH; return $truncated_name . '_' . $collision_tag; } sub parse_mysql_version { my ($v, $target) = @_; return undef unless $v; $target ||= 'perl'; my @vers; # X.Y.Z style if ($v =~ / ^ (\d+) \. (\d{1,3}) (?: \. (\d{1,3}) )? $ /x) { push @vers, $1, $2, $3; } # XYYZZ (mysql) style elsif ($v =~ / ^ (\d) (\d{2}) (\d{2}) $ /x) { push @vers, $1, $2, $3; } # XX.YYYZZZ (perl) style or simply X elsif ($v =~ / ^ (\d+) (?: \. (\d{3}) (\d{3}) )? $ /x) { push @vers, $1, $2, $3; } else { #how do I croak sanely here? die "Unparseable MySQL version '$v'"; } if ($target eq 'perl') { return sprintf('%d.%03d%03d', map { $_ || 0 } (@vers)); } elsif ($target eq 'mysql') { return sprintf('%d%02d%02d', map { $_ || 0 } (@vers)); } else { #how do I croak sanely here? die "Unknown version target '$target'"; } } sub parse_dbms_version { my ($v, $target) = @_; return undef unless $v; my @vers; # X.Y.Z style if ($v =~ / ^ (\d+) \. (\d{1,3}) (?: \. (\d{1,3}) )? 
$ /x) { push @vers, $1, $2, $3; } # XX.YYYZZZ (perl) style or simply X elsif ($v =~ / ^ (\d+) (?: \. (\d{3}) (\d{3}) )? $ /x) { push @vers, $1, $2, $3; } else { #how do I croak sanely here? die "Unparseable database server version '$v'"; } if ($target eq 'perl') { return sprintf('%d.%03d%03d', map { $_ || 0 } (@vers)); } elsif ($target eq 'native') { return join '.' => grep defined, @vers; } else { #how do I croak sanely here? die "Unknown version target '$target'"; } } #my ($parsers_libdir, $checkout_dir); sub ddl_parser_instance { my $type = shift; # it may differ from our caller, even though currently this is not the case eval "require SQL::Translator::Parser::$type" or die "Unable to load grammar-spec container SQL::Translator::Parser::$type:\n$@"; # handle DB2 in a special way, since the grammar source was lost :( if ($type eq 'DB2') { require SQL::Translator::Parser::DB2::Grammar; return SQL::Translator::Parser::DB2::Grammar->new; } require Parse::RecDescent; return Parse::RecDescent->new(do { no strict 'refs'; ${"SQL::Translator::Parser::${type}::GRAMMAR"} || die "No \$SQL::Translator::Parser::${type}::GRAMMAR defined, unable to instantiate PRD parser\n"; }); # this is disabled until RT#74593 is resolved =begin sadness unless ($parsers_libdir) { # are we in a checkout? if ($checkout_dir = _find_co_root()) { $parsers_libdir = File::Spec->catdir($checkout_dir, 'share', 'PrecompiledParsers'); } else { require File::ShareDir; $parsers_libdir = File::Spec->catdir( File::ShareDir::dist_dir('SQL-Translator'), 'PrecompiledParsers' ); } unshift @INC, $parsers_libdir; } my $precompiled_mod = "Parse::RecDescent::DDL::SQLT::$type"; # FIXME FIXME FIXME # Parse::RecDescent has horrible architecture where each precompiled parser # instance shares global state with all its siblings # What we do here is gross, but scarily efficient - the parser compilation # is much much slower than an unload/reload cycle require Class::Unload; Class::Unload->unload($precompiled_mod); # There is also a sub-namespace that P::RD uses, but simply unsetting # $^W to stop redefine warnings seems to be enough #Class::Unload->unload("Parse::RecDescent::$precompiled_mod"); eval "local \$^W; require $precompiled_mod" or do { if ($checkout_dir) { die "Unable to find precompiled grammar for $type - run Makefile.PL to generate it\n"; } else { die "Unable to load precompiled grammar for $type... this is not supposed to happen if you are not in a checkout, please file a bugreport:\n$@" } }; my $grammar_spec_fn = $INC{"SQL/Translator/Parser/$type.pm"}; my $precompiled_fn = $INC{"Parse/RecDescent/DDL/SQLT/$type.pm"}; if ( (stat($grammar_spec_fn))[9] > (stat($precompiled_fn))[9] ) { die ( "Grammar spec '$grammar_spec_fn' is newer than precompiled parser '$precompiled_fn'" . ($checkout_dir ? " - run Makefile.PL to regenerate stale versions\n" : "... this is not supposed to happen if you are not in a checkout, please file a bugreport\n" ) ); } return $precompiled_mod->new; =end sadness =cut } # Try to determine the root of a checkout/untar if possible # or return undef sub _find_co_root { my @mod_parts = split /::/, (__PACKAGE__ . '.pm'); my $rel_path = join('/', @mod_parts); # %INC stores paths with / regardless of OS return undef unless ($INC{$rel_path}); # a bit convoluted, but what we do here essentially is: # - get the file name of this particular module # - do 'cd ..' as many times as necessary to get to lib/SQL/Translator/../../.. my $root = (File::Spec::Unix->splitpath($INC{$rel_path}))[1]; for (1 .. 
@mod_parts) { $root = File::Spec->catdir($root, File::Spec->updir); } return (-f File::Spec->catfile($root, 'Makefile.PL')) ? $root : undef; } { package SQL::Translator::Utils::Error; use overload '""' => sub { ${ $_[0] } }, fallback => 1; sub new { my ($class, $msg) = @_; bless \$msg, $class; } } sub uniq { my (%seen, $seen_undef, $numeric_preserving_copy); grep { not(defined $_ ? $seen{ $numeric_preserving_copy = $_ }++ : $seen_undef++) } @_; } sub throw { die SQL::Translator::Utils::Error->new($_[0]); } sub ex2err { my ($orig, $self, @args) = @_; return try { $self->$orig(@args); } catch { die $_ unless blessed($_) && $_->isa("SQL::Translator::Utils::Error"); $self->error("$_"); }; } sub carp_ro { my ($name) = @_; return sub { my ($orig, $self) = (shift, shift); carp "'$name' is a read-only accessor" if @_; return $self->$orig; }; } sub batch_alter_table_statements { my ($diff_hash, $options, @meths) = @_; @meths = qw( rename_table alter_drop_constraint alter_drop_index drop_field add_field alter_field rename_field alter_create_index alter_create_constraint alter_table ) unless @meths; my $package = caller; return map { my $meth = $package->can($_) or die "$package cant $_"; map { $meth->(ref $_ eq 'ARRAY' ? @$_ : $_, $options) } @{ $diff_hash->{$_} } } grep { @{ $diff_hash->{$_} || [] } } @meths; } 1; =pod =head1 NAME SQL::Translator::Utils - SQL::Translator Utility functions =head1 SYNOPSIS use SQL::Translator::Utils qw(debug); debug("PKG: Bad things happened"); =head1 DESCSIPTION C contains utility functions designed to be used from the other modules within the C modules. Nothing is exported by default. =head1 EXPORTED FUNCTIONS AND CONSTANTS =head2 debug C takes 0 or more messages, which will be sent to STDERR using C. Occurances of the strings I, I, and I will be replaced by the calling package, subroutine, and line number, respectively, as reported by C. For example, from within C in F, at line 666: debug("PKG: Error reading file at SUB/LINE"); Will warn [SQL::Translator: Error reading file at foo/666] The entire message is enclosed within C<[> and C<]> for visual clarity when STDERR is intermixed with STDOUT. =head2 normalize_name C takes a string and ensures that it is suitable for use as an identifier. This means: ensure that it starts with a letter or underscore, and that the rest of the string consists of only letters, numbers, and underscores. A string that begins with something other than [a-zA-Z] will be prefixer with an underscore, and all other characters in the string will be replaced with underscores. Finally, a trailing underscore will be removed, because that's ugly. normalize_name("Hello, world"); Produces: Hello_world A more useful example, from the C test suite: normalize_name("silly field (with random characters)"); returns: silly_field_with_random_characters =head2 header_comment Create the header comment. Takes 1 mandatory argument (the producer classname), an optional comment character (defaults to $DEFAULT_COMMENT), and 0 or more additional comments, which will be appended to the header, prefixed with the comment character. If additional comments are provided, then a comment string must be provided ($DEFAULT_COMMENT is exported for this use). For example, this: package My::Producer; use SQL::Translator::Utils qw(header_comment $DEFAULT_COMMENT); print header_comment(__PACKAGE__, $DEFAULT_COMMENT, "Hi mom!"); produces: -- -- Created by My::Prodcuer -- Created on Fri Apr 25 06:56:02 2003 -- -- Hi mom! -- Note the gratuitous spacing. 
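Producers bundled with this distribution typically emit this at the very top of the generated DDL, honoring the translator's no_comments flag. A minimal sketch of that pattern (the rest of the produce body is elided):

  sub produce {
      my $translator = shift;
      my $output = '';
      $output .= header_comment(__PACKAGE__)
          unless $translator->no_comments;
      # ... append table, view and index DDL here ...
      return $output;
  }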
=head2 parse_list_arg Takes a string, list or arrayref (all of which could contain comma-separated values) and returns an array reference of the values. All of the following will return equivalent values: parse_list_arg('id'); parse_list_arg('id', 'name'); parse_list_arg( 'id, name' ); parse_list_arg( [ 'id', 'name' ] ); parse_list_arg( qw[ id name ] ); =head2 truncate_id_uniquely Takes a string ($desired_name) and int ($max_symbol_length). Truncates $desired_name to $max_symbol_length by including part of the hash of the full name at the end of the truncated name, giving a high probability that the symbol will be unique. For example, truncate_id_uniquely( 'a' x 100, 64 ) truncate_id_uniquely( 'a' x 99 . 'b', 64 ); truncate_id_uniquely( 'a' x 99, 64 ) Will give three different results; specifically: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_7f900025 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_6191e39a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_8cd96af2 =head2 $DEFAULT_COMMENT This is the default comment string, '--' by default. Useful for C. =head2 parse_mysql_version Used by both L and L in order to provide a consistent format for both C<< parser_args->{mysql_parser_version} >> and C<< producer_args->{mysql_version} >> respectively. Takes any of the following version specifications: 5.0.3 4.1 3.23.2 5 5.001005 (perl style) 30201 (mysql style) =head2 parse_dbms_version Takes a version string (X.Y.Z) or perl style (XX.YYYZZZ) and a target ('perl' or 'native') transforms the string to the given target style. to =head2 throw Throws the provided string as an object that will stringify back to the original string. This stops it from being mangled by L's C code. =head2 ex2err Wraps an attribute accessor to catch any exception raised using L and store them in C<< $self->error() >>, finally returning undef. A reference to this function can be passed directly to L. around foo => \&ex2err; around bar => sub { my ($orig, $self) = (shift, shift); return ex2err($orig, $self, @_) if @_; ... }; =head2 carp_ro Takes a field name and returns a reference to a function can be used L a read-only accessor to make it L instead of die when passed an argument. =head2 batch_alter_table_statements Takes diff and argument hashes as passed to L and an optional list of producer functions to call on the calling package. Returns the list of statements returned by the producer functions. If no producer functions are specified, the following functions in the calling package are called: =over =item 1. rename_table =item 2. alter_drop_constraint =item 3. alter_drop_index =item 4. drop_field =item 5. add_field =item 5. alter_field =item 6. rename_field =item 7. alter_create_index =item 8. alter_create_constraint =item 9. alter_table =back If the corresponding array in the hash has any elements, but the caller doesn't implement that function, an exception is thrown. =head1 AUTHORS Darren Chamberlain Edarren@cpan.orgE, Ken Y. Clark Ekclark@cpan.orgE. 
=cut SQL-Translator-1.65/lib/SQL/Translator/Types.pm0000644000000000000000000000463114541265163021244 0ustar00rootroot00000000000000package SQL::Translator::Types; use warnings; use strict; =head1 NAME SQL::Translator::Types - Type checking functions =head1 SYNOPSIS package Foo; use Moo; use SQL::Translator::Types qw(schema_obj enum); has foo => ( is => 'rw', isa => schema_obj('Trigger') ); has bar => ( is => 'rw', isa => enum([qw(baz quux quuz)], { msg => "Invalid value for bar: '%s'", icase => 1, }); =head1 DESCRIPTIONS This module exports functions that return coderefs suitable for L C type checks. Errors are reported using L. =cut use SQL::Translator::Utils qw(throw); use Scalar::Util qw(blessed); use Exporter qw(import); our @EXPORT_OK = qw(schema_obj enum); =head1 FUNCTIONS =head2 schema_obj($type) Returns a coderef that checks that its arguments is an object of the class C<< SQL::Translator::Schema::I<$type> >>. =cut sub schema_obj { my ($class) = @_; my $name = lc $class; $class = 'SQL::Translator::Schema' . ($class eq 'Schema' ? '' : "::$class"); return sub { throw("Not a $name object") unless blessed($_[0]) and $_[0]->isa($class); }; } =head2 enum(\@strings, [$msg | \%parameters]) Returns a coderef that checks that the argument is one of the provided C<@strings>. =head3 Parameters =over =item msg L string for the error message. If no other parameters are needed, this can be provided on its own, instead of the C<%parameters> hashref. The invalid value is passed as the only argument. Defaults to C. =item icase If true, folds the values to lower case before checking for equality. =item allow_undef If true, allow C in addition to the specified strings. =item allow_false If true, allow any false value in addition to the specified strings. =back =cut sub enum { my ($values, $args) = @_; $args ||= {}; $args = { msg => $args } unless ref($args) eq 'HASH'; my $icase = !!$args->{icase}; my %values = map { ($icase ? lc : $_) => undef } @{$values}; my $msg = $args->{msg} || "Invalid value: '%s'"; my $extra_test = $args->{allow_undef} ? sub { defined $_[0] } : $args->{allow_false} ? sub { !!$_[0] } : undef; return sub { my $val = $icase ? lc $_[0] : $_[0]; throw(sprintf($msg, $val)) if (!defined($extra_test) || $extra_test->($val)) && !exists $values{$val}; }; } 1; SQL-Translator-1.65/lib/SQL/Translator/Role/0000755000000000000000000000000014551164244020476 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Role/BuildArgs.pm0000644000000000000000000000122314541265164022710 0ustar00rootroot00000000000000package SQL::Translator::Role::BuildArgs; =head1 NAME SQL::Translator::Role::BuildArgs - Remove undefined constructor arguments =head1 SYNOPSIS package Foo; use Moo; with qw(SQL::Translator::Role::BuildArgs); =head1 DESCRIPTION This L wraps BUILDARGS to remove C constructor arguments for backwards compatibility with the old L-based L. =cut use Moo::Role; around BUILDARGS => sub { my $orig = shift; my $self = shift; my $args = $self->$orig(@_); foreach my $arg (keys %{$args}) { delete $args->{$arg} unless defined($args->{$arg}); } return $args; }; 1; SQL-Translator-1.65/lib/SQL/Translator/Role/Error.pm0000644000000000000000000000300214541265164022122 0ustar00rootroot00000000000000package SQL::Translator::Role::Error; =head1 NAME SQL::Translator::Role::Error - Error setter/getter for objects and classes =head1 SYNOPSIS In the class consuming the role: package Foo; use Moo; with qw(SQL::Translator::Role::Error); sub foo { ... 
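        # error() stores the message and returns undef, so this also returns false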
return $self->error("Something failed") unless $some_condition; ... } In code using the class: Foo->foo or die Foo->error; # or $foo->foo or die $foo->error; =head1 DESCRIPTION This L provides a method for getting and setting error on a class or object. =cut use Moo::Role; use Sub::Quote qw(quote_sub); has _ERROR => ( is => 'rw', accessor => 'error', init_arg => undef, default => quote_sub(q{ '' }), ); =head1 METHODS =head2 $object_or_class->error([$message]) If called with an argument, sets the error message and returns undef, otherwise returns the message. As an implementation detail, for compatibility with L, the message is stored in C<< $object->{_ERROR} >> or C<< $Class::ERROR >>, depending on whether the invocant is an object. =cut around error => sub { my ($orig, $self) = (shift, shift); # Emulate horrible Class::Base API unless (ref($self)) { my $errref = do { no strict 'refs'; \${"${self}::ERROR"} }; return $$errref unless @_; $$errref = $_[0]; return undef; } return $self->$orig unless @_; $self->$orig(@_); return undef; }; =head1 SEE ALSO =over =item * L =back =cut 1; SQL-Translator-1.65/lib/SQL/Translator/Role/Debug.pm0000644000000000000000000000141414541265164022064 0ustar00rootroot00000000000000package SQL::Translator::Role::Debug; use Moo::Role; use Sub::Quote qw(quote_sub); has _DEBUG => ( is => 'rw', accessor => 'debugging', init_arg => 'debugging', coerce => quote_sub(q{ $_[0] ? 1 : 0 }), lazy => 1, builder => 1, ); sub _build__DEBUG { my ($self) = @_; my $class = ref $self; no strict 'refs'; return ${"${class}::DEBUG"}; } around debugging => sub { my ($orig, $self) = (shift, shift); # Emulate horrible Class::Base API unless (ref $self) { my $dbgref = do { no strict 'refs'; \${"${self}::DEBUG"} }; $$dbgref = $_[0] if @_; return $$dbgref; } return $self->$orig(@_); }; sub debug { my $self = shift; return unless $self->debugging; print STDERR '[', (ref $self || $self), '] ', @_, "\n"; } 1; SQL-Translator-1.65/lib/SQL/Translator/Role/ListAttr.pm0000644000000000000000000000550714541265164022613 0ustar00rootroot00000000000000package SQL::Translator::Role::ListAttr; use warnings; use strict; =head1 NAME SQL::Translator::Role::ListAttr - context-sensitive list attributes =head1 SYNOPSIS package Foo; use Moo; use SQL::Translator::Role::ListAttr; with ListAttr foo => ( uniq => 1, append => 1 ); =head1 DESCRIPTION This package provides a variable L for context-sensitive list attributes. =cut use SQL::Translator::Utils qw(parse_list_arg ex2err uniq); use Sub::Quote qw(quote_sub); use Package::Variant ( importing => { 'Moo::Role' => [], }, subs => [qw(has around)], ); =head1 FUNCTIONS =head2 ListAttr $name => %parameters; Returns a L providing an arrayref attribute named C<$name>, and wrapping the accessor to provide context-sensitivity both for setting and getting. If no C or C is provided, the default value is the empty list. On setting, the arguments are parsed using L, and the accessor will return an array reference or a list, depending on context. =head3 Parameters =over =item append If true, the setter will append arguments to the existing ones, rather than replacing them. =item uniq If true, duplicate items will be removed, keeping the first one seen. =item may_throw If accessing the attribute might L an exception (e.g. from a C or C check), this should be set to make the accessor store the exception using L and return undef. =item undef_if_empty If true, and the list is empty, the accessor will return C instead of a reference to an empty in scalar context. 
=back Unknown parameters are passed through to the L call for the attribute. =cut sub make_variant { my ($class, $target_package, $name, %arguments) = @_; my $may_throw = delete $arguments{may_throw}; my $undef_if_empty = delete $arguments{undef_if_empty}; my $append = delete $arguments{append}; my $coerce = delete $arguments{uniq} ? sub { [ uniq @{ parse_list_arg($_[0]) } ] } : \&parse_list_arg; has($name => ( is => 'rw', (!$arguments{builder} ? (default => quote_sub(q{ [] }),) : ()), coerce => $coerce, %arguments, )); around( $name => sub { my ($orig, $self) = (shift, shift); my $list = parse_list_arg(@_); $self->$orig([ @{ $append ? $self->$orig : [] }, @$list ]) if @$list; my $return; if ($may_throw) { $return = ex2err($orig, $self) or return; } else { $return = $self->$orig; } my $scalar_return = !@{$return} && $undef_if_empty ? undef : $return; return wantarray ? @{$return} : $scalar_return; } ); } =head1 SEE ALSO =over =item L =item L =back =cut 1; SQL-Translator-1.65/lib/SQL/Translator/Producer.pm0000644000000000000000000000524114551163724021722 0ustar00rootroot00000000000000package SQL::Translator::Producer; use strict; use warnings; use Scalar::Util (); our $VERSION = '1.65'; sub produce {""} # Do not rely on this if you are not bundled with SQL::Translator. # -- rjbs, 2008-09-30 ## $exceptions contains an arrayref of paired values ## Each pair contains a pattern match or string, and a value to be used as ## the default if matched. ## They are special per Producer, and provide support for the old 'now()' ## default value exceptions sub _apply_default_value { my ($self, $field, $field_ref, $exceptions) = @_; my $default = $field->default_value; return if !defined $default; if ($exceptions and !ref $default) { for (my $i = 0; $i < @$exceptions; $i += 2) { my ($pat, $val) = @$exceptions[ $i, $i + 1 ]; if (ref $pat and $default =~ $pat) { $default = $val; last; } elsif (lc $default eq lc $pat) { $default = $val; last; } } } my $type = lc $field->data_type; my $is_numeric_datatype = ($type =~ /^(?:(?:big|medium|small|tiny)?int(?:eger)?|decimal|double|float|num(?:ber|eric)?|real)$/); if (ref $default) { $$field_ref .= " DEFAULT $$default"; } elsif ($is_numeric_datatype && Scalar::Util::looks_like_number($default)) { # we need to check the data itself in addition to the datatype, for basic safety $$field_ref .= " DEFAULT $default"; } else { $default = $self->_quote_string($default); $$field_ref .= " DEFAULT $default"; } } sub _quote_string { my ($self, $string) = @_; $string =~ s/'/''/g; return qq{'$string'}; } 1; # ------------------------------------------------------------------- # A burnt child loves the fire. # Oscar Wilde # ------------------------------------------------------------------- =pod =head1 NAME SQL::Translator::Producer - describes how to write a producer =head1 DESCRIPTION Producer modules designed to be used with SQL::Translator need to implement a single function, called B. B will be called with the SQL::Translator object from which it is expected to retrieve the SQL::Translator::Schema object which has been populated by the parser. It is expected to return a string. =head1 METHODS =over 4 =item produce =item create_table($table) =item create_field($field) =item create_view($view) =item create_index($index) =item create_constraint($constraint) =item create_trigger($trigger) =item alter_field($from_field, $to_field) =item add_field($table, $new_field) =item drop_field($table, $old_field) =back =head1 AUTHORS Darren Chamberlain Edarren@cpan.orgE, Ken Y. 
Clark Ekclark@cpan.orgE. =head1 SEE ALSO perl(1), SQL::Translator, SQL::Translator::Schema. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/0000755000000000000000000000000014551164244021360 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Producer/Oracle.pm0000644000000000000000000006225314551163724023135 0ustar00rootroot00000000000000package SQL::Translator::Producer::Oracle; =head1 NAME SQL::Translator::Producer::Oracle - Oracle SQL producer =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'Oracle' ); print $translator->translate( $file ); =head1 DESCRIPTION Creates an SQL DDL suitable for Oracle. =head1 producer_args =over =item delay_constraints This option remove the primary key and other key constraints from the CREATE TABLE statement and adds ALTER TABLEs at the end with it. =item quote_field_names Controls whether quotes are being used around column names in generated DDL. =item quote_table_names Controls whether quotes are being used around table, sequence and trigger names in generated DDL. =back =head1 NOTES =head2 Autoincremental primary keys This producer uses sequences and triggers to autoincrement primary key columns, if necessary. SQLPlus and DBI expect a slightly different syntax of CREATE TRIGGER statement. You might have noticed that this producer returns a scalar containing all statements concatenated by newlines or an array of single statements depending on the context (scalar, array) it has been called in. SQLPlus expects following trigger syntax: CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; / Whereas if you want to create the same trigger using L, you need to omit the last slash: my $dbh = DBI->connect('dbi:Oracle:mysid', 'scott', 'tiger'); $dbh->do(" CREATE OR REPLACE TRIGGER ai_person_id BEFORE INSERT ON person FOR EACH ROW WHEN ( new.id IS NULL OR new.id = 0 ) BEGIN SELECT sq_person_id.nextval INTO :new.id FROM dual; END; "); If you call this producer in array context, we expect you want to process the returned array of statements using L like L does. To get this working we removed the slash in those statements in version 0.09002 of L when called in array context. In scalar context the slash will be still there to ensure compatibility with SQLPlus. 
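For example, a minimal sketch of both calling styles (the MySQL DDL in $mysql_ddl and the connection details are assumed, and the context of the translate() call is taken to carry through to the producer as described above):

  use SQL::Translator;
  use DBI;

  my $t = SQL::Translator->new( parser => 'MySQL', producer => 'Oracle' );

  # scalar context: one string, triggers terminated with "/" for SQLPlus
  my $ddl = $t->translate( \$mysql_ddl );

  # list context: individual statements with the trailing slash omitted,
  # suitable for passing to DBI's do() one at a time
  my @statements = $t->translate( \$mysql_ddl );
  my $dbh = DBI->connect( 'dbi:Oracle:mysid', 'scott', 'tiger' );
  $dbh->do($_) for @statements;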
=cut use strict; use warnings; our ($DEBUG, $WARN); our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; use base 'SQL::Translator::Producer'; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); use Data::Dumper; my %translate = ( # # MySQL types # bigint => 'number', double => 'float', decimal => 'number', float => 'float', int => 'number', integer => 'number', mediumint => 'number', smallint => 'number', tinyint => 'number', char => 'char', varchar => 'varchar2', tinyblob => 'blob', blob => 'blob', mediumblob => 'blob', longblob => 'blob', tinytext => 'varchar2', text => 'clob', longtext => 'clob', mediumtext => 'clob', enum => 'varchar2', set => 'varchar2', date => 'date', datetime => 'date', time => 'date', timestamp => 'date', year => 'date', # # PostgreSQL types # numeric => 'number', 'double precision' => 'number', serial => 'number', bigserial => 'number', money => 'number', character => 'char', 'character varying' => 'varchar2', bytea => 'BLOB', interval => 'number', boolean => 'number', point => 'number', line => 'number', lseg => 'number', box => 'number', path => 'number', polygon => 'number', circle => 'number', cidr => 'number', inet => 'varchar2', macaddr => 'varchar2', bit => 'number', 'bit varying' => 'number', # # Oracle types # number => 'number', varchar2 => 'varchar2', long => 'clob', ); # # Oracle 8/9 max size of data types from: # http://www.ss64.com/orasyntax/datatypes.html # my %max_size = ( char => 2000, float => 126, nchar => 2000, nvarchar2 => 4000, number => [ 38, 127 ], raw => 2000, varchar => 4000, # only synonym for varchar2 varchar2 => 4000, ); my $max_id_length = 30; my %used_identifiers = (); my %global_names; my %truncated; # Quote used to escape table, field, sequence and trigger names my $quote_char = '"'; sub produce { my $translator = shift; $DEBUG = $translator->debug; $WARN = $translator->show_warnings || 0; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $oracle_version = $translator->producer_args->{oracle_version} || 0; my $delay_constraints = $translator->producer_args->{delay_constraints}; my ($output, $create, @table_defs, @fk_defs, @trigger_defs, @index_defs, @constraint_defs); debug("ORA: Beginning production"); $create .= header_comment unless ($no_comments); my $qt = 1 if $translator->quote_table_names; my $qf = 1 if $translator->quote_field_names; if ($translator->parser_type =~ /mysql/i) { $create .= "-- We assume that default NLS_DATE_FORMAT has been changed\n" . "-- but we set it here anyway to be self-consistent.\n" unless $no_comments; $create .= "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\n\n"; } for my $table ($schema->get_tables) { debug("ORA: Producing for table " . $table->name); my ($table_def, $fk_def, $trigger_def, $index_def, $constraint_def) = create_table( $table, { add_drop_table => $add_drop_table, show_warnings => $WARN, no_comments => $no_comments, delay_constraints => $delay_constraints, quote_table_names => $qt, quote_field_names => $qf, } ); push @table_defs, @$table_def; push @fk_defs, @$fk_def; push @trigger_defs, @$trigger_def; push @index_defs, @$index_def; push @constraint_defs, @$constraint_def; } my (@view_defs); foreach my $view ($schema->get_views) { my ($view_def) = create_view( $view, { add_drop_view => $add_drop_table, quote_table_names => $qt, } ); push @view_defs, @$view_def; } if (wantarray) { return defined $create ? 
$create : (), @table_defs, @view_defs, @fk_defs, @trigger_defs, @index_defs, @constraint_defs; } else { $create .= join(";\n\n", @table_defs, @view_defs, @fk_defs, @index_defs, @constraint_defs); $create .= ";\n\n"; # If wantarray is not set we have to add "/" in this statement # DBI->do() needs them omitted # triggers may NOT end with a semicolon but a "/" instead $create .= "$_/\n\n" for @trigger_defs; return $create; } } sub create_table { my ($table, $options) = @_; my $qt = $options->{quote_table_names}; my $qf = $options->{quote_field_names}; my $table_name = $table->name; my $table_name_q = quote($table_name, $qt); my $item = ''; my $drop; my (@create, @field_defs, @constraint_defs, @fk_defs, @trigger_defs); push @create, "--\n-- Table: $table_name\n--" unless $options->{no_comments}; push @create, qq[DROP TABLE $table_name_q CASCADE CONSTRAINTS] if $options->{add_drop_table}; my (%field_name_scope, @field_comments); for my $field ($table->get_fields) { debug("ORA: Creating field " . $field->name . "(" . $field->data_type . ")"); my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($field, $options, \%field_name_scope); push @create, @$field_create if ref $field_create; push @field_defs, @$field_defs if ref $field_defs; push @trigger_defs, @$trigger_defs if ref $trigger_defs; push @field_comments, @$field_comments if ref $field_comments; } # # Table options # my @table_options; for my $opt ($table->options) { if (ref $opt eq 'HASH') { my ($key, $value) = each %$opt; if (ref $value eq 'ARRAY') { push @table_options, "$key\n(\n" . join( "\n", map {" $_->[0]\t$_->[1]"} map { [ each %$_ ] } @$value ) . "\n)"; } elsif (!defined $value) { push @table_options, $key; } else { push @table_options, "$key $value"; } } } # # Table constraints # for my $c ($table->get_constraints) { my $constr = create_constraint($c, $options); if ($constr) { if ($c->type eq FOREIGN_KEY) { # FK defs always come later as alters push @fk_defs, sprintf("ALTER TABLE %s ADD %s", $table_name_q, $constr); } else { push @constraint_defs, $constr; } } } # # Index Declarations # my @index_defs = (); for my $index ($table->get_indices) { my $index_name = $index->name || ''; my $index_type = $index->type || NORMAL; my @fields = map { quote($_, $qf) } $index->fields; next unless @fields; debug("ORA: Creating $index_type index on fields (" . join(', ', @fields) . ") named $index_name"); my @index_options; for my $opt ($index->options) { if (ref $opt eq 'HASH') { my ($key, $value) = each %$opt; if (ref $value eq 'ARRAY') { push @table_options, "$key\n(\n" . join( "\n", map {" $_->[0]\t$_->[1]"} map { [ each %$_ ] } @$value ) . "\n)"; } elsif (!defined $value) { push @index_options, $key; } else { push @index_options, "$key $value"; } } } my $index_options = @index_options ? "\n" . join("\n", @index_options) : ''; if ($index_type eq PRIMARY_KEY) { $index_name = $index_name ? mk_name($index_name) : mk_name($table_name, 'pk'); $index_name = quote($index_name, $qf); push @field_defs, 'CONSTRAINT ' . $index_name . ' PRIMARY KEY ' . '(' . join(', ', @fields) . 
')'; } elsif ($index_type eq NORMAL or $index_type eq UNIQUE) { push @index_defs, create_index($index, $options, $index_options); } else { warn "Unknown index type ($index_type) on table $table_name.\n" if $WARN; } } if (my @table_comments = $table->comments) { for my $comment (@table_comments) { next unless $comment; $comment = __PACKAGE__->_quote_string($comment); push @field_comments, "COMMENT ON TABLE $table_name_q is\n $comment" unless $options->{no_comments}; } } my $table_options = @table_options ? "\n" . join("\n", @table_options) : ''; push @create, "CREATE TABLE $table_name_q (\n" . join(",\n", map {" $_"} @field_defs, ($options->{delay_constraints} ? () : @constraint_defs)) . "\n)$table_options"; @constraint_defs = map {"ALTER TABLE $table_name_q ADD $_"} @constraint_defs; if ($WARN) { if (%truncated) { warn "Truncated " . keys(%truncated) . " names:\n"; warn "\t" . join("\n\t", sort keys %truncated) . "\n"; } } return \@create, \@fk_defs, \@trigger_defs, \@index_defs, ($options->{delay_constraints} ? \@constraint_defs : []); } sub alter_field { my ($from_field, $to_field, $options) = @_; my $qt = $options->{quote_table_names}; my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($to_field, $options, {}); # Fix ORA-01442 if (!$from_field->is_nullable && $to_field->is_nullable) { if ($from_field->data_type =~ /text/) { die 'Cannot alter CLOB field in this way'; } else { @$field_defs = map { $_ .= ' NULL' } @$field_defs; } } elsif (!$from_field->is_nullable && !$to_field->is_nullable) { @$field_defs = map { s/ NOT NULL//; $_ } @$field_defs; } my $table_name = quote($to_field->table->name, $qt); return 'ALTER TABLE ' . $table_name . ' MODIFY ( ' . join('', @$field_defs) . ' )'; } sub drop_field { my ($old_field, $options) = @_; my $qi = $options->{quote_identifiers}; my $table_name = quote($old_field->table->name, $qi); my $out = sprintf('ALTER TABLE %s DROP COLUMN %s', $table_name, quote($old_field->name, $qi)); return $out; } sub add_field { my ($new_field, $options) = @_; my $qt = $options->{quote_table_names}; my ($field_create, $field_defs, $trigger_defs, $field_comments) = create_field($new_field, $options, {}); my $table_name = quote($new_field->table->name, $qt); my $out = sprintf('ALTER TABLE %s ADD ( %s )', $table_name, join('', @$field_defs)); return $out; } sub create_field { my ($field, $options, $field_name_scope) = @_; my $qf = $options->{quote_field_names}; my $qt = $options->{quote_table_names}; my (@create, @field_defs, @trigger_defs, @field_comments); my $table_name = $field->table->name; my $table_name_q = quote($table_name, $qt); # # Field name # my $field_name = mk_name($field->name, '', $field_name_scope, 1); my $field_name_q = quote($field_name, $qf); my $field_def = quote($field_name, $qf); $field->name($field_name); # # Datatype # my $check; my $data_type = lc $field->data_type; my @size = $field->size; my %extra = $field->extra; my $list = $extra{'list'} || []; my $commalist = join(', ', map { __PACKAGE__->_quote_string($_) } @$list); if ($data_type eq 'enum') { $check = "CHECK ($field_name_q IN ($commalist))"; $data_type = 'varchar2'; } elsif ($data_type eq 'set') { # XXX add a CHECK constraint maybe # (trickier and slower, than enum :) $data_type = 'varchar2'; } else { if (defined $translate{$data_type}) { if (ref $translate{$data_type} eq "ARRAY") { ($data_type, $size[0]) = @{ $translate{$data_type} }; } else { $data_type = $translate{$data_type}; } } $data_type ||= 'varchar2'; } # ensure size is not bigger than max size 
oracle allows for data type if (defined $max_size{$data_type}) { for (my $i = 0; $i < scalar @size; $i++) { my $max = ref($max_size{$data_type}) eq 'ARRAY' ? $max_size{$data_type}->[$i] : $max_size{$data_type}; $size[$i] = $max if $size[$i] > $max; } } # # Fixes ORA-02329: column of datatype LOB cannot be # unique or a primary key # if ($data_type eq 'clob' && $field->is_primary_key) { $data_type = 'varchar2'; $size[0] = 4000; warn "CLOB cannot be a primary key, changing to VARCHAR2\n" if $WARN; } if ($data_type eq 'clob' && $field->is_unique) { $data_type = 'varchar2'; $size[0] = 4000; warn "CLOB cannot be a unique key, changing to VARCHAR2\n" if $WARN; } # # Fixes ORA-00907: missing right parenthesis # if ($data_type =~ /(date|clob)/i) { undef @size; } # # Fixes ORA-00906: missing right parenthesis # if size is 0 or undefined # for (qw/varchar2/) { if ($data_type =~ /^($_)$/i) { $size[0] ||= $max_size{$_}; } } $field_def .= " $data_type"; if (defined $size[0] && $size[0] > 0) { $field_def .= '(' . join(',', @size) . ')'; } # # Default value # my $default = $field->default_value; if (defined $default) { debug("ORA: Handling default value: $default"); # # Wherein we try to catch a string being used as # a default value for a numerical field. If "true/false," # then sub "1/0," otherwise just test the truthity of the # argument and use that (naive?). # if (ref $default and defined $$default) { $default = $$default; } elsif (ref $default) { $default = 'NULL'; } elsif ($data_type =~ /^number$/i && $default !~ /^-?\d+$/ && $default !~ m/null/i) { if ($default =~ /^true$/i) { $default = "'1'"; } elsif ($default =~ /^false$/i) { $default = "'0'"; } else { $default = $default ? "'1'" : "'0'"; } } elsif ( $data_type =~ /date/ && ( $default eq 'current_timestamp' || $default eq 'now()') ) { $default = 'SYSDATE'; } else { $default = $default =~ m/null/i ? 'NULL' : __PACKAGE__->_quote_string($default); } $field_def .= " DEFAULT $default",; } # # Not null constraint # unless ($field->is_nullable) { debug("ORA: Field is NOT NULL"); $field_def .= ' NOT NULL'; } $field_def .= " $check" if $check; # # Auto_increment # if ($field->is_auto_increment) { debug("ORA: Handling auto increment"); my $base_name = $table_name . "_" . $field_name; my $seq_name = quote(mk_name($base_name, 'sq'), $qt); my $trigger_name = quote(mk_name($base_name, 'ai'), $qt); push @create, qq[DROP SEQUENCE $seq_name] if $options->{add_drop_table}; push @create, "CREATE SEQUENCE $seq_name"; my $trigger = "CREATE OR REPLACE TRIGGER $trigger_name\n" . "BEFORE INSERT ON $table_name_q\n" . "FOR EACH ROW WHEN (\n" . " new.$field_name_q IS NULL" . " OR new.$field_name_q = 0\n" . ")\n" . "BEGIN\n" . " SELECT $seq_name.nextval\n" . " INTO :new." . $field_name_q . "\n" . " FROM dual;\n" . "END;\n"; push @trigger_defs, $trigger; } push @field_defs, $field_def; if (my $comment = $field->comments) { debug("ORA: Handling comment"); $comment =~ __PACKAGE__->_quote_string($comment); push @field_comments, "COMMENT ON COLUMN $table_name_q.$field_name_q is\n $comment;" unless $options->{no_comments}; } return \@create, \@field_defs, \@trigger_defs, \@field_comments; } sub drop_table { my ($table, $options) = @_; my $qi = $options->{quote_identifiers}; my @foreign_key_constraints = grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints; my @statements; for my $constraint (@foreign_key_constraints) { push @statements, alter_drop_constraint($constraint, $options); } return @statements, 'DROP TABLE ' . 
quote($table, $qi); } sub alter_create_index { my ($index, $options) = @_; return create_index($index, $options); } sub create_index { my ($index, $options, $index_options) = @_; $index_options = $index_options || ''; my $qf = $options->{quote_field_names} || $options->{quote_identifiers}; my $qt = $options->{quote_table_names} || $options->{quote_identifiers}; my $index_name = $index->name || ''; $index_name = $index_name ? mk_name($index_name) : mk_name($index->table, $index_name || 'i'); return join(' ', map { $_ || () } 'CREATE', lc $index->type eq 'normal' ? 'INDEX' : $index->type . ' INDEX', $index_name ? quote($index_name, $qf) : '', 'ON', quote($index->table, $qt), '(' . join(', ', map { quote($_, $qf) } $index->fields) . ")$index_options"); } sub alter_drop_index { my ($index, $options) = @_; return 'DROP INDEX ' . $index->name; } sub alter_drop_constraint { my ($c, $options) = @_; my $qi = $options->{quote_identifiers}; my $table_name = quote($c->table->name, $qi); my @out = ('ALTER', 'TABLE', $table_name, 'DROP',); if ($c->name) { push @out, ('CONSTRAINT', quote($c->name, $qi)); } elsif ($c->type eq PRIMARY_KEY) { push @out, 'PRIMARY KEY'; } return join(' ', @out); } sub alter_create_constraint { my ($c, $options) = @_; my $qi = $options->{quote_identifiers}; my $table_name = quote($c->table->name, $qi); return join(' ', 'ALTER TABLE', $table_name, 'ADD', create_constraint($c, $options)); } sub create_constraint { my ($c, $options) = @_; my $qt = $options->{quote_table_names}; my $qf = $options->{quote_field_names}; my $table = $c->table; my $table_name = $table->name; my $table_name_q = quote($table_name, $qt); my $name = $c->name || ''; my @fields = map { quote($_, $qf) } $c->fields; my @rfields = map { quote($_, $qf) } $c->reference_fields; return undef if !@fields && $c->type ne 'CHECK'; my $definition; if ($c->type eq PRIMARY_KEY) { debug("ORA: Creating PK constraint on fields (" . join(', ', @fields) . ")"); # create a name if delay_constraints $name ||= mk_name($table_name, 'pk') if $options->{delay_constraints}; $name = quote($name, $qf); $definition = ($name ? "CONSTRAINT $name " : '') . 'PRIMARY KEY (' . join(', ', @fields) . ')'; } elsif ($c->type eq UNIQUE) { # Don't create UNIQUE constraints identical to the primary key if (my $pk = $table->primary_key) { my $u_fields = join(":", @fields); my $pk_fields = join(":", $pk->fields); next if $u_fields eq $pk_fields; } if ($name) { # Force prepend of table_name as ORACLE doesn't allow duplicate # CONSTRAINT names even for different tables (ORA-02264) $name = mk_name("${table_name}_$name", 'u') unless $name =~ /^$table_name/; } else { $name = mk_name($table_name, 'u'); } debug("ORA: Creating UNIQUE constraint on fields (" . join(', ', @fields) . ") named $name"); $name = quote($name, $qf); for my $f ($c->fields) { my $field_def = $table->get_field($f) or next; my $dtype = $translate{ ref $field_def->data_type eq "ARRAY" ? $field_def->data_type->[0] : $field_def->data_type } or next; if ($WARN && $dtype =~ /clob/i) { warn "Oracle will not allow UNIQUE constraints on " . "CLOB field '" . $field_def->table->name . '.' . $field_def->name . ".'\n"; } } $definition = "CONSTRAINT $name UNIQUE " . '(' . join(', ', @fields) . ')'; } elsif ($c->type eq CHECK_C) { $name ||= mk_name($name || $table_name, 'ck'); $name = quote($name, $qf); my $expression = $c->expression || ''; debug("ORA: Creating CHECK constraint on fields (" . join(', ', @fields) . 
") named $name"); $definition = "CONSTRAINT $name CHECK ($expression)"; } elsif ($c->type eq FOREIGN_KEY) { $name = mk_name(join('_', $table_name, $c->fields) . '_fk'); $name = quote($name, $qf); my $on_delete = uc($c->on_delete || ''); $definition = "CONSTRAINT $name FOREIGN KEY "; if (@fields) { $definition .= '(' . join(', ', @fields) . ')'; } my $ref_table = quote($c->reference_table, $qt); debug("ORA: Creating FK constraint on fields (" . join(', ', @fields) . ") named $name referencing $ref_table"); $definition .= " REFERENCES $ref_table"; if (@rfields) { $definition .= ' (' . join(', ', @rfields) . ')'; } if ($c->match_type) { $definition .= ' MATCH ' . ($c->match_type =~ /full/i) ? 'FULL' : 'PARTIAL'; } if ($on_delete && $on_delete ne "RESTRICT") { $definition .= ' ON DELETE ' . $c->on_delete; } } return $definition ? $definition : undef; } sub create_view { my ($view, $options) = @_; my $qt = $options->{quote_table_names}; my $view_name = quote($view->name, $qt); my $extra = $view->extra; my $view_type = 'VIEW'; my $view_options = ''; if (my $materialized = $extra->{materialized}) { $view_type = 'MATERIALIZED VIEW'; $view_options .= ' ' . $materialized; } my @create; push @create, qq[DROP $view_type $view_name] if $options->{add_drop_view}; push @create, sprintf("CREATE %s %s%s AS\n%s", $view_type, $view_name, $view_options, $view->sql); return \@create; } sub mk_name { my $basename = shift || ''; my $type = shift || ''; $type = '' if $type =~ /^\d/; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? $max_id_length - (length($type) + 1) : $max_id_length; $basename = substr($basename, 0, $max_name) if length($basename) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ($basename ne $basename_orig and $critical) { my $show_type = $type ? "+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to $max_id_length ", "character limit to make '$name'\n" if $WARN; $truncated{$basename_orig} = $name; } $scope ||= \%global_names; if (my $prev = $scope->{$name}) { my $name_orig = $name; substr($name, $max_id_length - 2) = "" if length($name) >= $max_id_length - 1; $name .= sprintf("%02d", $prev++); warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{$name_orig}++; } $scope->{$name}++; return $name; } 1; sub quote { my ($name, $q) = @_; return $name unless $q && $name; $name =~ s/\Q$quote_char/$quote_char$quote_char/g; return "$quote_char$name$quote_char"; } # ------------------------------------------------------------------- # All bad art is the result of good intentions. # Oscar Wilde # ------------------------------------------------------------------- =pod =head1 CREDITS Mad props to Tim Bunce for much of the logic stolen from his "mysql2ora" script. =head1 AUTHORS Ken Youens-Clark Ekclark@cpan.orgE, Alexander Hartmaier Eabraxxa@cpan.orgE, Fabien Wernli Efaxmodem@cpan.orgE. =head1 SEE ALSO SQL::Translator, DDL::Oracle, mysql2ora. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/PostgreSQL.pm0000644000000000000000000010604114551163724023725 0ustar00rootroot00000000000000package SQL::Translator::Producer::PostgreSQL; =head1 NAME SQL::Translator::Producer::PostgreSQL - PostgreSQL producer for SQL::Translator =head1 SYNOPSIS my $t = SQL::Translator->new( parser => '...', producer => 'PostgreSQL' ); $t->translate; =head1 DESCRIPTION Creates a DDL suitable for PostgreSQL. Very heavily based on the Oracle producer. 
Now handles PostGIS Geometry and Geography data types on table definitions. Does not yet support PostGIS Views. =head2 Producer Args You can change the global behavior of the producer by passing the following options to the C attribute of C. =over 4 =item postgres_version The version of postgres to generate DDL for. Turns on features only available in later versions. The following features are supported =over 4 =item IF EXISTS If your postgres_version is higher than 8.003 (I should hope it is by now), then the DDL generated for dropping objects in the database will contain IF EXISTS. =back =item attach_comments Generates table and column comments via the COMMENT command rather than as a comment in the DDL. You could then look it up with \dt+ or \d+ (for tables and columns respectively) in psql. The comment is dollar quoted with $comment$ so you can include ' in it. Just to clarify: you get this CREATE TABLE foo ...; COMMENT on TABLE foo IS $comment$hi there$comment$; instead of this -- comment CREAT TABLE foo ...; =back =head2 Extra args Various schema types support various options via the C attribute. =over 2 =item Tables =over 2 =item temporary Produces a temporary table. =back =item Views =over 2 =item temporary Produces a temporary view. =item materialized Produces a materialized view. =back =item Fields =over 2 =item list, custom_type_name For enum types, list is the list of valid values, and custom_type_name is the name that the type should have. Defaults to $table_$field_type. =item geometry_type, srid, dimensions, geography_type Fields for use with PostGIS types. =back =back =cut use strict; use warnings; our ($DEBUG, $WARN); our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; use base qw(SQL::Translator::Producer); use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment parse_dbms_version batch_alter_table_statements normalize_quote_options); use SQL::Translator::Generator::DDL::PostgreSQL; use Data::Dumper; use constant MAX_ID_LENGTH => 62; { my ($quoting_generator, $nonquoting_generator); sub _generator { my $options = shift; return $options->{generator} if exists $options->{generator}; return normalize_quote_options($options) ? $quoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new : $nonquoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new(quote_chars => [],); } } my (%translate); BEGIN { %translate = ( # # MySQL types # double => 'double precision', decimal => 'numeric', int => 'integer', mediumint => 'integer', tinyint => 'smallint', char => 'character', varchar => 'character varying', longtext => 'text', mediumtext => 'text', tinytext => 'text', tinyblob => 'bytea', blob => 'bytea', mediumblob => 'bytea', longblob => 'bytea', enum => 'character varying', set => 'character varying', datetime => 'timestamp', year => 'date', # # Oracle types # number => 'integer', varchar2 => 'character varying', long => 'text', clob => 'text', # # Sybase types # comment => 'text', # # MS Access types # memo => 'text', ); } my %truncated; =pod =head1 PostgreSQL Create Table Syntax CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name ( { column_name data_type [ DEFAULT default_expr ] [ column_constraint [, ... ] ] | table_constraint } [, ... ] ) [ INHERITS ( parent_table [, ... 
] ) ] [ WITH OIDS | WITHOUT OIDS ] where column_constraint is: [ CONSTRAINT constraint_name ] { NOT NULL | NULL | UNIQUE | PRIMARY KEY | CHECK (expression) | REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] and table_constraint is: [ CONSTRAINT constraint_name ] { UNIQUE ( column_name [, ... ] ) | PRIMARY KEY ( column_name [, ... ] ) | CHECK ( expression ) | EXCLUDE [USING acc_method] (expression) [INCLUDE (column [, ...])] [WHERE (predicate)] FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] =head1 Create Index Syntax CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( column [ ops_name ] [, ...] ) [ INCLUDE ( column [, ...] ) ] [ WHERE predicate ] CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( func_name( column [, ... ]) [ ops_name ] ) [ WHERE predicate ] =cut sub produce { my $translator = shift; local $DEBUG = $translator->debug; local $WARN = $translator->show_warnings; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $pargs = $translator->producer_args; my $postgres_version = parse_dbms_version($pargs->{postgres_version}, 'perl'); my $generator = _generator({ quote_identifiers => $translator->quote_identifiers }); my @output; push @output, header_comment unless ($no_comments); my (@table_defs, @fks); my %type_defs; for my $table ($schema->get_tables) { my ($table_def, $fks) = create_table( $table, { generator => $generator, no_comments => $no_comments, postgres_version => $postgres_version, add_drop_table => $add_drop_table, type_defs => \%type_defs, attach_comments => $pargs->{attach_comments} } ); push @table_defs, $table_def; push @fks, @$fks; } for my $view ($schema->get_views) { push @table_defs, create_view( $view, { postgres_version => $postgres_version, add_drop_view => $add_drop_table, generator => $generator, no_comments => $no_comments, } ); } for my $trigger ($schema->get_triggers) { push @table_defs, create_trigger( $trigger, { add_drop_trigger => $add_drop_table, generator => $generator, no_comments => $no_comments, } ); } push @output, map {"$_;\n\n"} values %type_defs; push @output, map {"$_;\n\n"} @table_defs; if (@fks) { push @output, "--\n-- Foreign Key Definitions\n--\n\n" unless $no_comments; push @output, map {"$_;\n\n"} @fks; } if ($WARN) { if (%truncated) { warn "Truncated " . keys(%truncated) . " names:\n"; warn "\t" . join("\n\t", sort keys %truncated) . "\n"; } } return wantarray ? @output : join('', @output); } { my %global_names; sub mk_name { my $basename = shift || ''; my $type = shift || ''; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? MAX_ID_LENGTH - (length($type) + 1) : MAX_ID_LENGTH; $basename = substr($basename, 0, $max_name) if length($basename) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ($basename ne $basename_orig and $critical) { my $show_type = $type ? 
"+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to ", MAX_ID_LENGTH, " character limit to make '$name'\n" if $WARN; $truncated{$basename_orig} = $name; } $scope ||= \%global_names; if (my $prev = $scope->{$name}) { my $name_orig = $name; $name .= sprintf("%02d", ++$prev); substr($name, MAX_ID_LENGTH - 3) = "00" if length($name) > MAX_ID_LENGTH; warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{$name_orig}++; } $scope->{$name}++; return $name; } } sub is_geometry { my $field = shift; return 1 if $field->data_type eq 'geometry'; } sub is_geography { my $field = shift; return 1 if $field->data_type eq 'geography'; } sub create_table { my ($table, $options) = @_; my $generator = _generator($options); my $no_comments = $options->{no_comments} || 0; my $add_drop_table = $options->{add_drop_table} || 0; my $postgres_version = $options->{postgres_version} || 0; my $type_defs = $options->{type_defs} || {}; my $attach_comments = $options->{attach_comments}; my $table_name = $table->name or next; my $table_name_qt = $generator->quote($table_name); my (@comments, @field_defs, @index_defs, @constraint_defs, @fks); push @comments, "--\n-- Table: $table_name\n--\n" unless $no_comments; my @comment_statements; if (my $comments = $table->comments) { if ($attach_comments) { # this follows the example in the MySQL producer, where all comments are added as # table comments, even though they could have originally been parsed as DDL comments # quoted via $$ string so there can be 'quotes' inside the comments my $comment_ddl = "COMMENT on TABLE $table_name_qt IS \$comment\$$comments\$comment\$"; push @comment_statements, $comment_ddl; } elsif (!$no_comments) { $comments =~ s/^/-- /mg; push @comments, "-- Comments:\n$comments\n--\n"; } } # # Fields # for my $field ($table->get_fields) { push @field_defs, create_field( $field, { generator => $generator, postgres_version => $postgres_version, type_defs => $type_defs, constraint_defs => \@constraint_defs, attach_comments => $attach_comments } ); if ($attach_comments) { my $field_comments = $field->comments; next unless $field_comments; my $field_name_qt = $generator->quote($field->name); my $comment_ddl = "COMMENT on COLUMN $table_name_qt.$field_name_qt IS \$comment\$$field_comments\$comment\$"; push @comment_statements, $comment_ddl; } } # # Index Declarations # for my $index ($table->get_indices) { my ($idef, $constraints) = create_index( $index, { generator => $generator, postgres_version => $postgres_version, } ); $idef and push @index_defs, $idef; push @constraint_defs, @$constraints; } # # Table constraints # for my $c ($table->get_constraints) { my ($cdefs, $fks) = create_constraint( $c, { generator => $generator, } ); push @constraint_defs, @$cdefs; push @fks, @$fks; } my $create_statement = join("\n", @comments); if ($add_drop_table) { if ($postgres_version >= 8.002) { $create_statement .= "DROP TABLE IF EXISTS $table_name_qt CASCADE;\n"; } else { $create_statement .= "DROP TABLE $table_name_qt CASCADE;\n"; } } my $temporary = $table->extra->{temporary} ? "TEMPORARY " : ""; $create_statement .= "CREATE ${temporary}TABLE $table_name_qt (\n" . join(",\n", map {" $_"} @field_defs, @constraint_defs) . "\n)"; $create_statement .= @index_defs ? ';' : q{}; $create_statement .= ($create_statement =~ /;$/ ? "\n" : q{}) . 
join(";\n", @index_defs); # # Geometry # if (my @geometry_columns = grep { is_geometry($_) } $table->get_fields) { $create_statement .= join(";\n", '', map { drop_geometry_column($_, $options) } @geometry_columns) if $options->{add_drop_table}; $create_statement .= join(";\n", '', map { add_geometry_column($_, $options) } @geometry_columns); } if (@comment_statements) { $create_statement .= join(";\n", '', @comment_statements); } return $create_statement, \@fks; } sub create_view { my ($view, $options) = @_; my $generator = _generator($options); my $postgres_version = $options->{postgres_version} || 0; my $add_drop_view = $options->{add_drop_view}; my $view_name = $view->name; debug("PKG: Looking at view '${view_name}'\n"); my $create = ''; $create .= "--\n-- View: " . $generator->quote($view_name) . "\n--\n" unless $options->{no_comments}; if ($add_drop_view) { if ($postgres_version >= 8.002) { $create .= "DROP VIEW IF EXISTS " . $generator->quote($view_name) . ";\n"; } else { $create .= "DROP VIEW " . $generator->quote($view_name) . ";\n"; } } $create .= 'CREATE'; my $extra = $view->extra; $create .= " TEMPORARY" if exists($extra->{temporary}) && $extra->{temporary}; $create .= " MATERIALIZED" if exists($extra->{materialized}) && $extra->{materialized}; $create .= " VIEW " . $generator->quote($view_name); if (my @fields = $view->fields) { my $field_list = join ', ', map { $generator->quote($_) } @fields; $create .= " ( ${field_list} )"; } if (my $sql = $view->sql) { $create .= " AS\n ${sql}\n"; } if ($extra->{check_option}) { $create .= ' WITH ' . uc $extra->{check_option} . ' CHECK OPTION'; } return $create; } # Returns a enum custom type name and list of values iff the field looks like an enum. sub _enum_typename_and_values { my $field = shift; if (ref $field->extra->{list} eq 'ARRAY') { # can't do anything unless we know the list if ($field->extra->{custom_type_name}) { return ($field->extra->{custom_type_name}, $field->extra->{list}); } elsif ($field->data_type eq 'enum') { my $name = $field->table->name . '_' . $field->name . '_type'; return ($name, $field->extra->{list}); } } return (); } { my %field_name_scope; sub create_field { my ($field, $options) = @_; my $generator = _generator($options); my $table_name = $field->table->name; my $constraint_defs = $options->{constraint_defs} || []; my $postgres_version = $options->{postgres_version} || 0; my $type_defs = $options->{type_defs} || {}; my $attach_comments = $options->{attach_comments}; $field_name_scope{$table_name} ||= {}; my $field_name = $field->name; my $field_comments = ''; if (!$attach_comments and my $comments = $field->comments) { $comments =~ s/(?quote($field_name); # # Datatype # my $data_type = lc $field->data_type; my %extra = $field->extra; my ($enum_typename, $list) = _enum_typename_and_values($field); if ($postgres_version >= 8.003 && $enum_typename) { my $commalist = join(', ', map { __PACKAGE__->_quote_string($_) } @$list); $field_def .= ' ' . $enum_typename; my $new_type_def = "DROP TYPE IF EXISTS $enum_typename CASCADE;\n" . "CREATE TYPE $enum_typename AS ENUM ($commalist)"; if (!exists $type_defs->{$enum_typename}) { $type_defs->{$enum_typename} = $new_type_def; } elsif ($type_defs->{$enum_typename} ne $new_type_def) { die "Attempted to redefine type name '$enum_typename' as a different type.\n"; } } else { $field_def .= ' ' . 
convert_datatype($field); } # # Default value # __PACKAGE__->_apply_default_value( $field, \$field_def, [ 'NULL' => \'NULL', 'now()' => 'now()', 'CURRENT_TIMESTAMP' => 'CURRENT_TIMESTAMP', ], ); # # Not null constraint # $field_def .= ' NOT NULL' unless $field->is_nullable; # # Geometry constraints # if (is_geometry($field)) { foreach (create_geometry_constraints($field, $options)) { my ($cdefs, $fks) = create_constraint($_, $options); push @$constraint_defs, @$cdefs; push @$fks, @$fks; } } return $field_def; } } sub create_geometry_constraints { my ($field, $options) = @_; my $fname = _generator($options)->quote($field); my @constraints; push @constraints, SQL::Translator::Schema::Constraint->new( name => "enforce_dims_" . $field->name, expression => "(ST_NDims($fname) = " . $field->extra->{dimensions} . ")", table => $field->table, type => CHECK_C, ); push @constraints, SQL::Translator::Schema::Constraint->new( name => "enforce_srid_" . $field->name, expression => "(ST_SRID($fname) = " . $field->extra->{srid} . ")", table => $field->table, type => CHECK_C, ); push @constraints, SQL::Translator::Schema::Constraint->new( name => "enforce_geotype_" . $field->name, expression => "(GeometryType($fname) = " . __PACKAGE__->_quote_string($field->extra->{geometry_type}) . "::text OR $fname IS NULL)", table => $field->table, type => CHECK_C, ); return @constraints; } sub _extract_extras_from_options { my ($options_haver, $dispatcher) = @_; for my $opt ($options_haver->options) { if (ref $opt eq 'HASH') { for my $key (keys %$opt) { my $val = $opt->{$key}; next unless defined $val; $dispatcher->{ lc $key }->($val); } } } } { my %index_name; sub create_index { my ($index, $options) = @_; my $generator = _generator($options); my $table_name = $index->table->name; my $postgres_version = $options->{postgres_version} || 0; my ($index_def, @constraint_defs); my $name = $index->name || join('_', $table_name, 'idx', ++$index_name{$table_name}); my $type = $index->type || NORMAL; my @fields = $index->fields; return unless @fields; my %index_extras; _extract_extras_from_options( $index, { using => sub { $index_extras{using} = "USING $_[0]" }, where => sub { $index_extras{where} = "WHERE $_[0]" }, include => sub { my ($value) = @_; return unless $postgres_version >= 11; die 'Include list must be an arrayref' unless ref $value eq 'ARRAY'; my $value_list = join ', ', @$value; $index_extras{include} = "INCLUDE ($value_list)"; } } ); my $def_start = 'CONSTRAINT ' . $generator->quote($name) . ' '; my $field_names = '(' . join(", ", (map { $_ =~ /\(.*\)/ ? $_ : ($generator->quote($_)) } @fields)) . ')'; if ($type eq PRIMARY_KEY) { push @constraint_defs, "${def_start}PRIMARY KEY " . $field_names; } elsif ($type eq UNIQUE) { push @constraint_defs, "${def_start}UNIQUE " . $field_names; } elsif ($type eq NORMAL) { $index_def = 'CREATE INDEX ' . $generator->quote($name) . ' on ' . $generator->quote($table_name) . ' ' . 
join ' ', grep {defined} $index_extras{using}, $field_names, @index_extras{ 'include', 'where' }; } else { warn "Unknown index type ($type) on table $table_name.\n" if $WARN; } return $index_def, \@constraint_defs; } } sub create_constraint { my ($c, $options) = @_; my $generator = _generator($options); my $postgres_version = $options->{postgres_version} || 0; my $table_name = $c->table->name; my (@constraint_defs, @fks); my %constraint_extras; _extract_extras_from_options( $c, { using => sub { $constraint_extras{using} = "USING $_[0]" }, where => sub { $constraint_extras{where} = "WHERE ( $_[0] )" }, include => sub { my ($value) = @_; return unless $postgres_version >= 11; die 'Include list must be an arrayref' unless ref $value eq 'ARRAY'; my $value_list = join ', ', @$value; $constraint_extras{include} = "INCLUDE ( $value_list )"; }, } ); my $name = $c->name || ''; my @fields = grep {defined} $c->fields; my @rfields = grep {defined} $c->reference_fields; return if !@fields && ($c->type ne CHECK_C && $c->type ne EXCLUDE); my $def_start = $name ? 'CONSTRAINT ' . $generator->quote($name) : ''; my $field_names = '(' . join(", ", (map { $_ =~ /\(.*\)/ ? $_ : ($generator->quote($_)) } @fields)) . ')'; my $include = $constraint_extras{include} || ''; if ($c->type eq PRIMARY_KEY) { push @constraint_defs, join ' ', grep $_, $def_start, "PRIMARY KEY", $field_names, $include; } elsif ($c->type eq UNIQUE) { push @constraint_defs, join ' ', grep $_, $def_start, "UNIQUE", $field_names, $include; } elsif ($c->type eq CHECK_C) { my $expression = $c->expression; push @constraint_defs, join ' ', grep $_, $def_start, "CHECK ($expression)"; } elsif ($c->type eq FOREIGN_KEY) { my $def .= join ' ', grep $_, "ALTER TABLE", $generator->quote($table_name), 'ADD', $def_start, "FOREIGN KEY $field_names"; $def .= "\n REFERENCES " . $generator->quote($c->reference_table); if (@rfields) { $def .= ' (' . join(', ', map { $generator->quote($_) } @rfields) . ')'; } if ($c->match_type) { $def .= ' MATCH ' . ($c->match_type =~ /full/i) ? 'FULL' : 'PARTIAL'; } if ($c->on_delete) { $def .= ' ON DELETE ' . $c->on_delete; } if ($c->on_update) { $def .= ' ON UPDATE ' . $c->on_update; } if ($c->deferrable) { $def .= ' DEFERRABLE'; } push @fks, "$def"; } elsif ($c->type eq EXCLUDE) { my $using = $constraint_extras{using} || ''; my $expression = $c->expression; my $where = $constraint_extras{where} || ''; push @constraint_defs, join ' ', grep $_, $def_start, 'EXCLUDE', $using, "( $expression )", $include, $where; } return \@constraint_defs, \@fks; } sub create_trigger { my ($trigger, $options) = @_; my $generator = _generator($options); my @statements; push @statements, sprintf('DROP TRIGGER IF EXISTS %s', $generator->quote($trigger->name)) if $options->{add_drop_trigger}; my $scope = $trigger->scope || ''; $scope = " FOR EACH $scope" if $scope; push @statements, sprintf( 'CREATE TRIGGER %s %s %s ON %s%s %s', $generator->quote($trigger->name), $trigger->perform_action_when, join(' OR ', @{ $trigger->database_events }), $generator->quote($trigger->on_table), $scope, $trigger->action, ); return @statements; } sub convert_datatype { my ($field) = @_; my @size = $field->size; my $data_type = lc $field->data_type; my $array = $data_type =~ s/\[\]$//; if ($data_type eq 'enum') { # my $len = 0; # $len = ($len < length($_)) ? length($_) : $len for (@$list); # my $chk_name = mk_name( $table_name.'_'.$field_name, 'chk' ); # push @$constraint_defs, # 'CONSTRAINT "$chk_name" CHECK (' . $generator->quote(field_name) . 
# qq[IN ($commalist))]; $data_type = 'character varying'; } elsif ($data_type eq 'set') { $data_type = 'character varying'; } elsif ($field->is_auto_increment) { if ((defined $size[0] && $size[0] > 11) or $data_type eq 'bigint') { $data_type = 'bigserial'; } else { $data_type = 'serial'; } undef @size; } else { $data_type = defined $translate{ lc $data_type } ? $translate{ lc $data_type } : $data_type; } if ($data_type =~ /^time/i || $data_type =~ /^interval/i) { if (defined $size[0] && $size[0] > 6) { $size[0] = 6; } } if ($data_type eq 'integer') { if (defined $size[0] && $size[0] > 0) { if ($size[0] > 10) { $data_type = 'bigint'; } elsif ($size[0] < 5) { $data_type = 'smallint'; } else { $data_type = 'integer'; } } else { $data_type = 'integer'; } } my $type_with_size = join('|', 'bit', 'varbit', 'character', 'bit varying', 'character varying', 'time', 'timestamp', 'interval', 'numeric', 'float'); if ($data_type !~ /$type_with_size/) { @size = (); } if (defined $size[0] && $size[0] > 0 && $data_type =~ /^time/i) { $data_type =~ s/^(time.*?)( with.*)?$/$1($size[0])/; $data_type .= $2 if (defined $2); } elsif (defined $size[0] && $size[0] > 0) { $data_type .= '(' . join(',', @size) . ')'; } if ($array) { $data_type .= '[]'; } # # Geography # if ($data_type eq 'geography') { $data_type .= '(' . $field->extra->{geography_type} . ',' . $field->extra->{srid} . ')'; } return $data_type; } sub alter_field { my ($from_field, $to_field, $options) = @_; die "Can't alter field in another table" if ($from_field->table->name ne $to_field->table->name); my $generator = _generator($options); my @out; # drop geometry column and constraints push @out, drop_geometry_column($from_field, $options), drop_geometry_constraints($from_field, $options), if is_geometry($from_field); # it's necessary to start with rename column cause this would affect # all of the following statements which would be broken if do the # rename later # BUT: drop geometry is done before the rename, cause it work's on the # $from_field directly push @out, sprintf('ALTER TABLE %s RENAME COLUMN %s TO %s', map($generator->quote($_), $to_field->table->name, $from_field->name, $to_field->name,),) if ($from_field->name ne $to_field->name); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s SET NOT NULL', map($generator->quote($_), $to_field->table->name, $to_field->name),) if (!$to_field->is_nullable and $from_field->is_nullable); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s DROP NOT NULL', map($generator->quote($_), $to_field->table->name, $to_field->name),) if (!$from_field->is_nullable and $to_field->is_nullable); my $from_dt = convert_datatype($from_field); my $to_dt = convert_datatype($to_field); push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s TYPE %s', map($generator->quote($_), $to_field->table->name, $to_field->name), $to_dt,) if ($to_dt ne $from_dt); my ($from_enum_typename, $from_list) = _enum_typename_and_values($from_field); my ($to_enum_typename, $to_list) = _enum_typename_and_values($to_field); if ( $from_enum_typename && $to_enum_typename && $from_enum_typename eq $to_enum_typename) { # See if new enum values were added, and update the enum my %existing_vals = map +($_ => 1), @$from_list; my %desired_vals = map +($_ => 1), @$to_list; my @add_vals = grep !$existing_vals{$_}, keys %desired_vals; my @del_vals = grep !$desired_vals{$_}, keys %existing_vals; my $pg_ver_ok = ($options->{postgres_version} || 0) >= 9.001; push @out, '-- Set $sqlt->producer_args->{postgres_version} >= 9.001 to alter enums' if !$pg_ver_ok && 
@add_vals; for (@add_vals) { push @out, sprintf '%sALTER TYPE %s ADD VALUE IF NOT EXISTS %s', ($pg_ver_ok ? '' : '-- '), $to_enum_typename, $generator->quote_string($_); } push @out, "-- Unimplemented: delete values from enum type '$to_enum_typename': " . join(", ", @del_vals) if @del_vals; } my $old_default = $from_field->default_value; my $new_default = $to_field->default_value; my $default_value = $to_field->default_value; # fixes bug where output like this was created: # ALTER TABLE users ALTER COLUMN column SET DEFAULT ThisIsUnescaped; if (ref $default_value eq "SCALAR") { $default_value = $$default_value; } elsif (defined $default_value && $to_dt =~ /^(character|text|timestamp|date)/xsmi) { $default_value = __PACKAGE__->_quote_string($default_value); } push @out, sprintf( 'ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s', map($generator->quote($_), $to_field->table->name, $to_field->name,), $default_value, ) if (defined $new_default && (!defined $old_default || $old_default ne $new_default)); # fixes bug where removing the DEFAULT statement of a column # would result in no change push @out, sprintf('ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT', map($generator->quote($_), $to_field->table->name, $to_field->name,),) if (!defined $new_default && defined $old_default); # add geometry column and constraints push @out, add_geometry_column($to_field, $options), add_geometry_constraints($to_field, $options), if is_geometry($to_field); return wantarray ? @out : join(";\n", @out); } sub rename_field { alter_field(@_) } sub add_field { my ($new_field, $options) = @_; my $out = sprintf( 'ALTER TABLE %s ADD COLUMN %s', _generator($options)->quote($new_field->table->name), create_field($new_field, $options) ); $out .= ";\n" . add_geometry_column($new_field, $options) . ";\n" . add_geometry_constraints($new_field, $options) if is_geometry($new_field); return $out; } sub drop_field { my ($old_field, $options) = @_; my $generator = _generator($options); my $out = sprintf( 'ALTER TABLE %s DROP COLUMN %s', $generator->quote($old_field->table->name), $generator->quote($old_field->name) ); $out .= ";\n" . drop_geometry_column($old_field, $options) if is_geometry($old_field); return $out; } sub add_geometry_column { my ($field, $options) = @_; return sprintf( "INSERT INTO geometry_columns VALUES (%s,%s,%s,%s,%s,%s,%s)", map(__PACKAGE__->_quote_string($_), '', $field->table->schema->name, $options->{table} ? $options->{table} : $field->table->name, $field->name, $field->extra->{dimensions}, $field->extra->{srid}, $field->extra->{geometry_type}, ), ); } sub drop_geometry_column { my ($field) = @_; return sprintf("DELETE FROM geometry_columns WHERE f_table_schema = %s AND f_table_name = %s AND f_geometry_column = %s", map(__PACKAGE__->_quote_string($_), $field->table->schema->name, $field->table->name, $field->name,),); } sub add_geometry_constraints { my ($field, $options) = @_; return join(";\n", map { alter_create_constraint($_, $options) } create_geometry_constraints($field, $options)); } sub drop_geometry_constraints { my ($field, $options) = @_; return join(";\n", map { alter_drop_constraint($_, $options) } create_geometry_constraints($field, $options)); } sub alter_table { my ($to_table, $options) = @_; my $generator = _generator($options); my $out = sprintf('ALTER TABLE %s %s', $generator->quote($to_table->name), $options->{alter_table_action}); $out .= ";\n" . 
$options->{geometry_changes} if $options->{geometry_changes}; return $out; } sub rename_table { my ($old_table, $new_table, $options) = @_; my $generator = _generator($options); $options->{alter_table_action} = "RENAME TO " . $generator->quote($new_table); my @geometry_changes = map { drop_geometry_column($_, $options), add_geometry_column($_, { %{$options}, table => $new_table }), } grep { is_geometry($_) } $old_table->get_fields; $options->{geometry_changes} = join(";\n", @geometry_changes) if @geometry_changes; return alter_table($old_table, $options); } sub alter_create_index { my ($index, $options) = @_; my $generator = _generator($options); my ($idef, $constraints) = create_index($index, $options); return $index->type eq NORMAL ? $idef : sprintf('ALTER TABLE %s ADD %s', $generator->quote($index->table->name), join(q{}, @$constraints)); } sub alter_drop_index { my ($index, $options) = @_; return 'DROP INDEX ' . _generator($options)->quote($index->name); } sub alter_drop_constraint { my ($c, $options) = @_; my $generator = _generator($options); # NOT NULL constraint does not require a DROP CONSTRAINT statement if ($c->type eq NOT_NULL) { return; } # attention: Postgres has a very special naming structure for naming # foreign keys and primary keys. It names them using the name of the # table as prefix and fkey or pkey as suffix, concatenated by an underscore my $c_name; if ($c->name) { # Already has a name, just use it $c_name = $c->name; } else { # if the name is dotted we need the table, not schema nor database my ($tablename) = reverse split /[.]/, $c->table->name; if ($c->type eq FOREIGN_KEY) { # Doesn't have a name, and is foreign key, append '_fkey' $c_name = $tablename . '_' . ($c->fields)[0] . '_fkey'; } elsif ($c->type eq PRIMARY_KEY) { # Doesn't have a name, and is primary key, append '_pkey' $c_name = $tablename . '_pkey'; } } return sprintf('ALTER TABLE %s DROP CONSTRAINT %s', map { $generator->quote($_) } $c->table->name, $c_name,); } sub alter_create_constraint { my ($index, $options) = @_; my $generator = _generator($options); my ($defs, $fks) = create_constraint(@_); # return if there are no constraint definitions so we don't run # into output like this: # ALTER TABLE users ADD ; return unless (@{$defs} || @{$fks}); return $index->type eq FOREIGN_KEY ? join(q{}, @{$fks}) : join(' ', 'ALTER TABLE', $generator->quote($index->table->name), 'ADD', join(q{}, @{$defs}, @{$fks})); } sub drop_table { my ($table, $options) = @_; my $generator = _generator($options); my $out = "DROP TABLE " . $generator->quote($table) . 
" CASCADE"; my @geometry_drops = map { drop_geometry_column($_); } grep { is_geometry($_) } $table->get_fields; $out .= join(";\n", '', @geometry_drops) if @geometry_drops; return $out; } sub batch_alter_table { my ($table, $diff_hash, $options) = @_; # as long as we're not renaming the table we don't need to be here if (@{ $diff_hash->{rename_table} } == 0) { return batch_alter_table_statements($diff_hash, $options); } # first we need to perform drops which are on old table my @sql = batch_alter_table_statements( $diff_hash, $options, qw( alter_drop_constraint alter_drop_index drop_field ) ); # next comes the rename_table my $old_table = $diff_hash->{rename_table}[0][0]; push @sql, rename_table($old_table, $table, $options); # for alter_field (and so also rename_field) we need to make sure old # field has table name set to new table otherwise calling alter_field dies $diff_hash->{alter_field} = [ map { $_->[0]->table($table) && $_ } @{ $diff_hash->{alter_field} } ]; $diff_hash->{rename_field} = [ map { $_->[0]->table($table) && $_ } @{ $diff_hash->{rename_field} } ]; # now add everything else push @sql, batch_alter_table_statements( $diff_hash, $options, qw( add_field alter_field rename_field alter_create_index alter_create_constraint alter_table ) ); return @sql; } 1; # ------------------------------------------------------------------- # Life is full of misery, loneliness, and suffering -- # and it's all over much too soon. # Woody Allen # ------------------------------------------------------------------- =pod =head1 SEE ALSO SQL::Translator, SQL::Translator::Producer::Oracle. =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/YAML.pm0000644000000000000000000001312414551163724022463 0ustar00rootroot00000000000000package SQL::Translator::Producer::YAML; =head1 NAME SQL::Translator::Producer::YAML - A YAML producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new(producer => 'YAML'); =head1 DESCRIPTION This module uses YAML to serialize a schema to a string so that it can be saved to disk. Serializing a schema and then calling producers on the stored can realize significant performance gains when parsing takes a long time. =cut use strict; use warnings; our $VERSION = '1.65'; use YAML qw(Dump); sub produce { my $translator = shift; my $schema = $translator->schema; return Dump({ schema => { tables => { map { ($_->name => view_table($_)) } $schema->get_tables, }, views => { map { ($_->name => view_view($_)) } $schema->get_views, }, triggers => { map { ($_->name => view_trigger($_)) } $schema->get_triggers, }, procedures => { map { ($_->name => view_procedure($_)) } $schema->get_procedures, }, }, translator => { add_drop_table => $translator->add_drop_table, filename => $translator->filename, no_comments => $translator->no_comments, parser_args => $translator->parser_args, producer_args => $translator->producer_args, parser_type => $translator->parser_type, producer_type => $translator->producer_type, show_warnings => $translator->show_warnings, trace => $translator->trace, version => $translator->version, }, keys %{ $schema->extra } ? ('extra' => { $schema->extra }) : (), }); } sub view_table { my $table = shift; return { 'name' => $table->name, 'order' => $table->order, 'options' => $table->options || [], $table->comments ? 
('comments' => [ $table->comments ]) : (), 'constraints' => [ map { view_constraint($_) } $table->get_constraints ], 'indices' => [ map { view_index($_) } $table->get_indices ], 'fields' => { map { ($_->name => view_field($_)) } $table->get_fields }, keys %{ $table->extra } ? ('extra' => { $table->extra }) : (), }; } sub view_constraint { my $constraint = shift; return { 'deferrable' => scalar $constraint->deferrable, 'expression' => scalar $constraint->expression, 'fields' => [ map { ref $_ ? $_->name : $_ } $constraint->field_names ], 'match_type' => scalar $constraint->match_type, 'name' => scalar $constraint->name, 'options' => scalar $constraint->options, 'on_delete' => scalar $constraint->on_delete, 'on_update' => scalar $constraint->on_update, 'reference_fields' => [ map { ref $_ ? $_->name : $_ } $constraint->reference_fields ], 'reference_table' => scalar $constraint->reference_table, 'type' => scalar $constraint->type, keys %{ $constraint->extra } ? ('extra' => { $constraint->extra }) : (), }; } sub view_field { my $field = shift; return { 'order' => scalar $field->order, 'name' => scalar $field->name, 'data_type' => scalar $field->data_type, 'size' => [ $field->size ], 'default_value' => scalar $field->default_value, 'is_nullable' => scalar $field->is_nullable, 'is_primary_key' => scalar $field->is_primary_key, 'is_unique' => scalar $field->is_unique, $field->is_auto_increment ? ('is_auto_increment' => 1) : (), $field->comments ? ('comments' => [ $field->comments ]) : (), keys %{ $field->extra } ? ('extra' => { $field->extra }) : (), }; } sub view_procedure { my $procedure = shift; return { 'order' => scalar $procedure->order, 'name' => scalar $procedure->name, 'sql' => scalar $procedure->sql, 'parameters' => scalar $procedure->parameters, 'owner' => scalar $procedure->owner, $procedure->comments ? ('comments' => [ $procedure->comments ]) : (), keys %{ $procedure->extra } ? ('extra' => { $procedure->extra }) : (), }; } sub view_trigger { my $trigger = shift; return { 'order' => scalar $trigger->order, 'name' => scalar $trigger->name, 'perform_action_when' => scalar $trigger->perform_action_when, 'database_events' => scalar $trigger->database_events, 'fields' => scalar $trigger->fields, 'on_table' => scalar $trigger->on_table, 'action' => scalar $trigger->action, ( defined $trigger->scope ? ('scope' => scalar $trigger->scope,) : () ), keys %{ $trigger->extra } ? ('extra' => { $trigger->extra }) : (), }; } sub view_view { my $view = shift; return { 'order' => scalar $view->order, 'name' => scalar $view->name, 'sql' => scalar $view->sql, 'fields' => scalar $view->fields, keys %{ $view->extra } ? ('extra' => { $view->extra }) : (), }; } sub view_index { my $index = shift; return { 'name' => scalar $index->name, 'type' => scalar $index->type, # If the index has extra properties, make sure these are written too 'fields' => [ map { ref($_) && $_->extra && keys %{ $_->extra } ? { name => $_->name, %{ $_->extra } } : "$_" } $index->fields ], 'options' => scalar $index->options, keys %{ $index->extra } ? ('extra' => { $index->extra }) : (), }; } 1; =head1 SEE ALSO SQL::Translator, YAML, http://www.yaml.org/. =head1 AUTHORS darren chamberlain Edarren@cpan.orgE, Ken Youens-Clark Ekclark@cpan.orgE. 
=cut SQL-Translator-1.65/lib/SQL/Translator/Producer/GraphViz.pm0000644000000000000000000004164414551163724023463 0ustar00rootroot00000000000000package SQL::Translator::Producer::GraphViz; =pod =head1 NAME SQL::Translator::Producer::GraphViz - GraphViz producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $trans = SQL::Translator->new( from => 'MySQL', # or your db of choice to => 'GraphViz', producer_args => { out_file => 'schema.png', bgcolor => 'lightgoldenrodyellow', show_constraints => 1, show_datatypes => 1, show_sizes => 1 } ) or die SQL::Translator->error; $trans->translate or die $trans->error; =head1 DESCRIPTION Creates a graph of a schema using the amazing graphviz (see http://www.graphviz.org/) application (via the L module). It's nifty--you should try it! =head1 PRODUCER ARGS All L constructor attributes are accepted and passed through to L. The following defaults are assumed for some attributes: layout => 'dot', overlap => 'false', node => { shape => 'record', style => 'filled', fillcolor => 'white', }, # in inches width => 8.5, height => 11, See the documentation of L for more info on these and other attributes. In addition this producer accepts the following arguments: =over 4 =item * skip_tables An arrayref or a comma-separated list of table names that should be skipped. Note that a skipped table node may still appear if another table has foreign key constraints pointing to the skipped table. If this happens no table field/index information will be included. =item * skip_tables_like An arrayref or a comma-separated list of regular expressions matching table names that should be skipped. =item * cluster Clustering of tables allows you to group and box tables according to function or domain or whatever criteria you choose. The syntax for clustering tables is: cluster => 'cluster1=table1,table2;cluster2=table3,table4' Or pass it as an arrayref like so: cluster => [ 'cluster1=table1,table2', 'cluster2=table3,table4' ] Or like so: cluster => [ { name => 'cluster1', tables => [ 'table1', 'table2' ] }, { name => 'cluster2', tables => [ 'table3', 'table4' ] }, ] =item * out_file The name of the file where the resulting GraphViz output will be written. Alternatively an open filehandle can be supplied. If undefined (the default) - the result is returned as a string. =item * output_type (DEFAULT: 'png') This determines which L will be invoked to generate the graph: C translates to C, C to C and so on. =item * fontname This sets the global font name (or full path to font file) for node, edge, and graph labels =item * fontsize This sets the global font size for node and edge labels (note that arbitrarily large sizes may be ignored due to page size or graph size constraints) =item * show_fields (DEFAULT: true) If set to a true value, the names of the columns in a table will be displayed in each table's node =item * show_fk_only If set to a true value, only columns which are foreign keys will be displayed in each table's node =item * show_datatypes If set to a true value, the datatype of each column will be displayed next to each column's name; this option will have no effect if the value of C is set to false =item * friendly_ints If set to a true value, each integer type field will be displayed as a tinyint, smallint, integer or bigint depending on the field's associated size parameter. 
This only applies for the C type (and not the C type, which is always assumed to be a 32-bit integer); this option will have no effect if the value of C is set to false =item * friendly_ints_extended If set to a true value, the friendly ints displayed will take into account the non-standard types, 'tinyint' and 'mediumint' (which, as far as I am aware, is only implemented in MySQL) =item * show_sizes If set to a true value, the size (in bytes) of each CHAR and VARCHAR column will be displayed in parentheses next to the column's name; this option will have no effect if the value of C is set to false =item * show_constraints If set to a true value, a field's constraints (i.e., its primary-key-ness, its foreign-key-ness and/or its uniqueness) will appear as a comma-separated list in brackets next to the field's name; this option will have no effect if the value of C is set to false =item * show_indexes If set to a true value, each record will also show the indexes set on each table. It describes the index types along with which columns are included in the index. =item * show_index_names (DEFAULT: true) If C is set to a true value, then the value of this parameter determines whether or not to print names of indexes. if C is false, then a list of indexed columns will appear below the field list. Otherwise, it will be a list prefixed with the name of each index. =item * natural_join If set to a true value, L will be called before generating the graph. =item * join_pk_only The value of this option will be passed as the value of the like-named argument to L; implies C<< natural_join => 1 >> =item * skip_fields The value of this option will be passed as the value of the like-named argument to L; implies C<< natural_join => 1 >> =back =head2 DEPRECATED ARGS =over 4 =item * node_shape Deprecated, use node => { shape => ... } instead =item * add_color Deprecated, use bgcolor => 'lightgoldenrodyellow' instead If set to a true value, the graphic will have a background color of 'lightgoldenrodyellow'; otherwise the default white background will be used =item * nodeattrs Deprecated, use node => { ... } instead =item * edgeattrs Deprecated, use edge => { ... } instead =item * graphattrs Deprecated, use graph => { ... } instead =back =cut use warnings; use strict; use GraphViz; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug); use Scalar::Util qw/openhandle/; our $DEBUG; our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; sub produce { my $t = shift; my $schema = $t->schema; my $args = $t->producer_args; local $DEBUG = $t->debug; # translate legacy {node|edge|graph}attrs to just {node|edge|graph} for my $argtype (qw/node edge graph/) { my $old_arg = $argtype . 
'attrs'; my %arglist = (map { %{ $_ || {} } } (delete $args->{$old_arg}, delete $args->{$argtype})); $args->{$argtype} = \%arglist if keys %arglist; } # explode font settings for (qw/fontsize fontname/) { if (defined $args->{$_}) { $args->{node}{$_} ||= $args->{$_}; $args->{edge}{$_} ||= $args->{$_}; $args->{graph}{$_} ||= $args->{$_}; } } # legacy add_color setting, trumped by bgcolor if set $args->{bgcolor} ||= 'lightgoldenrodyellow' if $args->{add_color}; # legacy node_shape setting, defaults to 'record', trumped by {node}{shape} $args->{node}{shape} ||= ($args->{node_shape} || 'record'); # maintain defaults $args->{layout} ||= 'dot'; $args->{output_type} ||= 'png'; $args->{overlap} ||= 'false'; $args->{node}{style} ||= 'filled'; $args->{node}{fillcolor} ||= 'white'; $args->{show_fields} = 1 if not exists $args->{show_fields}; $args->{show_index_names} = 1 if not exists $args->{show_index_names}; $args->{width} = 8.5 if not defined $args->{width}; $args->{height} = 11 if not defined $args->{height}; for ($args->{height}, $args->{width}) { $_ = 0 unless $_ =~ /^\d+(?:.\d+)?$/; $_ = 0 if $_ < 0; } # so split won't warn $args->{$_} ||= '' for qw/skip_fields skip_tables skip_tables_like cluster/; my %skip_fields = map { s/^\s+|\s+$//g; length $_ ? ($_, 1) : () } split(/,/, $args->{skip_fields}); my %skip_tables = map { $_, 1 } ( ref $args->{skip_tables} eq 'ARRAY' ? @{ $args->{skip_tables} } : split(/\s*,\s*/, $args->{skip_tables}) ); my @skip_tables_like = map {qr/$_/} ( ref $args->{skip_tables_like} eq 'ARRAY' ? @{ $args->{skip_tables_like} } : split(/\s*,\s*/, $args->{skip_tables_like}) ); # join_pk_only/skip_fields implies natural_join $args->{natural_join} = 1 if ($args->{join_pk_only} or scalar keys %skip_fields); # usually we do not want direction when using natural join $args->{directed} = ($args->{natural_join} ? 0 : 1) if not exists $args->{directed}; $schema->make_natural_joins( join_pk_only => $args->{join_pk_only}, skip_fields => $args->{skip_fields}, ) if $args->{natural_join}; my %cluster; if (defined $args->{'cluster'}) { my @clusters; if (ref $args->{'cluster'} eq 'ARRAY') { @clusters = @{ $args->{'cluster'} }; } else { @clusters = split /\s*;\s*/, $args->{'cluster'}; } for my $c (@clusters) { my ($cluster_name, @cluster_tables); if (ref $c eq 'HASH') { $cluster_name = $c->{'name'} || $c->{'cluster_name'}; @cluster_tables = @{ $c->{'tables'} || [] }; } else { my ($name, $tables) = split /\s*=\s*/, $c; $cluster_name = $name; @cluster_tables = split /\s*,\s*/, $tables; } for my $table (@cluster_tables) { $cluster{$table} = $cluster_name; } } } # # Create a blank GraphViz object and see if we can produce the output type. 
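# (For example, output_type => 'png' is dispatched to GraphViz::as_png and
# output_type => 'jpeg' to as_jpeg, as described under PRODUCER ARGS above.
# Because those generator methods are AUTOLOADed, the probe below calls the
# method on the still-empty graph inside an eval rather than using ->can.)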
# my $gv = GraphViz->new(%$args) or die sprintf("Can't create GraphViz object: %s\n", $@ || 'reason unknown'); my $output_method = "as_$args->{output_type}"; # the generators are AUTOLOADed so can't use ->can ($output_method) eval { $gv->$output_method }; die "Invalid output type: '$args->{output_type}'" if $@; # # Process tables definitions, create nodes # my %nj_registry; # for locations of fields for natural joins my @fk_registry; # for locations of fields for foreign keys TABLE: for my $table ($schema->get_tables) { my $table_name = $table->name; if (@skip_tables_like or keys %skip_tables) { next TABLE if $skip_tables{$table_name}; for my $regex (@skip_tables_like) { next TABLE if $table_name =~ $regex; } } my @fields = $table->get_fields; if ($args->{show_fk_only}) { @fields = grep { $_->is_foreign_key } @fields; } my $field_str = ''; if ($args->{show_fields}) { my @fmt_fields; for my $field (@fields) { my $field_info; if ($args->{show_datatypes}) { my $field_type = $field->data_type; my $size = $field->size; if ( $args->{friendly_ints} && $size && (lc($field_type) eq 'integer')) { # Automatically translate to int2, int4, int8 # Type (Bits) Max. Signed/Unsigned Length # tinyint* (8) 128 3 # 255 3 # smallint (16) 32767 5 # 65535 5 # mediumint* (24) 8388607 7 # 16777215 8 # int (32) 2147483647 10 # 4294967295 11 # bigint (64) 9223372036854775807 19 # 18446744073709551615 20 # # * tinyint and mediumint are nonstandard extensions which are # only available under MySQL (to my knowledge) if ($size <= 3 and $args->{friendly_ints_extended}) { $field_type = 'tinyint'; } elsif ($size <= 5) { $field_type = 'smallint'; } elsif ($size <= 8 and $args->{friendly_ints_extended}) { $field_type = 'mediumint'; } elsif ($size <= 11) { $field_type = 'integer'; } else { $field_type = 'bigint'; } } $field_info = $field_type; if ( $args->{show_sizes} && $size && ($field_type =~ /^ (?: NUMERIC | DECIMAL | (VAR)?CHAR2? ) $/ix)) { $field_info .= '(' . $size . ')'; } } my $constraints; if ($args->{show_constraints}) { my @constraints; push(@constraints, $field->is_auto_increment ? 'PA' : 'PK') if $field->is_primary_key; push(@constraints, 'FK') if $field->is_foreign_key; push(@constraints, 'U') if $field->is_unique; push(@constraints, 'N') if $field->is_nullable; $constraints = join(',', @constraints); } # construct the field line from all info gathered so far push @fmt_fields, join(' ', '-', $field->name, $field_info || (), $constraints ? "[$constraints]" : (),); } # join field lines with graphviz formatting $field_str = join('\l', @fmt_fields) . '\l'; } my $index_str = ''; if ($args->{show_indexes}) { my @fmt_indexes; for my $index ($table->get_indices) { next unless $index->is_valid; push @fmt_indexes, join(' ', '*', $args->{show_index_names} ? $index->name . ':' : (), join(', ', $index->fields), ($index->type eq 'UNIQUE') ? '[U]' : (), ); } # join index lines with graphviz formatting (if any indexes at all) $index_str = join('\l', @fmt_indexes) . '\l' if @fmt_indexes; } my $name_str = $table_name . 
'\n'; # escape spaces for ($name_str, $field_str, $index_str) { $_ =~ s/ /\\ /g; } my $node_args; # only the 'record' type supports nice formatting if ($args->{node}{shape} eq 'record') { # the necessity to supply shape => 'record' is a graphviz bug $node_args = { shape => 'record', label => sprintf('{%s}', join('|', $name_str, $field_str || (), $index_str || (),),), }; } else { my $sep = sprintf('%s\n', '-' x ((length $table_name) + 2)); $node_args = { label => join($sep, $name_str, $field_str || (), $index_str || (),), }; } if (my $cluster_name = $cluster{$table_name}) { $node_args->{cluster} = $cluster_name; } $gv->add_node(qq["$table_name"], %$node_args); debug("Processing table '$table_name'"); debug("Fields = ", join(', ', map { $_->name } @fields)) if $DEBUG; for my $f (@fields) { my $name = $f->name or next; my $is_pk = $f->is_primary_key; my $is_unique = $f->is_unique; # # Decide if we should skip this field. # if ($args->{natural_join}) { next unless $is_pk || $f->is_foreign_key; } my $constraints = $f->{'constraints'}; if ($args->{natural_join} && !$skip_fields{$name}) { push @{ $nj_registry{$name} }, $table_name; } } unless ($args->{natural_join}) { for my $c ($table->get_constraints) { next unless $c->type eq FOREIGN_KEY; my $fk_table = $c->reference_table or next; for my $field_name ($c->fields) { for my $fk_field ($c->reference_fields) { next unless defined $schema->get_table($fk_table); # a condition is optional if at least one fk is nullable push @fk_registry, [ $table_name, $fk_table, scalar(grep { $_->is_nullable } ($c->fields)) ]; } } } } } # # Process relationships, create edges # my (@table_bunches, %optional_constraints); if ($args->{natural_join}) { for my $field_name (keys %nj_registry) { my @table_names = @{ $nj_registry{$field_name} || [] } or next; next if scalar @table_names == 1; push @table_bunches, [@table_names]; } } else { for my $i (0 .. $#fk_registry) { my $fk = $fk_registry[$i]; push @table_bunches, [ $fk->[0], $fk->[1] ]; $optional_constraints{$i} = $fk->[2]; } } my %done; for my $bi (0 .. $#table_bunches) { my @tables = @{ $table_bunches[$bi] }; for my $i (0 .. $#tables) { my $table1 = $tables[$i]; for my $j (1 .. $#tables) { next if $i == $j; my $table2 = $tables[$j]; next if $done{$table1}{$table2}; debug("Adding edge '$table2' -> '$table1'"); $gv->add_edge( qq["$table2"], qq["$table1"], arrowhead => $optional_constraints{$bi} ? 
'empty' : 'normal', ); $done{$table1}{$table2} = 1; } } } # # Print the image # if (my $out = $args->{out_file}) { if (openhandle($out)) { print $out $gv->$output_method; } else { open my $fh, '>', $out or die "Can't write '$out': $!\n"; binmode $fh; print $fh $gv->$output_method; close $fh; } } else { return $gv->$output_method; } } 1; =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE Jonathan Yu Efrequency@cpan.orgE =head1 SEE ALSO SQL::Translator, GraphViz =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/XML/0000755000000000000000000000000014551164244022020 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Producer/XML/SQLFairy.pm0000644000000000000000000002512614551163724024020 0ustar00rootroot00000000000000package SQL::Translator::Producer::XML::SQLFairy; =pod =head1 NAME SQL::Translator::Producer::XML::SQLFairy - SQLFairy's default XML format =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( from => 'MySQL', to => 'XML-SQLFairy', filename => 'schema.sql', show_warnings => 1, ); print $t->translate; =head1 DESCRIPTION Creates XML output of a schema, in the flavor of XML used natively by the SQLFairy project (L). This format is detailed here. The XML lives in the C namespace. With a root element of . Objects in the schema are mapped to tags of the same name as the objects class (all lowercase). The attributes of the objects (e.g. $field->name) are mapped to attributes of the tag, except for sql, comments and action, which get mapped to child data elements. List valued attributes (such as the list of fields in an index) get mapped to comma separated lists of values in the attribute. Child objects, such as a tables fields, get mapped to child tags wrapped in a set of container tags using the plural of their contained classes name. An objects' extra attribute (a hash of arbitrary data) is mapped to a tag called extra, with the hash of data as attributes, sorted into alphabetical order. e.g. ...
SELECT email FROM Basic WHERE email IS NOT NULL
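The same output can be produced from Perl rather than the command line. A
minimal sketch (the add_prefix and indent producer args used here are
optional; both are described under ARGS below):

  use SQL::Translator;

  my $t = SQL::Translator->new(
      from          => 'MySQL',
      to            => 'XML-SQLFairy',
      filename      => 'schema.sql',
      producer_args => { add_prefix => 1, indent => 4 },
  );
  print $t->translate;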
To see a complete example of the XML translate one of your schema :) $ sqlt -f MySQL -t XML-SQLFairy schema.sql =head1 ARGS =over 4 =item add_prefix Set to true to use the default namespace prefix of 'sqlf', instead of using the default namespace for C e.g. =item prefix Set to the namespace prefix you want to use for the C e.g. =item newlines If true (the default) inserts newlines around the XML, otherwise the schema is written on one line. =item indent When using newlines the number of whitespace characters to use as the indent. Default is 2, set to 0 to turn off indenting. =back =head1 LEGACY FORMAT The previous version of the SQLFairy XML allowed the attributes of the schema objects to be written as either xml attributes or as data elements, in any combination. The old producer could produce attribute only or data element only versions. While this allowed for lots of flexibility in writing the XML the result is a great many possible XML formats, not so good for DTD writing, XPathing etc! So we have moved to a fixed version described above. This version of the producer will now only produce the new style XML. To convert your old format files simply pass them through the translator :) $ sqlt -f XML-SQLFairy -t XML-SQLFairy schema-old.xml > schema-new.xml =cut use strict; use warnings; our @EXPORT_OK; our $VERSION = '1.65'; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); use SQL::Translator::Utils qw(header_comment debug); BEGIN { # Will someone fix XML::Writer already? local $^W = 0; require XML::Writer; import XML::Writer; } # Which schema object attributes (methods) to write as xml elements rather than # as attributes. e.g. blah, blah... my @MAP_AS_ELEMENTS = qw/sql comments action extra/; my $Namespace = 'http://sqlfairy.sourceforge.net/sqlfairy.xml'; my $Name = 'sqlf'; my $PArgs = {}; my $no_comments; sub produce { my $translator = shift; my $schema = $translator->schema; $no_comments = $translator->no_comments; $PArgs = $translator->producer_args; my $newlines = defined $PArgs->{newlines} ? $PArgs->{newlines} : 1; my $indent = defined $PArgs->{indent} ? 
$PArgs->{indent} : 2; # Setup the XML::Writer and set the namespace my $io; my $prefix = ""; $prefix = $Name if $PArgs->{add_prefix}; $prefix = $PArgs->{prefix} if $PArgs->{prefix}; my $xml = XML::Writer->new( OUTPUT => \$io, NAMESPACES => 1, PREFIX_MAP => { $Namespace => $prefix }, DATA_MODE => $newlines, DATA_INDENT => $indent, ); # Start the document $xml->xmlDecl('UTF-8'); $xml->comment(header_comment('', '')) unless $no_comments; xml_obj( $xml, $schema, tag => "schema", methods => [qw/name database extra/], end_tag => 0 ); # # Table # $xml->startTag([ $Namespace => "tables" ]); for my $table ($schema->get_tables) { debug "Table:", $table->name; xml_obj( $xml, $table, tag => "table", methods => [qw/name order extra/], end_tag => 0 ); # # Fields # xml_obj_children( $xml, $table, tag => 'field', methods => [ qw/ name data_type size is_nullable default_value is_auto_increment is_primary_key is_foreign_key extra comments order / ], ); # # Indices # xml_obj_children( $xml, $table, tag => 'index', collection_tag => "indices", methods => [qw/name type fields options extra/], ); # # Constraints # xml_obj_children( $xml, $table, tag => 'constraint', methods => [ qw/ name type fields reference_table reference_fields on_delete on_update match_type expression options deferrable extra / ], ); # # Comments # xml_obj_children( $xml, $table, tag => 'comment', collection_tag => "comments", methods => [ qw/ comments / ], ); $xml->endTag([ $Namespace => 'table' ]); } $xml->endTag([ $Namespace => 'tables' ]); # # Views # xml_obj_children( $xml, $schema, tag => 'view', methods => [qw/name sql fields order extra/], ); # # Tiggers # xml_obj_children( $xml, $schema, tag => 'trigger', methods => [ qw/name database_events action on_table perform_action_when fields order extra scope/ ], ); # # Procedures # xml_obj_children( $xml, $schema, tag => 'procedure', methods => [qw/name sql parameters owner comments order extra/], ); $xml->endTag([ $Namespace => 'schema' ]); $xml->end; return $io; } # # Takes and XML::Write object, Schema::* parent object, the tag name, # the collection name and a list of methods (of the children) to write as XML. # The collection name defaults to the name with an s on the end and is used to # work out the method to get the children with. eg a name of 'foo' gives a # collection of foos and gets the members using ->get_foos. # sub xml_obj_children { my ($xml, $parent) = (shift, shift); my %args = @_; my ($name, $collection_name, $methods) = @args{qw/tag collection_tag methods/}; $collection_name ||= "${name}s"; my $meth; if ($collection_name eq 'comments') { $meth = 'comments'; } else { $meth = "get_$collection_name"; } my @kids = $parent->$meth; #@kids || return; $xml->startTag([ $Namespace => $collection_name ]); for my $obj (@kids) { if ($collection_name eq 'comments') { $xml->dataElement([ $Namespace => 'comment' ], $obj); } else { xml_obj( $xml, $obj, tag => "$name", end_tag => 1, methods => $methods, ); } } $xml->endTag([ $Namespace => $collection_name ]); } # # Takes an XML::Writer, Schema::* object and list of method names # and writes the object out as XML. All methods values are written as attributes # except for the methods listed in @MAP_AS_ELEMENTS which get written as child # data elements. # # The attributes/tags are written in the same order as the method names are # passed. # # TODO # - Should the Namespace be passed in instead of global? 
Pass in the same # as Writer ie [ NS => TAGNAME ] # my $elements_re = join("|", @MAP_AS_ELEMENTS); $elements_re = qr/^($elements_re)$/; sub xml_obj { my ($xml, $obj, %args) = @_; my $tag = $args{'tag'} || ''; my $end_tag = $args{'end_tag'} || ''; my @meths = @{ $args{'methods'} }; my $empty_tag = 0; # Use array to ensure consistent (ie not hash) ordering of attribs # The order comes from the meths list passed in. my @tags; my @attr; foreach (grep { defined $obj->$_ } @meths) { my $what = m/$elements_re/ ? \@tags : \@attr; my $val = $_ eq 'extra' ? { $obj->$_ } : $obj->$_; $val = ref $val eq 'ARRAY' ? join(',', @$val) : $val; push @$what, $_ => $val; } my $child_tags = @tags; $end_tag && !$child_tags ? $xml->emptyTag([ $Namespace => $tag ], @attr) : $xml->startTag([ $Namespace => $tag ], @attr); while (my ($name, $val) = splice @tags, 0, 2) { if (ref $val eq 'HASH') { $xml->emptyTag([ $Namespace => $name ], map { ($_, $val->{$_}) } sort keys %$val); } else { $xml->dataElement([ $Namespace => $name ], $val); } } $xml->endTag([ $Namespace => $tag ]) if $child_tags && $end_tag; } 1; # ------------------------------------------------------------------- # The eyes of fire, the nostrils of air, # The mouth of water, the beard of earth. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHORS Ken Youens-Clark Ekclark@cpan.orgE, Darren Chamberlain Edarren@cpan.orgE, Mark Addison Emark.addison@itn.co.ukE. =head1 SEE ALSO C, L, L, L, L. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/Diagram.pm0000644000000000000000000004066014551163724023272 0ustar00rootroot00000000000000package SQL::Translator::Producer::Diagram; =head1 NAME SQL::Translator::Producer::Diagram - ER diagram producer for SQL::Translator =head1 SYNOPSIS Use via SQL::Translator: use SQL::Translator; my $t = SQL::Translator->new( from => 'MySQL', to => 'Diagram', producer_args => { # All args are optional out_file => 'schema.png',# if not provided will return from translate() output_type => 'png', # is default or 'jpeg' title => 'My Schema', # default is filename font_size => 'medium', # is default or 'small,' 'large' imap_file => '', # filename to write image map coords imap_url => '', # base URL for image map gutter => 30 # is default, px distance b/w cols num_columns => 5, # the number of columns no_lines => 1, # do not draw lines to show FKs add_color => 1, # give it some color show_fk_only => 1, # show only fields used in FKs join_pk_only => 1, # use only primary keys to figure PKs natural_join => 1, # intuit FKs if not defined skip_fields => [...], # list* of field names to exclude skip_tables => [...], # list* of table names to exclude skip_tables_like => [...], # list* of regexen to exclude tables } ) or die SQL::Translator->error; $t->translate; * "list" can be either an array-ref or a comma-separated string =cut use strict; use warnings; use GD; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug); our $DEBUG; our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; use constant VALID_FONT_SIZE => { small => 1, medium => 1, large => 1, huge => 1, }; use constant VALID_IMAGE_TYPE => { png => 1, jpeg => 1, }; sub produce { my $t = shift; my $schema = $t->schema; my $args = $t->producer_args; local $DEBUG = $t->debug; debug("Schema =\n", Dumper($schema)) if $DEBUG; debug("Producer args =\n", Dumper($args)) if $DEBUG; my $out_file = $args->{'out_file'} || ''; my $output_type = $args->{'output_type'} || 'png'; my $title = $args->{'title'} 
|| $t->filename; my $font_size = $args->{'font_size'} || 'medium'; my $imap_file = $args->{'imap_file'} || ''; my $imap_url = $args->{'imap_url'} || ''; my $gutter = $args->{'gutter'} || 30; # distance b/w columns my $num_columns = $args->{'num_columns'} || $args->{'no_columns'} || ''; my $no_lines = $args->{'no_lines'}; my $add_color = $args->{'add_color'}; my $show_fk_only = $args->{'show_fk_only'}; my $join_pk_only = $args->{'join_pk_only'}; my $natural_join = $args->{'natural_join'} || $join_pk_only; my %skip_field = map { $_, 1 } ( ref $args->{'skip_fields'} eq 'ARRAY' ? @{ $args->{'skip_fields'} } : split(/\s*,\s*/, $args->{'skip_fields'} || '') ); my %skip_table = map { $_, 1 } ( ref $args->{'skip_tables'} eq 'ARRAY' ? @{ $args->{'skip_tables'} } : split(/\s*,\s*/, $args->{'skip_tables'} || '') ); my @skip_tables_like = map {qr/$_/} ( ref $args->{'skip_tables_like'} eq 'ARRAY' ? @{ $args->{'skip_tables_like'} } : split(/\s*,\s*/, $args->{'skip_tables_like'} || '') ); my @table_names; if ($natural_join) { $schema->make_natural_joins( join_pk_only => $join_pk_only, skip_fields => $args->{'skip_fields'}, ); my $g = $schema->as_graph_pm; my $d = Graph::Traversal::DFS->new($g, next_alphabetic => 1); $d->preorder; @table_names = $d->dfs; } else { @table_names = map { $_->name } $schema->get_tables; } die "Invalid image type '$output_type'" unless VALID_IMAGE_TYPE->{$output_type}; die "Invalid font size '$font_size'" unless VALID_FONT_SIZE->{$font_size}; # # Layout the image. # my $font = $font_size eq 'small' ? gdTinyFont : $font_size eq 'medium' ? gdSmallFont : $font_size eq 'large' ? gdLargeFont : gdGiantFont; my $num_tables = scalar @table_names; $num_columns = 0 unless $num_columns =~ /^\d+$/; $num_columns ||= sprintf("%.0f", sqrt($num_tables) + .5); $num_columns ||= .5; my $no_per_col = sprintf("%.0f", $num_tables / $num_columns + .5); my @shapes; my ($max_x, $max_y); # the furthest x and y used my $orig_y = 40; # used to reset y for each column my ($x, $y) = (30, $orig_y); # where to start my $cur_col = 1; # the current column my $no_this_col = 0; # number of tables in current column my $this_col_x = $x; # current column's x my %nj_registry; # for locations of fields for natural joins my @fk_registry; # for locations of fields for foreign keys my %table_x; # for max x of each table my $field_no; # counter to give distinct no. to each field my %coords; # holds fields coordinates my @imap_coords; # for making clickable image map my %legend; TABLE: for my $table_name (@table_names) { my $table = $schema->get_table($table_name); if (@skip_tables_like or keys %skip_table) { next TABLE if $skip_table{$table_name}; for my $regex (@skip_tables_like) { next TABLE if $table_name =~ $regex; } } my $top = $y; push @shapes, [ 'string', $font, $this_col_x, $y, $table_name, 'black' ]; $y += $font->height + 2; my $below_table_name = $y; $y += 2; my $this_max_x = $this_col_x + ($font->width * length($table_name)); debug("Processing table '$table_name'"); my @fields = $table->get_fields; debug("Fields = ", join(', ', map { $_->name } @fields)) if $DEBUG; my (@fld_desc, $max_name, $max_desc); for my $f (@fields) { my $name = $f->name or next; my $is_pk = $f->is_primary_key; my @attr; # # Decide if we should skip this field. 
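# (When show_fk_only is set, only primary-key and foreign-key fields are
# kept; every other field is dropped from the table node. Fields that
# survive are later rendered as a padded "name type(size) [attrs]" line,
# so a hypothetical unique foreign-key column might appear as
# "email  varchar(255)  [U, FK]", with the matching legend entries
# collected below.)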
# if ($show_fk_only) { next unless $is_pk || $f->is_foreign_key; } if ($is_pk) { push @attr, 'PK'; $legend{'Primary key'} = '[PK]'; } if ($f->is_unique) { push @attr, 'U'; $legend{'Unique constraint'} = '[U]'; } if ($f->is_foreign_key) { push @attr, 'FK'; $legend{'Foreign Key'} = '[FK]'; } my $attr = ''; if (@attr) { $attr .= '[' . join(', ', @attr) . ']'; } my $desc = $f->data_type; $desc .= '(' . $f->size . ')' if $f->size && $f->data_type =~ /^(VAR)?CHAR2?$/i; my $nlen = length $name; my $dlen = length $desc; $max_name = $nlen if $nlen > ($max_name || 0); $max_desc = $dlen if $dlen > ($max_desc || 0); push @fld_desc, [ $name, $desc, $f->{'name'}, $is_pk, $attr ]; } $max_name += 2; $max_desc += 2; for my $fld_desc (@fld_desc) { my ($name, $desc, $orig_name, $is_pk, $attr) = @$fld_desc; my $diff1 = $max_name - length $name; my $diff2 = $max_desc - length $desc; $name .= ' ' x $diff1; $desc .= ' ' x $diff2; $desc = $name . $desc . $attr; push @shapes, [ 'string', $font, $this_col_x, $y, $desc, 'black' ]; $y += $font->height + 2; my $length = $this_col_x + ($font->width * length($desc)); $this_max_x = $length if $length > $this_max_x; my $constraints = $table->{'fields'}{$orig_name}{'constraints'}; if ($natural_join && !$skip_field{$orig_name}) { push @{ $nj_registry{$orig_name} }, $table_name; } my $y_link = $y - $font->height / 2; $coords{$table_name}{$orig_name}{'coords'} = { left => [ $this_col_x - 6, $y_link ], right => [ $length + 2, $y_link ], table => $table_name, field_no => ++$field_no, is_pk => $is_pk, fld_name => $orig_name, }; push @imap_coords, [ $imap_url . "#$table_name-$orig_name", $this_col_x, $y - $font->height, $length, $y_link, ]; } unless ($natural_join) { for my $c ($table->get_constraints) { next unless $c->type eq FOREIGN_KEY; my $fk_table = $c->reference_table or next; for my $field_name ($c->fields) { for my $fk_field ($c->reference_fields) { next unless defined $schema->get_table($fk_table); push @fk_registry, [ [ $fk_table, $fk_field ], [ $table_name, $field_name ], ]; } } } } $this_max_x += 5; $table_x{$table_name} = $this_max_x + 5; push @shapes, [ 'line', $this_col_x - 5, $below_table_name, $this_max_x, $below_table_name, 'black' ]; my @bounds = ($this_col_x - 5, $top - 5, $this_max_x, $y + 5); if ($add_color) { unshift @shapes, [ 'filledRectangle', $bounds[0], $bounds[1], $this_max_x, $below_table_name, 'khaki' ]; unshift @shapes, [ 'filledRectangle', @bounds, 'white' ]; } push @imap_coords, [ $imap_url . "#$table_name", $bounds[0], $bounds[1], $this_max_x, $below_table_name, ]; push @shapes, [ 'rectangle', @bounds, 'black' ]; $max_x = $this_max_x if $this_max_x > ($max_x || 0); $y += 25; if (++$no_this_col == $no_per_col) { # if we've filled up this column $cur_col++; # up the column number $no_this_col = 0; # reset the number of tables $max_x += $gutter; # push the x over for next column $this_col_x = $max_x; # remember the max x for this col $max_y = $y if $y > ($max_y || 0); # note the max y $y = $orig_y; # reset the y for next column } } # # Connect the lines. 
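# (Edges are gathered into "bunches" first: under natural_join every field
# name in %nj_registry that appears in more than one table yields a bunch
# of all its locations, otherwise each foreign-key pair recorded in
# @fk_registry becomes a two-table bunch. For every pair in a bunch the
# shortest of the four left/right endpoint combinations is chosen and drawn
# as three cadetblue segments, with small arrowheads added when the graph
# is directed.)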
# my %horz_taken; my %done; unless ($no_lines) { my @position_bunches; if ($natural_join) { for my $field_name (keys %nj_registry) { my @positions; my @table_names = @{ $nj_registry{$field_name} || [] } or next; next if scalar @table_names == 1; for my $table_name (@table_names) { push @positions, $coords{$table_name}{$field_name}{'coords'}; } push @position_bunches, [@positions]; } } else { for my $pair (@fk_registry) { push @position_bunches, [ $coords{ $pair->[0][0] }{ $pair->[0][1] }{'coords'}, $coords{ $pair->[1][0] }{ $pair->[1][1] }{'coords'}, ]; } } my $is_directed = $natural_join ? 0 : 1; for my $bunch (@position_bunches) { my @positions = @$bunch; for my $i (0 .. $#positions) { my $pos1 = $positions[$i]; my ($ax, $ay) = @{ $pos1->{'left'} || [] } or next; my ($bx, $by) = @{ $pos1->{'right'} || [] } or next; my $table1 = $pos1->{'table'}; my $fno1 = $pos1->{'field_no'}; my $is_pk = $pos1->{'is_pk'}; next if $join_pk_only and !$is_pk; for my $j (0 .. $#positions) { my $pos2 = $positions[$j]; my ($cx, $cy) = @{ $pos2->{'left'} || [] } or next; my ($dx, $dy) = @{ $pos2->{'right'} || [] } or next; my $table2 = $pos2->{'table'}; my $fno2 = $pos2->{'field_no'}; next if $table1 eq $table2; next if $done{$fno1}{$fno2}; next if $fno1 == $fno2; my @distances = (); push @distances, [ abs($ax - $cx) + abs($ay - $cy), [ $ax, $ay, $cx, $cy ], [ 'left', 'left' ] ]; push @distances, [ abs($ax - $dx) + abs($ay - $dy), [ $ax, $ay, $dx, $dy ], [ 'left', 'right' ], ]; push @distances, [ abs($bx - $cx) + abs($by - $cy), [ $bx, $by, $cx, $cy ], [ 'right', 'left' ], ]; push @distances, [ abs($bx - $dx) + abs($by - $dy), [ $bx, $by, $dx, $dy ], [ 'right', 'right' ], ]; @distances = sort { $a->[0] <=> $b->[0] } @distances; my $shortest = $distances[0]; my ($x1, $y1, $x2, $y2) = @{ $shortest->[1] }; my ($side1, $side2) = @{ $shortest->[2] }; my ($start, $end); my $offset = 9; my $col1_right = $table_x{$table1}; my $col2_right = $table_x{$table2}; my $diff = 0; if ($x1 == $x2) { while ($horz_taken{ $x1 + $diff }) { $diff = $side1 eq 'left' ? $diff - 2 : $diff + 2; } $horz_taken{ $x1 + $diff } = 1; } if ($side1 eq 'left') { $start = $x1 - $offset + $diff; } else { $start = $col1_right + $diff; } if ($side2 eq 'left') { $end = $x2 - $offset + $diff; } else { $end = $col2_right + $diff; } push @shapes, [ 'line', $x1, $y1, $start, $y1, 'cadetblue' ]; push @shapes, [ 'line', $start, $y1, $end, $y2, 'cadetblue' ]; push @shapes, [ 'line', $end, $y2, $x2, $y2, 'cadetblue' ]; if ($is_directed) { if ( $side1 eq 'right' && $side2 eq 'left' || $side1 eq 'left' && $side2 eq 'left') { push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 - 3, $y2 + 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 - 3, $y2 - 3, $x2 - 3, $y2 + 3, 'cadetblue' ]; } else { push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 + 3, $y2 + 3, $x2, $y2, 'cadetblue' ]; push @shapes, [ 'line', $x2 + 3, $y2 - 3, $x2 + 3, $y2 + 3, 'cadetblue' ]; } } $done{$fno1}{$fno2} = 1; $done{$fno2}{$fno1} = 1; } } } } # # Add the title, legend and signature. 
# my $large_font = gdLargeFont; my $title_len = $large_font->width * length $title; push @shapes, [ 'string', $large_font, $max_x / 2 - $title_len / 2, 10, $title, 'black' ]; if (%legend) { $max_y += 5; push @shapes, [ 'string', $font, $x, $max_y - $font->height - 4, 'Legend', 'black' ]; $max_y += $font->height + 4; my $longest; for my $len (map { length $_ } values %legend) { $longest = $len if $len > ($longest || 0); } $longest += 2; while (my ($key, $shape) = each %legend) { my $space = $longest - length $shape; push @shapes, [ 'string', $font, $x, $max_y - $font->height - 4, join('', $shape, ' ' x $space, $key), 'black' ]; $max_y += $font->height + 4; } } my $sig = 'Created by SQL::Translator ' . $t->version; my $sig_len = $font->width * length $sig; push @shapes, [ 'string', $font, $max_x - $sig_len, $max_y - $font->height - 4, $sig, 'black' ]; # # Render the image. # my $gd = GD::Image->new($max_x + 30, $max_y); unless ($gd->can($output_type)) { die "GD can't create images of type '$output_type'\n"; } my %colors = map { $_->[0], $gd->colorAllocate(@{ $_->[1] }) } ( [ white => [ 255, 255, 255 ] ], [ beige => [ 245, 245, 220 ] ], [ black => [ 0, 0, 0 ] ], [ lightblue => [ 173, 216, 230 ] ], [ cadetblue => [ 95, 158, 160 ] ], [ lightgoldenrodyellow => [ 250, 250, 210 ] ], [ khaki => [ 240, 230, 140 ] ], [ red => [ 255, 0, 0 ] ], ); $gd->interlaced('true'); my $background_color = $add_color ? 'lightgoldenrodyellow' : 'white'; $gd->fill(0, 0, $colors{$background_color}); for my $shape (@shapes) { my $method = shift @$shape; my $color = pop @$shape; $gd->$method(@$shape, $colors{$color}); } # # Make image map. # debug("imap file = '$imap_file'"); if ($imap_file && @imap_coords) { open my $fh, '>', $imap_file or die "Can't write '$imap_file': $!\n"; print $fh qq[\n] . qq[\n]; for my $rec (@imap_coords) { my $href = shift @$rec; print $fh q[\n]; } print $fh qq[]; close $fh; } # # Print the image. # if ($out_file) { open my $fh, '>', $out_file or die "Can't write '$out_file': $!\n"; binmode $fh; print $fh $gd->$output_type; close $fh; } else { return $gd->$output_type; } } 1; =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/Sybase.pm0000644000000000000000000002617014551163724023154 0ustar00rootroot00000000000000package SQL::Translator::Producer::Sybase; =head1 NAME SQL::Translator::Producer::Sybase - Sybase producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'Sybase' ); $t->translate; =head1 DESCRIPTION This module will produce text output of the schema suitable for Sybase. 
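For example, a minimal sketch of a full conversion (the parser and input filename here are only illustrative, not part of this module):

    use SQL::Translator;

    # Hypothetical input file; any supported parser can feed this producer.
    my $t = SQL::Translator->new(
        parser   => 'MySQL',
        producer => 'Sybase',
        filename => 'schema.mysql.sql',
    );

    # Prints the generated Sybase DDL.
    print $t->translate;
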
=cut use strict; use warnings; our ($DEBUG, $WARN); our $VERSION = '1.65'; $DEBUG = 1 unless defined $DEBUG; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); my %translate = ( # # Sybase types # integer => 'numeric', int => 'numeric', number => 'numeric', money => 'money', varchar => 'varchar', varchar2 => 'varchar', timestamp => 'datetime', text => 'varchar', real => 'double precision', comment => 'text', bit => 'bit', tinyint => 'smallint', float => 'double precision', serial => 'numeric', boolean => 'varchar', char => 'char', long => 'varchar', ); my %reserved = map { $_, 1 } qw[ ALL ANALYSE ANALYZE AND ANY AS ASC BETWEEN BINARY BOTH CASE CAST CHECK COLLATE COLUMN CONSTRAINT CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEFAULT DEFERRABLE DESC DISTINCT DO ELSE END EXCEPT FALSE FOR FOREIGN FREEZE FROM FULL GROUP HAVING ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT NATURAL NEW NOT NOTNULL NULL OFF OFFSET OLD ON ONLY OR ORDER OUTER OVERLAPS PRIMARY PUBLIC REFERENCES RIGHT SELECT SESSION_USER SOME TABLE THEN TO TRAILING TRUE UNION UNIQUE USER USING VERBOSE WHEN WHERE ]; my $max_id_length = 30; my %used_identifiers = (); my %global_names; my %unreserve; my %truncated; =pod =head1 Sybase Create Table Syntax CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name ( { column_name data_type [ DEFAULT default_expr ] [ column_constraint [, ... ] ] | table_constraint } [, ... ] ) [ INHERITS ( parent_table [, ... ] ) ] [ WITH OIDS | WITHOUT OIDS ] where column_constraint is: [ CONSTRAINT constraint_name ] { NOT NULL | NULL | UNIQUE | PRIMARY KEY | CHECK (expression) | REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] and table_constraint is: [ CONSTRAINT constraint_name ] { UNIQUE ( column_name [, ... ] ) | PRIMARY KEY ( column_name [, ... ] ) | CHECK ( expression ) | FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] [ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] =head1 Create Index Syntax CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( column [ ops_name ] [, ...] ) [ WHERE predicate ] CREATE [ UNIQUE ] INDEX index_name ON table [ USING acc_method ] ( func_name( column [, ... 
]) [ ops_name ] ) [ WHERE predicate ] =cut sub produce { my $translator = shift; $DEBUG = $translator->debug; $WARN = $translator->show_warnings; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my @output; push @output, header_comment unless ($no_comments); my @foreign_keys; for my $table ($schema->get_tables) { my $table_name = $table->name or next; $table_name = mk_name($table_name, '', undef, 1); my $table_name_ur = unreserve($table_name) || ''; my (@comments, @field_defs, @index_defs, @constraint_defs); push @comments, "--\n-- Table: $table_name_ur\n--" unless $no_comments; push @comments, map {"-- $_"} $table->comments; # # Fields # my %field_name_scope; for my $field ($table->get_fields) { my $field_name = mk_name($field->name, '', \%field_name_scope, undef, 1); my $field_name_ur = unreserve($field_name, $table_name); my $field_def = qq["$field_name_ur"]; $field_def =~ s/\"//g; if ($field_def =~ /identity/) { $field_def =~ s/identity/pidentity/; } # # Datatype # my $data_type = lc $field->data_type; my $orig_data_type = $data_type; my %extra = $field->extra; my $list = $extra{'list'} || []; # \todo deal with embedded quotes my $commalist = join(', ', map {qq['$_']} @$list); my $seq_name; my $identity = ''; if ($data_type eq 'enum') { my $check_name = mk_name($table_name . '_' . $field_name, 'chk', undef, 1); push @constraint_defs, "CONSTRAINT $check_name CHECK ($field_name IN ($commalist))"; $data_type .= 'character varying'; } elsif ($data_type eq 'set') { $data_type .= 'character varying'; } else { if ($field->is_auto_increment) { $identity = 'IDENTITY'; } if (defined $translate{$data_type}) { $data_type = $translate{$data_type}; } else { warn "Unknown datatype: $data_type ", "($table_name.$field_name)\n" if $WARN; } } my $size = $field->size; unless ($size) { if ($data_type =~ /numeric/) { $size = '9,0'; } elsif ($orig_data_type eq 'text') { #interpret text fields as long varchars $size = '255'; } elsif ($data_type eq 'varchar' && $orig_data_type eq 'boolean') { $size = '6'; } elsif ($data_type eq 'varchar') { $size = '255'; } } $field_def .= " $data_type"; $field_def .= "($size)" if $size; $field_def .= " $identity" if $identity; # # Default value # my $default = $field->default_value; if (defined $default) { $field_def .= sprintf(' DEFAULT %s', ($field->is_auto_increment && $seq_name) ? qq[nextval('"$seq_name"'::text)] : ($default =~ m/null/i) ? 'NULL' : "'$default'"); } # # Not null constraint # unless ($field->is_nullable) { $field_def .= ' NOT NULL'; } else { $field_def .= ' NULL' if $data_type ne 'bit'; } push @field_defs, $field_def; } # # Constraint Declarations # my @constraint_decs = (); my $c_name_default; for my $constraint ($table->get_constraints) { my $name = $constraint->name || ''; my $type = $constraint->type || NORMAL; my @fields = map { unreserve($_, $table_name) } $constraint->fields; my @rfields = map { unreserve($_, $table_name) } $constraint->reference_fields; next unless @fields; if ($type eq PRIMARY_KEY) { $name ||= mk_name($table_name, 'pk', undef, 1); push @constraint_defs, "CONSTRAINT $name PRIMARY KEY " . '(' . join(', ', @fields) . ')'; } elsif ($type eq FOREIGN_KEY) { $name ||= mk_name($table_name, 'fk', undef, 1); push @foreign_keys, "ALTER TABLE $table ADD CONSTRAINT $name FOREIGN KEY" . ' (' . join(', ', @fields) . ') REFERENCES ' . $constraint->reference_table . ' (' . join(', ', @rfields) . 
')'; } elsif ($type eq UNIQUE) { $name ||= mk_name($table_name, $name || ++$c_name_default, undef, 1); push @constraint_defs, "CONSTRAINT $name UNIQUE " . '(' . join(', ', @fields) . ')'; } } # # Indices # for my $index ($table->get_indices) { push @index_defs, 'CREATE INDEX ' . $index->name . " ON $table_name (" . join(', ', $index->fields) . ")"; } my $drop_statement = $add_drop_table ? qq[DROP TABLE $table_name_ur] : ''; my $create_statement = qq[CREATE TABLE $table_name_ur (\n] . join(",\n", map {" $_"} @field_defs, @constraint_defs) . "\n)"; $create_statement = join("\n\n", @comments) . "\n\n" . $create_statement; push @output, $create_statement, @index_defs,; } foreach my $view ($schema->get_views) { my (@comments, $view_name); $view_name = $view->name(); push @comments, "--\n-- View: $view_name\n--" unless $no_comments; # text of view is already a 'create view' statement so no need # to do anything fancy. push @output, join("\n\n", @comments, $view->sql(),); } foreach my $procedure ($schema->get_procedures) { my (@comments, $procedure_name); $procedure_name = $procedure->name(); push @comments, "--\n-- Procedure: $procedure_name\n--" unless $no_comments; # text of procedure already has the 'create procedure' stuff # so there is no need to do anything fancy. However, we should # think about doing fancy stuff with granting permissions and # so on. push @output, join("\n\n", @comments, $procedure->sql(),); } push @output, @foreign_keys; if ($WARN) { if (%truncated) { warn "Truncated " . keys(%truncated) . " names:\n"; warn "\t" . join("\n\t", sort keys %truncated) . "\n"; } if (%unreserve) { warn "Encounted " . keys(%unreserve) . " unsafe names in schema (reserved or invalid):\n"; warn "\t" . join("\n\t", sort keys %unreserve) . "\n"; } } return wantarray ? @output : join ";\n\n", @output; } sub mk_name { my $basename = shift || ''; my $type = shift || ''; my $scope = shift || ''; my $critical = shift || ''; my $basename_orig = $basename; my $max_name = $type ? $max_id_length - (length($type) + 1) : $max_id_length; $basename = substr($basename, 0, $max_name) if length($basename) > $max_name; my $name = $type ? "${type}_$basename" : $basename; if ($basename ne $basename_orig and $critical) { my $show_type = $type ? "+'$type'" : ""; warn "Truncating '$basename_orig'$show_type to $max_id_length ", "character limit to make '$name'\n" if $WARN; $truncated{$basename_orig} = $name; } $scope ||= \%global_names; if (my $prev = $scope->{$name}) { my $name_orig = $name; $name .= sprintf("%02d", ++$prev); substr($name, $max_id_length - 3) = "00" if length($name) > $max_id_length; warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{$name_orig}++; } $name = substr($name, 0, $max_id_length) if ((length($name) > $max_id_length) && $critical); $scope->{$name}++; return $name; } sub unreserve { my $name = shift || ''; my $schema_obj_name = shift || ''; my ($suffix) = ($name =~ s/(\W.*)$//) ? $1 : ''; # also trap fields that don't begin with a letter return $name if !$reserved{ uc $name } && $name =~ /^[a-z]/i; if ($schema_obj_name) { ++$unreserve{"$schema_obj_name.$name"}; } else { ++$unreserve{"$name (table name)"}; } my $unreserve = sprintf '%s_', $name; return $unreserve . $suffix; } 1; =pod =head1 SEE ALSO SQL::Translator. =head1 AUTHORS Sam Angiuoli Eangiuoli@users.sourceforge.netE, Paul Harrington Eharringp@deshaw.comE, Ken Youens-Clark Ekclark@cpan.orgE. 
=cut SQL-Translator-1.65/lib/SQL/Translator/Producer/SQLite.pm0000644000000000000000000003444414551163724023072 0ustar00rootroot00000000000000package SQL::Translator::Producer::SQLite; =head1 NAME SQL::Translator::Producer::SQLite - SQLite producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'SQLite' ); $t->translate; =head1 DESCRIPTION This module will produce text output of the schema suitable for SQLite. =cut use strict; use warnings; use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment parse_dbms_version batch_alter_table_statements); use SQL::Translator::Generator::DDL::SQLite; our ($DEBUG, $WARN); our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; $WARN = 0 unless defined $WARN; our $max_id_length = 30; my %global_names; # HIDEOUS TEMPORARY DEFAULT WITHOUT QUOTING! our $NO_QUOTES = 1; { my ($quoting_generator, $nonquoting_generator); sub _generator { $NO_QUOTES ? $nonquoting_generator ||= SQL::Translator::Generator::DDL::SQLite->new(quote_chars => []) : $quoting_generator ||= SQL::Translator::Generator::DDL::SQLite->new; } } sub produce { my $translator = shift; local $DEBUG = $translator->debug; local $WARN = $translator->show_warnings; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $producer_args = $translator->producer_args; my $sqlite_version = parse_dbms_version($producer_args->{sqlite_version}, 'perl'); my $no_txn = $producer_args->{no_transaction}; debug("PKG: Beginning production\n"); %global_names = (); #reset # only quote if quotes were requested for real # 0E0 indicates "the default of true" was assumed local $NO_QUOTES = 0 if $translator->quote_identifiers and $translator->quote_identifiers ne '0E0'; my $head; $head = (header_comment() . "\n") unless $no_comments; my @create = (); push @create, "BEGIN TRANSACTION" unless $no_txn; for my $table ($schema->get_tables) { push @create, create_table( $table, { no_comments => $no_comments, sqlite_version => $sqlite_version, add_drop_table => $add_drop_table, } ); } for my $view ($schema->get_views) { push @create, create_view( $view, { add_drop_view => $add_drop_table, no_comments => $no_comments, } ); } for my $trigger ($schema->get_triggers) { push @create, create_trigger( $trigger, { add_drop_trigger => $add_drop_table, no_comments => $no_comments, } ); } push @create, "COMMIT" unless $no_txn; if (wantarray) { return ($head || (), @create); } else { return join('', $head || (), join(";\n\n", @create), ";\n",); } } sub mk_name { my ($name, $scope, $critical) = @_; $scope ||= \%global_names; if (my $prev = $scope->{$name}) { my $name_orig = $name; $name .= sprintf("%02d", ++$prev); substr($name, $max_id_length - 3) = "00" if length($name) > $max_id_length; warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n" if $WARN; $scope->{$name_orig}++; } $scope->{$name}++; return _generator()->quote($name); } sub create_view { my ($view, $options) = @_; my $add_drop_view = $options->{add_drop_view}; my $view_name = _generator()->quote($view->name); $global_names{ $view->name } = 1; debug("PKG: Looking at view '${view_name}'\n"); # Header. Should this look like what mysqldump produces? 
my $extra = $view->extra; my @create; push @create, "DROP VIEW IF EXISTS $view_name" if $add_drop_view; my $create_view = 'CREATE'; $create_view .= " TEMPORARY" if exists($extra->{temporary}) && $extra->{temporary}; $create_view .= ' VIEW'; $create_view .= " IF NOT EXISTS" if exists($extra->{if_not_exists}) && $extra->{if_not_exists}; $create_view .= " ${view_name}"; if (my $sql = $view->sql) { $create_view .= " AS\n ${sql}"; } push @create, $create_view; # Tack the comment onto the first statement. unless ($options->{no_comments}) { $create[0] = "--\n-- View: ${view_name}\n--\n" . $create[0]; } return @create; } sub create_table { my ($table, $options) = @_; my $table_name = _generator()->quote($table->name); $global_names{ $table->name } = 1; my $no_comments = $options->{no_comments}; my $add_drop_table = $options->{add_drop_table}; my $sqlite_version = $options->{sqlite_version} || 0; debug("PKG: Looking at table '$table_name'\n"); my (@index_defs, @constraint_defs); my @fields = $table->get_fields or die "No fields in $table_name"; my $temp = $options->{temporary_table} ? 'TEMPORARY ' : ''; # # Header. # my $exists = ($sqlite_version >= 3.003) ? ' IF EXISTS' : ''; my @create; my ($comment, $create_table) = ""; $comment = "--\n-- Table: $table_name\n--\n" unless $no_comments; if ($add_drop_table) { push @create, $comment . qq[DROP TABLE$exists $table_name]; } else { $create_table = $comment; } $create_table .= "CREATE ${temp}TABLE $table_name (\n"; # # Comments # if ($table->comments and !$no_comments) { $create_table .= "-- Comments: \n-- "; $create_table .= join "\n-- ", $table->comments; $create_table .= "\n--\n\n"; } # # How many fields in PK? # my $pk = $table->primary_key; my @pk_fields = $pk ? $pk->fields : (); # # Fields # my (@field_defs, $pk_set); for my $field (@fields) { push @field_defs, create_field($field); } if (scalar @pk_fields > 1 || (@pk_fields && !grep /INTEGER PRIMARY KEY/, @field_defs)) { push @field_defs, 'PRIMARY KEY (' . join(', ', map _generator()->quote($_), @pk_fields) . ')'; } # # Indices # for my $index ($table->get_indices) { push @index_defs, create_index($index); } # # Constraints # for my $c ($table->get_constraints) { if ($c->type eq "FOREIGN KEY") { push @field_defs, create_foreignkey($c); } elsif ($c->type eq "CHECK") { push @field_defs, create_check_constraint($c); } next unless $c->type eq UNIQUE; push @constraint_defs, create_constraint($c); } $create_table .= join(",\n", map {" $_"} @field_defs) . "\n)"; return (@create, $create_table, @index_defs, @constraint_defs); } sub create_check_constraint { my $c = shift; my $check = ''; $check .= 'CONSTRAINT ' . _generator->quote($c->name) . ' ' if $c->name; $check .= 'CHECK(' . $c->expression . ')'; return $check; } sub create_foreignkey { my $c = shift; my @fields = $c->fields; my @rfields = map { $_ || () } $c->reference_fields; unless (@rfields) { my $rtable_name = $c->reference_table; if (my $ref_table = $c->schema->get_table($rtable_name)) { push @rfields, $ref_table->primary_key; die "FK constraint on " . $rtable_name . '.' . join('', @fields) . " has no reference fields\n" unless @rfields; } else { die "Can't find reference table '$rtable_name' in schema\n"; } } my $fk_sql = sprintf 'FOREIGN KEY (%s) REFERENCES %s(%s)', join(', ', map { _generator()->quote($_) } @fields), _generator()->quote($c->reference_table), join(', ', map { _generator()->quote($_) } @rfields); $fk_sql .= " ON DELETE " . $c->{on_delete} if $c->{on_delete}; $fk_sql .= " ON UPDATE " . 
$c->{on_update} if $c->{on_update}; return $fk_sql; } sub create_field { return _generator()->field($_[0]) } sub create_index { my ($index, $options) = @_; (my $index_table_name = $index->table->name) =~ s/^.+?\.//; # table name may not specify schema my $name = mk_name($index->name || "${index_table_name}_idx"); my $type = $index->type eq 'UNIQUE' ? "UNIQUE " : ''; # strip any field size qualifiers as SQLite doesn't like these my @fields = map { s/\(\d+\)$//; _generator()->quote($_) } $index->fields; $index_table_name = _generator()->quote($index_table_name); warn "removing schema name from '" . $index->table->name . "' to make '$index_table_name'\n" if $WARN; my $index_def = "CREATE ${type}INDEX $name ON " . $index_table_name . ' (' . join(', ', @fields) . ')'; return $index_def; } sub create_constraint { my ($c, $options) = @_; (my $index_table_name = $c->table->name) =~ s/^.+?\.//; # table name may not specify schema my $name = mk_name($c->name || "${index_table_name}_idx"); my @fields = map _generator()->quote($_), $c->fields; $index_table_name = _generator()->quote($index_table_name); warn "removing schema name from '" . $c->table->name . "' to make '$index_table_name'\n" if $WARN; my $c_def = "CREATE UNIQUE INDEX $name ON " . $index_table_name . ' (' . join(', ', @fields) . ')'; return $c_def; } sub create_trigger { my ($trigger, $options) = @_; my $add_drop = $options->{add_drop_trigger}; my @statements; my $trigger_name = $trigger->name; $global_names{$trigger_name} = 1; my $events = $trigger->database_events; for my $evt (@$events) { my $trig_name = $trigger_name; if (@$events > 1) { $trig_name .= "_$evt"; warn "Multiple database events supplied for trigger '$trigger_name', ", "creating trigger '$trig_name' for the '$evt' event.\n" if $WARN; } $trig_name = _generator()->quote($trig_name); push @statements, "DROP TRIGGER IF EXISTS $trig_name" if $add_drop; $DB::single = 1; my $action = ""; if (not ref $trigger->action) { $action = $trigger->action; $action = "BEGIN " . $action . " END" unless $action =~ /^ \s* BEGIN [\s\;] .*? [\s\;] END [\s\;]* $/six; } else { $action = $trigger->action->{for_each} . " " if $trigger->action->{for_each}; $action = $trigger->action->{when} . " " if $trigger->action->{when}; my $steps = $trigger->action->{steps} || []; $action .= "BEGIN "; $action .= $_ . 
"; " for (@$steps); $action .= "END"; } push @statements, sprintf( 'CREATE TRIGGER %s %s %s on %s %s', $trig_name, $trigger->perform_action_when, $evt, _generator()->quote($trigger->on_table), $action ); } return @statements; } sub alter_table { () } # Noop sub add_field { my ($field) = @_; return sprintf("ALTER TABLE %s ADD COLUMN %s", _generator()->quote($field->table->name), create_field($field)); } sub alter_create_index { my ($index) = @_; # This might cause name collisions return create_index($index); } sub alter_create_constraint { my ($constraint) = @_; return create_constraint($constraint) if $constraint->type eq 'UNIQUE'; } sub alter_drop_constraint { alter_drop_index(@_) } sub alter_drop_index { my ($constraint) = @_; return sprintf("DROP INDEX %s", _generator()->quote($constraint->name)); } sub batch_alter_table { my ($table, $diffs, $options) = @_; # If we have any of the following # # rename_field # alter_field # drop_field # # we need to do the following # # BEGIN TRANSACTION; # CREATE TEMPORARY TABLE t1_backup(a,b); # INSERT INTO t1_backup SELECT a,b FROM t1; # DROP TABLE t1; # CREATE TABLE t1(a,b); # INSERT INTO t1 SELECT a,b FROM t1_backup; # DROP TABLE t1_backup; # COMMIT; # # Fun, eh? # # If we have rename_field we do similarly. # # We create the temporary table as a copy of the new table, copy all data # to temp table, create new table and then copy as appropriate taking note # of renamed fields. my $table_name = $table->name; if ( @{ $diffs->{rename_field} } == 0 && @{ $diffs->{alter_field} } == 0 && @{ $diffs->{drop_field} } == 0) { return batch_alter_table_statements($diffs, $options); } my @sql; # $table is the new table but we may need an old one # TODO: this is NOT very well tested at the moment so add more tests my $old_table = $table; if ($diffs->{rename_table} && @{ $diffs->{rename_table} }) { $old_table = $diffs->{rename_table}[0][0]; } my $temp_table_name = $table_name . '_temp_alter'; # CREATE TEMPORARY TABLE t1_backup(a,b); my %temp_table_fields; do { local $table->{name} = $temp_table_name; # We only want the table - don't care about indexes on tmp table my ($table_sql) = create_table($table, { no_comments => 1, temporary_table => 1 }); push @sql, $table_sql; %temp_table_fields = map { $_ => 1 } $table->get_fields; }; # record renamed fields for later my %rename_field = map { $_->[1]->name => $_->[0]->name } @{ $diffs->{rename_field} }; # drop added fields from %temp_table_fields delete @temp_table_fields{ @{ $diffs->{add_field} } }; # INSERT INTO t1_backup SELECT a,b FROM t1; push @sql, sprintf( 'INSERT INTO %s( %s) SELECT %s FROM %s', _generator()->quote($temp_table_name), join(', ', map _generator()->quote($_), grep { $temp_table_fields{$_} } $table->get_fields), join(', ', map _generator()->quote($_), map { $rename_field{$_} ? $rename_field{$_} : $_ } grep { $temp_table_fields{$_} } $table->get_fields), _generator()->quote($old_table->name) ); # DROP TABLE t1; push @sql, sprintf('DROP TABLE %s', _generator()->quote($old_table->name)); # CREATE TABLE t1(a,b); push @sql, create_table($table, { no_comments => 1 }); # INSERT INTO t1 SELECT a,b FROM t1_backup; push @sql, sprintf( 'INSERT INTO %s SELECT %s FROM %s', _generator()->quote($table_name), join(', ', map _generator()->quote($_), $table->get_fields), _generator()->quote($temp_table_name) ); # DROP TABLE t1_backup; push @sql, sprintf('DROP TABLE %s', _generator()->quote($temp_table_name)); return wantarray ? 
@sql : join(";\n", @sql); } sub drop_table { my ($table) = @_; $table = _generator()->quote($table); return "DROP TABLE $table"; } sub rename_table { my ($old_table, $new_table, $options) = @_; $old_table = _generator()->quote($old_table); $new_table = _generator()->quote($new_table); return "ALTER TABLE $old_table RENAME TO $new_table"; } # No-op. Just here to signify that we are a new style parser. sub preproces_schema { } 1; =pod =head1 SEE ALSO SQL::Translator, http://www.sqlite.org/. =head1 AUTHOR Ken Youens-Clark C<< >>. Diff code added by Ash Berlin C<< >>. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/ClassDBI.pm0000644000000000000000000002750714551163724023317 0ustar00rootroot00000000000000package SQL::Translator::Producer::ClassDBI; use strict; use warnings; our $DEBUG; our $VERSION = '1.65'; $DEBUG = 1 unless defined $DEBUG; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); use Data::Dumper; my %CDBI_auto_pkgs = ( MySQL => 'mysql', PostgreSQL => 'Pg', Oracle => 'Oracle', ); sub produce { my $t = shift; local $DEBUG = $t->debug; my $no_comments = $t->no_comments; my $schema = $t->schema; my $args = $t->producer_args; my @create; if (my $fmt = $args->{'format_pkg_name'}) { $t->format_package_name($fmt); } if (my $fmt = $args->{'format_fk_name'}) { $t->format_fk_name($fmt); } my $db_user = $args->{'db_user'} || ''; my $db_pass = $args->{'db_password'} || ''; my $main_pkg_name = $args->{'package_name'} || # $args->{'main_pkg_name'} || # keep this? undocumented $t->format_package_name('DBI'); my $header = header_comment(__PACKAGE__, "# "); my $parser_type = (split /::/, $t->parser_type)[-1]; my $from = $CDBI_auto_pkgs{$parser_type} || ''; my $dsn = $args->{'dsn'} || sprintf( 'dbi:%s:_', $CDBI_auto_pkgs{$parser_type} ? $CDBI_auto_pkgs{$parser_type} : $parser_type ); my $sep = '# ' . '-' x 67; # # Identify "link tables" (have only PK and FK fields). 
# my %linkable; my %linktable; for my $table ($schema->get_tables) { debug("PKG: Table = ", $table->name, "\n"); my $is_link = 1; for my $field ($table->get_fields) { unless ($field->is_primary_key or $field->is_foreign_key) { $is_link = 0; last; } } next unless $is_link; foreach my $left ($table->get_fields) { next unless $left->is_foreign_key; my $lfk = $left->foreign_key_reference or next; my $lr_table = $schema->get_table($lfk->reference_table) or next; my $lr_field_name = ($lfk->reference_fields)[0]; my $lr_field = $lr_table->get_field($lr_field_name); next unless $lr_field->is_primary_key; foreach my $right ($table->get_fields) { next if $left->name eq $right->name; my $rfk = $right->foreign_key_reference or next; my $rr_table = $schema->get_table($rfk->reference_table) or next; my $rr_field_name = ($rfk->reference_fields)[0]; my $rr_field = $rr_table->get_field($rr_field_name); next unless $rr_field->is_primary_key; $linkable{ $lr_table->name }{ $rr_table->name } = $table; $linkable{ $rr_table->name }{ $lr_table->name } = $table; $linktable{ $table->name } = $table; } } } # # Iterate over all tables # my (%packages, $order); for my $table ($schema->get_tables) { my $table_name = $table->name or next; my $table_pkg_name = join '::', $main_pkg_name, $t->format_package_name($table_name); $packages{$table_pkg_name} = { order => ++$order, pkg_name => $table_pkg_name, base => $main_pkg_name, table => $table_name, }; # # Primary key may have a different accessor method name # # if ( my $constraint = $table->primary_key ) { # my $field = ( $constraint->fields )[0]; # $packages{ $table_pkg_name }{'_columns_primary'} = $field; # # if ( my $pk_xform = $t->format_pk_name ) { # my $pk_name = $pk_xform->( $table_pkg_name, $field ); # # $packages{$table_pkg_name}{'pk_accessor'} = # "#\n# Primary key accessor\n#\n" # . "sub $pk_name {\n shift->$field\n}\n\n"; # } # } my $is_data = 0; foreach my $field ($table->get_fields) { if (!$field->is_foreign_key and !$field->is_primary_key) { push @{ $packages{$table_pkg_name}{'_columns_essential'} }, $field->name; $is_data++; } elsif (!$field->is_primary_key) { push @{ $packages{$table_pkg_name}{'_columns_others'} }, $field->name; } } my %linked; if ($is_data) { foreach my $link (keys %{ $linkable{$table_name} }) { my $linkmethodname; if (my $fk_xform = $t->format_fk_name) { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE $linkmethodname = $fk_xform->($linkable{$table_name}{$link}->name, ($schema->get_table($link)->primary_key->fields)[0]) . 's'; } else { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE $linkmethodname = $linkable{$table_name}{$link}->name . '_' . ($schema->get_table($link)->primary_key->fields)[0] . 's'; } my @rk_fields = (); my @lk_fields = (); foreach my $field ($linkable{$table_name}{$link}->get_fields) { next unless $field->is_foreign_key; next unless ($field->foreign_key_reference->reference_table eq $table_name || $field->foreign_key_reference->reference_table eq $link); push @lk_fields, ($field->foreign_key_reference->reference_fields)[0] if $field->foreign_key_reference->reference_table eq $link; push @rk_fields, $field->name if $field->foreign_key_reference->reference_table eq $table_name; } # # If one possible traversal via link table. # if (scalar(@rk_fields) == 1 and scalar(@lk_fields) == 1) { foreach my $rk_field (@rk_fields) { push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkmethodname . " { my \$self = shift; " . "return map \$_->" . ($schema->get_table($link)->primary_key->fields)[0] . ", \$self->" . 
$linkable{$table_name}{$link}->name . "_" . $rk_field . " }\n\n"; } # # Else there is more than one way to traverse it. # ack! Let's treat these types of link tables as # a many-to-one (easier) # # NOTE: we need to rethink the link method name, # as the cardinality has shifted on us. # } elsif (scalar(@rk_fields) == 1) { foreach my $rk_field (@rk_fields) { # # ADD CALLBACK FOR PLURALIZATION MANGLING HERE # push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkable{$table_name}{$link}->name . "s { my \$self = shift; return \$self->" . $linkable{$table_name}{$link}->name . "_" . $rk_field . "(\@_) }\n\n"; } } elsif (scalar(@lk_fields) == 1) { # # These will be taken care of on the other end... # } else { # # Many many many. Need multiple iterations here, # data structure revision to handle N FK sources. # This code has not been tested and likely doesn't # work here. # foreach my $rk_field (@rk_fields) { # ADD CALLBACK FOR PLURALIZATION MANGLING HERE push @{ $packages{$table_pkg_name}{'has_many'}{$link} }, "sub " . $linkable{$table_name}{$link}->name . "_" . $rk_field . "s { my \$self = shift; return \$self->" . $linkable{$table_name}{$link}->name . "_" . $rk_field . "(\@_) }\n\n"; } } } } # # Use foreign keys to set up "has_a/has_many" relationships. # foreach my $field ($table->get_fields) { if ($field->is_foreign_key) { my $table_name = $table->name; my $field_name = $field->name; # my $fk_method = $t->format_fk_name( $table_name, $field_name ); my $fk_method = join('::', $table_pkg_name, $t->format_fk_name($table_name, $field_name)); my $fk = $field->foreign_key_reference; my $ref_table = $fk->reference_table; my $ref_pkg = $t->format_package_name($ref_table); my $ref_field = ($fk->reference_fields)[0]; # my $fk_method = join('::', # $table_pkg_name, $t->format_fk_name( $ref_table ) # ); push @{ $packages{$table_pkg_name}{'has_a'} }, "$table_pkg_name->has_a(\n" . " $field_name => '$ref_pkg'\n);\n\n" . "sub $fk_method {\n" . " return shift->$field_name\n}\n\n"; # if there weren't M-M relationships via the has_many # being set up here, create nice pluralized method alias # rather for user as alt. to ugly tablename_fieldname name # # if ( !$packages{$ref_pkg}{'has_many'}{$table_name} ) { # # # # ADD CALLBACK FOR PLURALIZATION MANGLING HERE # # # push @{ $packages{$ref_pkg}{'has_many'}{$table_name} }, # "sub ${table_name}s {\n " . # "return shift->$table_name\_$field_name\n}\n\n"; # # else ugly # } # else { # } push @{ $packages{$ref_pkg}{'has_many'}{$table_name} }, "$ref_pkg->has_many(\n '${table_name}_${field_name}', " . "'$table_pkg_name' => '$field_name'\n);\n\n"; } } } # # Now build up text of package. # my $base_pkg = sprintf('Class::DBI%s', $from ? "::$from" : ''); push @create, join("\n", "package $main_pkg_name;\n", $header, "use strict;", "use base '$base_pkg';\n", "$main_pkg_name->set_db('Main', '$dsn', '$db_user', '$db_pass');\n\n", ); for my $pkg_name ( sort { $packages{$a}{'order'} <=> $packages{$b}{'order'} } keys %packages ) { my $pkg = $packages{$pkg_name} or next; next unless $pkg->{'pkg_name'}; push @create, join("\n", $sep, "package " . $pkg->{'pkg_name'} . ";", "use base '" . $pkg->{'base'} . "';", "use Class::DBI::Pager;\n\n", ); if ($from) { push @create, join('', $pkg->{'pkg_name'}, "->set_up_table('", $pkg->{'table'}, "');\n\n"); } else { my $table = $schema->get_table($pkg->{'table'}); my @field_names = map { $_->name } $table->get_fields; push @create, join("\n", $pkg_name . "->table('" . $pkg->{'table'} . "');\n", $pkg_name . 
"->columns(All => qw/" . join(' ', @field_names) . "/);\n\n", ); } push @create, "\n"; if (my $pk = $pkg->{'pk_accessor'}) { push @create, $pk; } if (my @has_a = @{ $pkg->{'has_a'} || [] }) { push @create, $_ for @has_a; } foreach my $has_many_key (keys %{ $pkg->{'has_many'} }) { if (my @has_many = @{ $pkg->{'has_many'}{$has_many_key} || [] }) { push @create, $_ for @has_many; } } } push @create, "1;\n"; return wantarray ? @create : join('', @create); } 1; =pod =head1 NAME SQL::Translator::Producer::ClassDBI - create Class::DBI classes from schema =head1 SYNOPSIS Use this producer as you would any other from SQL::Translator. See L for details. This package uses SQL::Translator's formatting methods format_package_name(), format_pk_name(), format_fk_name(), and format_table_name() as it creates classes, one per table in the schema provided. An additional base class is also created for database connectivity configuration. See L for details on how this works. =head1 AUTHORS Allen Day Eallenday@ucla.eduE, Ying Zhang Ezyolive@yahoo.comE, Ken Youens-Clark Ekclark@cpan.orgE. SQL-Translator-1.65/lib/SQL/Translator/Producer/MySQL.pm0000644000000000000000000006461314551163724022677 0ustar00rootroot00000000000000package SQL::Translator::Producer::MySQL; =head1 NAME SQL::Translator::Producer::MySQL - MySQL-specific producer for SQL::Translator =head1 SYNOPSIS Use via SQL::Translator: use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'MySQL', '...' ); $t->translate; =head1 DESCRIPTION This module will produce text output of the schema suitable for MySQL. There are still some issues to be worked out with syntax differences between MySQL versions 3 and 4 ("SET foreign_key_checks," character sets for fields, etc.). =head1 ARGUMENTS This producer takes a single optional producer_arg C, which provides the desired version for the target database. By default MySQL v3 is assumed, and statements pertaining to any features introduced in later versions (e.g. CREATE VIEW) are not produced. Valid version specifiers for C are listed L =head2 Table Types Normally the tables will be created without any explicit table type given and so will use the MySQL default. Any tables involved in foreign key constraints automatically get a table type of InnoDB, unless this is overridden by setting the C extra attribute explicitly on the table. =head2 Extra attributes. The producer recognises the following extra attributes on the Schema objects. =over 4 =item B Set the list of allowed values for Enum fields. =item B, B, B Set the MySQL field options of the same name. =item B, B Use when producing diffs to indicate that the current table/field has been renamed from the old name as given in the attribute value. =item B Set the type of the table e.g. 'InnoDB', 'MyISAM'. This will be automatically set for tables involved in foreign key constraints if it is not already set explicitly. See L<"Table Types">. Please note that the C option is the preferred method of specifying the MySQL storage engine to use, but this method still works for backwards compatibility. =item B, B Set the tables default character set and collation order. =item B, B Set the fields character set and collation order. 
=back =cut use strict; use warnings; our ($DEBUG, %used_names); our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; # Maximum length for most identifiers is 64, according to: # http://dev.mysql.com/doc/refman/4.1/en/identifiers.html # http://dev.mysql.com/doc/refman/5.0/en/identifiers.html my $DEFAULT_MAX_ID_LENGTH = 64; use base qw(SQL::Translator::Producer); use Data::Dumper; use SQL::Translator::Schema::Constants; use SQL::Translator::Generator::DDL::MySQL; use SQL::Translator::Utils qw(debug header_comment truncate_id_uniquely parse_mysql_version batch_alter_table_statements normalize_quote_options ); # # Use only lowercase for the keys (e.g. "long" and not "LONG") # my %translate = ( # # Oracle types # varchar2 => 'varchar', long => 'text', clob => 'longtext', # # Sybase types # int => 'integer', money => 'float', real => 'double', comment => 'text', bit => 'tinyint', # # Access types # 'long integer' => 'integer', 'text' => 'text', 'datetime' => 'datetime', # # PostgreSQL types # bytea => 'BLOB', ); # # Column types that do not support length attribute # my @no_length_attr = qw/ date time timestamp datetime year /; sub preprocess_schema { my ($schema) = @_; # extra->{mysql_table_type} used to be the type. It belongs in options, so # move it if we find it. Return Engine type if found in extra or options # Similarly for mysql_charset and mysql_collate my $extra_to_options = sub { my ($table, $extra_name, $opt_name) = @_; my $extra = $table->extra; my $extra_type = delete $extra->{$extra_name}; # Now just to find if there is already an Engine or Type option... # and lets normalize it to ENGINE since: # # The ENGINE table option specifies the storage engine for the table. # TYPE is a synonym, but ENGINE is the preferred option name. # my $options = $table->options; # If multiple option names, normalize to the first one if (ref $opt_name) { OPT_NAME: for (@$opt_name[ 1 .. $#$opt_name ]) { for my $idx (0 .. $#{$options}) { my ($key, $value) = %{ $options->[$idx] }; if (uc $key eq $_) { $options->[$idx] = { $opt_name->[0] => $value }; last OPT_NAME; } } } $opt_name = $opt_name->[0]; } # This assumes that there isn't both a Type and an Engine option. OPTION: for my $idx (0 .. $#{$options}) { my ($key, $value) = %{ $options->[$idx] }; next unless uc $key eq $opt_name; # make sure case is right on option name delete $options->[$idx]{$key}; return $options->[$idx]{$opt_name} = $value || $extra_type; } if ($extra_type) { push @$options, { $opt_name => $extra_type }; return $extra_type; } }; # Names are only specific to a given schema local %used_names = (); # # Work out which tables need to be InnoDB to support foreign key # constraints. We do this first as we need InnoDB at both ends. # foreach my $table ($schema->get_tables) { $extra_to_options->($table, 'mysql_table_type', [ 'ENGINE', 'TYPE' ]); $extra_to_options->($table, 'mysql_charset', 'CHARACTER SET'); $extra_to_options->($table, 'mysql_collate', 'COLLATE'); foreach my $c ($table->get_constraints) { next unless $c->type eq FOREIGN_KEY; # Normalize constraint names here. my $c_name = $c->name; # Give the constraint a name if it doesn't have one, so it doesn't feel # left out $c_name = $table->name . 
'_fk' unless length $c_name; $c->name(next_unused_name($c_name)); for my $meth (qw/table reference_table/) { my $table = $schema->get_table($c->$meth) || next; # This normalizes the types to ENGINE and returns the value if its there next if $extra_to_options->($table, 'mysql_table_type', [ 'ENGINE', 'TYPE' ]); $table->options({ 'ENGINE' => 'InnoDB' }); } } # foreach constraints my %map = (mysql_collate => 'collate', mysql_charset => 'character set'); foreach my $f ($table->get_fields) { my $extra = $f->extra; for (keys %map) { $extra->{ $map{$_} } = delete $extra->{$_} if exists $extra->{$_}; } my @size = $f->size; if (!$size[0] && $f->data_type =~ /char$/) { $f->size((255)); } } } } { my ($quoting_generator, $nonquoting_generator); sub _generator { my $options = shift; return $options->{generator} if exists $options->{generator}; return normalize_quote_options($options) ? $quoting_generator ||= SQL::Translator::Generator::DDL::MySQL->new() : $nonquoting_generator ||= SQL::Translator::Generator::DDL::MySQL->new(quote_chars => [],); } } sub produce { my $translator = shift; local $DEBUG = $translator->debug; local %used_names; my $no_comments = $translator->no_comments; my $add_drop_table = $translator->add_drop_table; my $schema = $translator->schema; my $show_warnings = $translator->show_warnings || 0; my $producer_args = $translator->producer_args; my $mysql_version = parse_mysql_version($producer_args->{mysql_version}, 'perl') || 0; my $max_id_length = $producer_args->{mysql_max_id_length} || $DEFAULT_MAX_ID_LENGTH; my $generator = _generator({ quote_identifiers => $translator->quote_identifiers }); debug("PKG: Beginning production\n"); %used_names = (); my $create = ''; $create .= header_comment unless ($no_comments); # \todo Don't set if MySQL 3.x is set on command line my @create = "SET foreign_key_checks=0"; preprocess_schema($schema); # # Generate sql # my @table_defs = (); for my $table ($schema->get_tables) { # print $table->name, "\n"; push @table_defs, create_table( $table, { add_drop_table => $add_drop_table, show_warnings => $show_warnings, no_comments => $no_comments, generator => $generator, max_id_length => $max_id_length, mysql_version => $mysql_version } ); } if ($mysql_version >= 5.000001) { for my $view ($schema->get_views) { push @table_defs, create_view( $view, { add_replace_view => $add_drop_table, show_warnings => $show_warnings, no_comments => $no_comments, generator => $generator, max_id_length => $max_id_length, mysql_version => $mysql_version } ); } } if ($mysql_version >= 5.000002) { for my $trigger ($schema->get_triggers) { push @table_defs, create_trigger( $trigger, { add_drop_trigger => $add_drop_table, show_warnings => $show_warnings, no_comments => $no_comments, generator => $generator, max_id_length => $max_id_length, mysql_version => $mysql_version } ); } } # print "@table_defs\n"; push @table_defs, "SET foreign_key_checks=1"; return wantarray ? ($create ? $create : (), @create, @table_defs) : ($create . join('', map { $_ ? 
"$_;\n\n" : () } (@create, @table_defs))); } sub create_trigger { my ($trigger, $options) = @_; my $generator = _generator($options); my $trigger_name = $trigger->name; debug("PKG: Looking at trigger '${trigger_name}'\n"); my @statements; my $events = $trigger->database_events; for my $event (@$events) { my $name = $trigger_name; if (@$events > 1) { $name .= "_$event"; warn "Multiple database events supplied for trigger '${trigger_name}', ", "creating trigger '${name}' for the '${event}' event\n" if $options->{show_warnings}; } my $action = $trigger->action; if ($action !~ /^ \s* BEGIN [\s\;] .*? [\s\;] END [\s\;]* $/six) { $action .= ";" unless $action =~ /;\s*\z/; $action = "BEGIN $action END"; } push @statements, "DROP TRIGGER IF EXISTS " . $generator->quote($name) if $options->{add_drop_trigger}; push @statements, sprintf( "CREATE TRIGGER %s %s %s ON %s\n FOR EACH ROW %s", $generator->quote($name), $trigger->perform_action_when, $event, $generator->quote($trigger->on_table), $action, ); } # Tack the comment onto the first statement $statements[0] = "--\n-- Trigger " . $generator->quote($trigger_name) . "\n--\n" . $statements[0] unless $options->{no_comments}; return @statements; } sub create_view { my ($view, $options) = @_; my $generator = _generator($options); my $view_name = $view->name; my $view_name_qt = $generator->quote($view_name); debug("PKG: Looking at view '${view_name}'\n"); # Header. Should this look like what mysqldump produces? my $create = ''; $create .= "--\n-- View: $view_name_qt\n--\n" unless $options->{no_comments}; $create .= 'CREATE'; $create .= ' OR REPLACE' if $options->{add_replace_view}; $create .= "\n"; my $extra = $view->extra; # ALGORITHM if (exists($extra->{mysql_algorithm}) && defined(my $algorithm = $extra->{mysql_algorithm})) { $create .= " ALGORITHM = ${algorithm}\n" if $algorithm =~ /(?:UNDEFINED|MERGE|TEMPTABLE)/i; } # DEFINER if (exists($extra->{mysql_definer}) && defined(my $user = $extra->{mysql_definer})) { $create .= " DEFINER = ${user}\n"; } # SECURITY if (exists($extra->{mysql_security}) && defined(my $security = $extra->{mysql_security})) { $create .= " SQL SECURITY ${security}\n" if $security =~ /(?:DEFINER|INVOKER)/i; } #Header, cont. $create .= " VIEW $view_name_qt"; if (my @fields = $view->fields) { my $list = join ', ', map { $generator->quote($_) } @fields; $create .= " ( ${list} )"; } if (my $sql = $view->sql) { # do not wrap parenthesis around the selector, mysql doesn't like this # http://bugs.mysql.com/bug.php?id=9198 $create .= " AS\n ${sql}\n"; } # $create .= ""; return $create; } sub create_table { my ($table, $options) = @_; my $generator = _generator($options); my $table_name = $generator->quote($table->name); debug("PKG: Looking at table '$table_name'\n"); # # Header. Should this look like what mysqldump produces? # my $create = ''; my $drop; $create .= "--\n-- Table: $table_name\n--\n" unless $options->{no_comments}; $drop = qq[DROP TABLE IF EXISTS $table_name] if $options->{add_drop_table}; $create .= "CREATE TABLE $table_name (\n"; # # Fields # my @field_defs; for my $field ($table->get_fields) { push @field_defs, create_field($field, $options); } # # Indices # my @index_defs; my %indexed_fields; for my $index ($table->get_indices) { push @index_defs, create_index($index, $options); $indexed_fields{$_} = 1 for $index->fields; } # # Constraints -- need to handle more than just FK. 
-ky # my @constraint_defs; my @constraints = $table->get_constraints; for my $c (@constraints) { my $constr = create_constraint($c, $options); push @constraint_defs, $constr if ($constr); unless ($indexed_fields{ ($c->fields())[0] } || $c->type ne FOREIGN_KEY) { push @index_defs, "INDEX (" . $generator->quote(($c->fields())[0]) . ")"; $indexed_fields{ ($c->fields())[0] } = 1; } } $create .= join(",\n", map {" $_"} @field_defs, @index_defs, @constraint_defs); # # Footer # $create .= "\n)"; $create .= generate_table_options($table, $options) || ''; # $create .= ";\n\n"; return $drop ? ($drop, $create) : $create; } sub generate_table_options { my ($table, $options) = @_; my $create; my $table_type_defined = 0; my $generator = _generator($options); my $charset = $table->extra('mysql_charset'); my $collate = $table->extra('mysql_collate'); my $union = undef; for my $t1_option_ref ($table->options) { my ($key, $value) = %{$t1_option_ref}; $table_type_defined = 1 if uc $key eq 'ENGINE' or uc $key eq 'TYPE'; if (uc $key eq 'CHARACTER SET') { $charset = $value; next; } elsif (uc $key eq 'COLLATE') { $collate = $value; next; } elsif (uc $key eq 'UNION') { $union = '(' . join(', ', map { $generator->quote($_) } @$value) . ')'; next; } $create .= " $key=$value"; } my $mysql_table_type = $table->extra('mysql_table_type'); $create .= " ENGINE=$mysql_table_type" if $mysql_table_type && !$table_type_defined; my $comments = $table->comments; $create .= " DEFAULT CHARACTER SET $charset" if $charset; $create .= " COLLATE $collate" if $collate; $create .= " UNION=$union" if $union; $create .= qq[ comment='$comments'] if $comments; return $create; } sub create_field { my ($field, $options) = @_; my $generator = _generator($options); my $field_name = $field->name; debug("PKG: Looking at field '$field_name'\n"); my $field_def = $generator->quote($field_name); # data type and size my $data_type = $field->data_type; my @size = $field->size; my %extra = $field->extra; my $list = $extra{'list'} || []; my $commalist = join(', ', map { __PACKAGE__->_quote_string($_) } @$list); my $charset = $extra{'mysql_charset'}; my $collate = $extra{'mysql_collate'}; my $mysql_version = $options->{mysql_version} || 0; # # Oracle "number" type -- figure best MySQL type # if (lc $data_type eq 'number') { # not an integer if (scalar @size > 1) { $data_type = 'double'; } elsif ($size[0] && $size[0] >= 12) { $data_type = 'bigint'; } elsif ($size[0] && $size[0] <= 1) { $data_type = 'tinyint'; } else { $data_type = 'int'; } } # # Convert a large Oracle varchar to "text" # (not necessary as of 5.0.3 http://dev.mysql.com/doc/refman/5.0/en/char.html) # elsif ($data_type =~ /char/i && $size[0] > 255) { unless ($size[0] <= 65535 && $mysql_version >= 5.000003) { $data_type = 'text'; @size = (); } } elsif ($data_type =~ /boolean/i) { if ($mysql_version >= 4) { $data_type = 'boolean'; } else { $data_type = 'enum'; $commalist = "'0','1'"; } } elsif (exists $translate{ lc $data_type }) { $data_type = $translate{ lc $data_type }; } @size = () if $data_type =~ /(text|blob)/i; if ($data_type =~ /(double|float)/ && scalar @size == 1) { push @size, '0'; } $field_def .= " $data_type"; if (lc($data_type) eq 'enum' || lc($data_type) eq 'set') { $field_def .= '(' . $commalist . ')'; } elsif (defined $size[0] && $size[0] > 0 && !grep lc($data_type) eq $_, @no_length_attr) { $field_def .= '(' . join(', ', @size) . 
')'; } # char sets $field_def .= " CHARACTER SET $charset" if $charset; $field_def .= " COLLATE $collate" if $collate; # MySQL qualifiers for my $qual (qw[ binary unsigned zerofill ]) { my $val = $extra{$qual} || $extra{ uc $qual } or next; $field_def .= " $qual"; } for my $qual ('character set', 'collate', 'on update') { my $val = $extra{$qual} || $extra{ uc $qual } or next; if (ref $val) { $field_def .= " $qual ${$val}"; } else { $field_def .= " $qual $val"; } } # Null? if ($field->is_nullable) { $field_def .= ' NULL'; } else { $field_def .= ' NOT NULL'; } # Default? __PACKAGE__->_apply_default_value( $field, \$field_def, [ 'NULL' => \'NULL', ], ); if (my $comments = $field->comments) { $comments = __PACKAGE__->_quote_string($comments); $field_def .= qq[ comment $comments]; } # auto_increment? $field_def .= " auto_increment" if $field->is_auto_increment; return $field_def; } sub _quote_string { my ($self, $string) = @_; $string =~ s/([\\'])/$1$1/g; return qq{'$string'}; } sub alter_create_index { my ($index, $options) = @_; my $table_name = _generator($options)->quote($index->table->name); return join(' ', 'ALTER TABLE', $table_name, 'ADD', create_index(@_)); } sub create_index { my ($index, $options) = @_; my $generator = _generator($options); my @fields; for my $field ($index->fields) { my $name = $generator->quote($field->name); if (my $len = $field->extra->{prefix_length}) { $name .= "($len)"; } push @fields, $name; } return join(' ', map { $_ || () } lc $index->type eq 'normal' ? 'INDEX' : $index->type . ' INDEX', $index->name ? $generator->quote(truncate_id_uniquely($index->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH)) : '', '(' . join(', ', @fields) . ')'); } sub alter_drop_index { my ($index, $options) = @_; my $table_name = _generator($options)->quote($index->table->name); return join(' ', 'ALTER TABLE', $table_name, 'DROP', 'INDEX', $index->name || $index->fields); } sub alter_drop_constraint { my ($c, $options) = @_; my $generator = _generator($options); my $table_name = $generator->quote($c->table->name); my @out = ('ALTER', 'TABLE', $table_name, 'DROP'); if ($c->type eq PRIMARY_KEY) { push @out, $c->type; } else { push @out, ($c->type eq FOREIGN_KEY ? $c->type : "CONSTRAINT"), $generator->quote($c->name); } return join(' ', @out); } sub alter_create_constraint { my ($index, $options) = @_; my $table_name = _generator($options)->quote($index->table->name); return join(' ', 'ALTER TABLE', $table_name, 'ADD', create_constraint(@_)); } sub create_constraint { my ($c, $options) = @_; my $generator = _generator($options); my $leave_name = $options->{leave_name} || undef; my $reference_table_name = $generator->quote($c->reference_table); my @fields = $c->fields; if ($c->type eq PRIMARY_KEY) { return unless @fields; return 'PRIMARY KEY (' . join(", ", map { $generator->quote($_) } @fields) . ')'; } elsif ($c->type eq UNIQUE) { return unless @fields; return sprintf 'UNIQUE %s(%s)', ( (defined $c->name && $c->name) ? $generator->quote(truncate_id_uniquely($c->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH),) . ' ' : '' ), (join ', ', map { $generator->quote($_) } @fields), ; } elsif ($c->type eq FOREIGN_KEY) { return unless @fields; # # Make sure FK field is indexed or MySQL complains. # my $table = $c->table; my $c_name = truncate_id_uniquely($c->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH); my $def = join(' ', 'CONSTRAINT', ($c_name ? $generator->quote($c_name) : ()), 'FOREIGN KEY'); $def .= ' (' . 
join(', ', map { $generator->quote($_) } @fields) . ')'; $def .= ' REFERENCES ' . $reference_table_name; my @rfields = map { $_ || () } $c->reference_fields; unless (@rfields) { my $rtable_name = $c->reference_table; if (my $ref_table = $table->schema->get_table($rtable_name)) { push @rfields, $ref_table->primary_key; } else { warn "Can't find reference table '$rtable_name' " . "in schema\n" if $options->{show_warnings}; } } if (@rfields) { $def .= ' (' . join(', ', map { $generator->quote($_) } @rfields) . ')'; } else { warn "FK constraint on " . $table->name . '.' . join('', @fields) . " has no reference fields\n" if $options->{show_warnings}; } if ($c->match_type) { $def .= ' MATCH ' . ($c->match_type =~ /full/i) ? 'FULL' : 'PARTIAL'; } if ($c->on_delete) { $def .= ' ON DELETE ' . $c->on_delete; } if ($c->on_update) { $def .= ' ON UPDATE ' . $c->on_update; } return $def; } elsif ($c->type eq CHECK_C) { my $table = $c->table; my $c_name = truncate_id_uniquely($c->name, $options->{max_id_length} || $DEFAULT_MAX_ID_LENGTH); my $def = join(' ', 'CONSTRAINT', ($c_name ? $generator->quote($c_name) : ()), 'CHECK'); $def .= ' (' . $c->expression . ')'; return $def; } return undef; } sub alter_table { my ($to_table, $options) = @_; my $table_options = generate_table_options($to_table, $options) || ''; my $table_name = _generator($options)->quote($to_table->name); my $out = sprintf('ALTER TABLE %s%s', $table_name, $table_options); return $out; } sub rename_field { alter_field(@_) } sub alter_field { my ($from_field, $to_field, $options) = @_; my $generator = _generator($options); my $table_name = $generator->quote($to_field->table->name); my $out = sprintf( 'ALTER TABLE %s CHANGE COLUMN %s %s', $table_name, $generator->quote($from_field->name), create_field($to_field, $options) ); return $out; } sub add_field { my ($new_field, $options) = @_; my $table_name = _generator($options)->quote($new_field->table->name); my $out = sprintf('ALTER TABLE %s ADD COLUMN %s', $table_name, create_field($new_field, $options)); return $out; } sub drop_field { my ($old_field, $options) = @_; my $generator = _generator($options); my $table_name = $generator->quote($old_field->table->name); my $out = sprintf('ALTER TABLE %s DROP COLUMN %s', $table_name, $generator->quote($old_field->name)); return $out; } sub batch_alter_table { my ($table, $diff_hash, $options) = @_; # InnoDB has an issue with dropping and re-adding a FK constraint under the # name in a single alter statement, see: http://bugs.mysql.com/bug.php?id=13741 # # We have to work round this. my %fks_to_alter; my %fks_to_drop = map { $_->type eq FOREIGN_KEY ? 
($_->name => $_) : () } @{ $diff_hash->{alter_drop_constraint} }; my %fks_to_create = map { if ($_->type eq FOREIGN_KEY) { $fks_to_alter{ $_->name } = $fks_to_drop{ $_->name } if $fks_to_drop{ $_->name }; ($_->name => $_); } else { () } } @{ $diff_hash->{alter_create_constraint} }; my @drop_stmt; if (scalar keys %fks_to_alter) { $diff_hash->{alter_drop_constraint} = [ grep { !$fks_to_alter{ $_->name } } @{ $diff_hash->{alter_drop_constraint} } ]; @drop_stmt = batch_alter_table($table, { alter_drop_constraint => [ values %fks_to_alter ] }, $options); } my @stmts = batch_alter_table_statements($diff_hash, $options); #quote my $generator = _generator($options); # rename_table makes things a bit more complex my $renamed_from = ""; $renamed_from = $generator->quote($diff_hash->{rename_table}[0][0]->name) if $diff_hash->{rename_table} && @{ $diff_hash->{rename_table} }; return unless @stmts; # Just zero or one stmts. return now return (@drop_stmt, @stmts) unless @stmts > 1; # Now strip off the 'ALTER TABLE xyz' of all but the first one my $table_name = $generator->quote($table->name); my $re = $renamed_from ? qr/^ALTER TABLE (?:\Q$table_name\E|\Q$renamed_from\E) / : qr/^ALTER TABLE \Q$table_name\E /; my $first = shift @stmts; my ($alter_table) = $first =~ /($re)/; my $padd = " " x length($alter_table); return @drop_stmt, join(",\n", $first, map { s/$re//; $padd . $_ } @stmts); } sub drop_table { my ($table, $options) = @_; return ( # Drop (foreign key) constraints so table drops cleanly batch_alter_table( $table, { alter_drop_constraint => [ grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints ] }, $options ), 'DROP TABLE ' . _generator($options)->quote($table), ); } sub rename_table { my ($old_table, $new_table, $options) = @_; my $generator = _generator($options); my $old_table_name = $generator->quote($old_table); my $new_table_name = $generator->quote($new_table); return "ALTER TABLE $old_table_name RENAME TO $new_table_name"; } sub next_unused_name { my $name = shift || ''; if (!defined($used_names{$name})) { $used_names{$name} = $name; return $name; } my $i = 1; while (defined($used_names{ $name . '_' . $i })) { ++$i; } $name .= '_' . $i; $used_names{$name} = $name; return $name; } 1; =pod =head1 SEE ALSO SQL::Translator, http://www.mysql.com/. =head1 AUTHORS darren chamberlain Edarren@cpan.orgE, Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/Latex.pm0000644000000000000000000000350614551163724023001 0ustar00rootroot00000000000000package SQL::Translator::Producer::Latex; =pod =head1 NAME SQL::Translator::Producer::Latex - Produces latex formatted tables ready for import from schema. =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( from => 'MySQL', filename => 'foo_schema.sql', to => 'Latex', ); print $translator->translate; =head1 DESCRIPTION Currently you will get one class (with the a table stereotype) generated per table in the schema. The fields are added as attributes of the classes and their datatypes set. It doesn't currently set any of the relationships. It doesn't do any layout, all the classes are in one big stack. However it is still useful as you can use the layout tools in Dia to automatically arrange them horizontally or vertically. 
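Note that what this producer actually emits is a LaTeX fragment (one
\subsubsection plus a table/tabular environment per schema table), not a
standalone document, so the output is normally wrapped in a document that
supplies the preamble. A minimal sketch, assuming a plain article document
class (the wrapper and file names here are illustrative only, not part of
this producer):

  use SQL::Translator;

  my $translator = SQL::Translator->new(
      from     => 'MySQL',
      filename => 'foo_schema.sql',
      to       => 'Latex',
  );

  # translate() returns the LaTeX fragment; wrap it ourselves
  my $body = $translator->translate or die $translator->error;

  open my $fh, '>', 'schema.tex' or die "schema.tex: $!";
  print {$fh} "\\documentclass{article}\n",
              "\\begin{document}\n",
              $body,
              "\\end{document}\n";
  close $fh;
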
=head2 Producer Args =cut use strict; use warnings; our @EXPORT_OK; our $VERSION = '1.65'; use SQL::Translator::Utils 'debug'; sub produce { my $translator = shift; my $schema = $translator->schema; my $o = ''; for my $table ($schema->get_tables) { my $table_name = $table->name or next; my $n = latex($table_name); $o .= sprintf ' \subsubsection{%s} %s \begin{table}[htb] \caption{%s} \label{tab:%s} \center { \small \begin{tabular}{l l p{8cm}} Column & Datatype & Description \\\\ \hline ', $n, latex($table->comments), $n, $table_name; foreach my $f ($table->get_fields) { $o .= sprintf '%s & %s & %s \\\\', map { latex($_) } ($f->name, $f->data_type, $f->comments || ''); $o .= "\n"; } $o .= sprintf ' \end{tabular} } \end{table} \clearpage '; } return $o; } sub latex { my $s = shift; return '' unless defined $s; $s =~ s/([\&\_\$\{\#])/\\$1/g; return $s; } 1; =pod =head1 AUTHOR Chris Mungall =head1 SEE ALSO SQL::Translator. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/DiaUml.pm0000644000000000000000000000277414551163724023105 0ustar00rootroot00000000000000package SQL::Translator::Producer::DiaUml; =pod =head1 NAME SQL::Translator::Producer::DiaUml - Produces dia UML diagrams from schema. =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new( from => 'MySQL', filename => 'foo_schema.sql', to => 'DiaUml', ); print $translator->translate; =head1 DESCRIPTION Currently you will get one class (with the a table stereotype) generated per table in the schema. The fields are added as attributes of the classes and their datatypes set. It doesn't currently set any of the relationships. It doesn't do any layout, all the classes are in one big stack. However it is still useful as you can use the layout tools in Dia to automatically arrange them horizontally or vertically. =head2 Producer Args =cut use strict; use warnings; our ($DEBUG, @EXPORT_OK); our $VERSION = '1.65'; $DEBUG = 0 unless defined $DEBUG; use File::ShareDir qw/dist_dir/; use SQL::Translator::Utils 'debug'; use base qw/SQL::Translator::Producer::TT::Base/; # Convert produce call into a method call on our class sub produce { return __PACKAGE__->new(translator => shift)->run; } sub tt_config { (INCLUDE_PATH => File::Spec->catdir(dist_dir('SQL-Translator'), 'DiaUml')); } sub tt_schema {'schema.tt2'} 1; =pod =head1 AUTHOR Mark Addison Egrommit@users.sourceforge.netE. =head1 TODO * Add the foreign keys from the schema as UML relations. * Layout the classes. =head1 SEE ALSO SQL::Translator. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/POD.pm0000644000000000000000000000703114551163724022343 0ustar00rootroot00000000000000package SQL::Translator::Producer::POD; =head1 NAME SQL::Translator::Producer::POD - POD producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'POD', '...' ); print $t->translate; =head1 DESCRIPTION Creates a POD description of each table, field, index, and constraint. A good starting point for text documentation of a schema. You can easily convert the output to HTML or text using "perldoc" or other interesting formats using Pod::POM or Template::Toolkit's POD plugin. 
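For example, a typical workflow is to write the produced POD to a file and
then run perldoc or pod2html over that file. A short sketch (the file names
below are purely illustrative):

  use SQL::Translator;

  my $t = SQL::Translator->new( from => 'MySQL', to => 'POD' );

  my $pod = $t->translate( filename => 'schema.sql' )
      or die $t->error;

  open my $fh, '>', 'schema.pod' or die "schema.pod: $!";
  print {$fh} $pod;
  close $fh;

  # then, from the shell:
  #   perldoc schema.pod
  #   pod2html schema.pod > schema.html
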
=cut use strict; use warnings; our $VERSION = '1.65'; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(header_comment); sub produce { my $t = shift; my $schema = $t->schema; my $schema_name = $schema->name || 'Schema'; my $args = $t->producer_args; my $title = $args->{'title'} || $schema_name; my $pod = "=pod\n\n=head1 DESCRIPTION\n\n$title\n\n=head1 TABLES\n\n"; for my $table ($schema->get_tables) { my $table_name = $table->name or next; my @fields = $table->get_fields or next; $pod .= "=head2 $table_name\n\n=head3 FIELDS\n\n"; # # Fields # for my $field (@fields) { $pod .= "=head4 " . $field->name . "\n\n=over 4\n\n"; my $data_type = $field->data_type; my $size = $field->size; $data_type .= "($size)" if $size; $pod .= "=item * $data_type\n\n"; $pod .= "=item * PRIMARY KEY\n\n" if $field->is_primary_key; my $default = $field->default_value; $pod .= "=item * Default '$default' \n\n" if defined $default; $pod .= sprintf("=item * Nullable '%s' \n\n", $field->is_nullable ? 'Yes' : 'No'); $pod .= "=back\n\n"; } # # Indices # if (my @indices = $table->get_indices) { $pod .= "=head3 INDICES\n\n"; for my $index (@indices) { $pod .= "=head4 " . $index->type . "\n\n=over 4\n\n"; $pod .= "=item * Fields = " . join(', ', $index->fields) . "\n\n"; $pod .= "=back\n\n"; } } # # Constraints # if (my @constraints = $table->get_constraints) { $pod .= "=head3 CONSTRAINTS\n\n"; for my $c (@constraints) { $pod .= "=head4 " . $c->type . "\n\n=over 4\n\n"; if ($c->type eq CHECK_C) { $pod .= "=item * Expression = " . $c->expression . "\n\n"; } else { $pod .= "=item * Fields = " . join(', ', $c->fields) . "\n\n"; if ($c->type eq FOREIGN_KEY) { $pod .= "=item * Reference Table = Lreference_table . ">\n\n"; $pod .= "=item * Reference Fields = " . join(', ', map {"L"} $c->reference_fields) . "\n\n"; } if (my $update = $c->on_update) { $pod .= "=item * On update = $update\n\n"; } if (my $delete = $c->on_delete) { $pod .= "=item * On delete = $delete\n\n"; } } $pod .= "=back\n\n"; } } } my $header = (map { $_ || () } split(/\n/, header_comment('', '')))[0]; $header =~ s/^Created by //; $pod .= "=head1 PRODUCED BY\n\n$header\n\n=cut"; return $pod; } 1; # ------------------------------------------------------------------- # Expect poison from the standing water. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =head2 CONTRIBUTORS Jonathan Yu Efrequency@cpan.orgE =head1 SEE ALSO perldoc, perlpod, Pod::POM, Template::Manual::Plugins. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/JSON.pm0000644000000000000000000001325214551163724022474 0ustar00rootroot00000000000000package SQL::Translator::Producer::JSON; =head1 NAME SQL::Translator::Producer::JSON - A JSON producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new(producer => 'JSON'); =head1 DESCRIPTION This module serializes a schema to a JSON string. 
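The pretty, indent and canonical producer args, when supplied, are passed
straight through to the JSON encoder, so human-readable and stably ordered
output can be requested as in the following sketch (the input file name is
illustrative):

  use SQL::Translator;

  my $translator = SQL::Translator->new(
      parser        => 'MySQL',
      producer      => 'JSON',
      producer_args => { pretty => 1, canonical => 1 },
  );

  my $json = $translator->translate( filename => 'schema.sql' )
      or die $translator->error;

  print $json;
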
=cut use strict; use warnings; our $VERSION = '1.65'; use JSON::MaybeXS 'to_json'; sub produce { my $translator = shift; my $schema = $translator->schema; return to_json( { schema => { tables => { map { ($_->name => view_table($_)) } $schema->get_tables, }, views => { map { ($_->name => view_view($_)) } $schema->get_views, }, triggers => { map { ($_->name => view_trigger($_)) } $schema->get_triggers, }, procedures => { map { ($_->name => view_procedure($_)) } $schema->get_procedures, }, }, translator => { add_drop_table => $translator->add_drop_table, filename => $translator->filename, no_comments => $translator->no_comments, parser_args => $translator->parser_args, producer_args => $translator->producer_args, parser_type => $translator->parser_type, producer_type => $translator->producer_type, show_warnings => $translator->show_warnings, trace => $translator->trace, version => $translator->version, }, keys %{ $schema->extra } ? ('extra' => { $schema->extra }) : (), }, { allow_blessed => 1, allow_unknown => 1, ( map { $_ => $translator->producer_args->{$_} } grep { defined $translator->producer_args->{$_} } qw[ pretty indent canonical ] ), } ); } sub view_table { my $table = shift; return { 'name' => $table->name, 'order' => $table->order, 'options' => $table->options || [], $table->comments ? ('comments' => [ $table->comments ]) : (), 'constraints' => [ map { view_constraint($_) } $table->get_constraints ], 'indices' => [ map { view_index($_) } $table->get_indices ], 'fields' => { map { ($_->name => view_field($_)) } $table->get_fields }, keys %{ $table->extra } ? ('extra' => { $table->extra }) : (), }; } sub view_constraint { my $constraint = shift; return { 'deferrable' => scalar $constraint->deferrable, 'expression' => scalar $constraint->expression, 'fields' => [ map { ref $_ ? $_->name : $_ } $constraint->field_names ], 'match_type' => scalar $constraint->match_type, 'name' => scalar $constraint->name, 'options' => scalar $constraint->options, 'on_delete' => scalar $constraint->on_delete, 'on_update' => scalar $constraint->on_update, 'reference_fields' => [ map { ref $_ ? $_->name : $_ } $constraint->reference_fields ], 'reference_table' => scalar $constraint->reference_table, 'type' => scalar $constraint->type, keys %{ $constraint->extra } ? ('extra' => { $constraint->extra }) : (), }; } sub view_field { my $field = shift; return { 'order' => scalar $field->order, 'name' => scalar $field->name, 'data_type' => scalar $field->data_type, 'size' => [ $field->size ], 'default_value' => scalar $field->default_value, 'is_nullable' => scalar $field->is_nullable, 'is_primary_key' => scalar $field->is_primary_key, 'is_unique' => scalar $field->is_unique, $field->is_auto_increment ? ('is_auto_increment' => 1) : (), $field->comments ? ('comments' => [ $field->comments ]) : (), keys %{ $field->extra } ? ('extra' => { $field->extra }) : (), }; } sub view_procedure { my $procedure = shift; return { 'order' => scalar $procedure->order, 'name' => scalar $procedure->name, 'sql' => scalar $procedure->sql, 'parameters' => scalar $procedure->parameters, 'owner' => scalar $procedure->owner, $procedure->comments ? ('comments' => [ $procedure->comments ]) : (), keys %{ $procedure->extra } ? 
('extra' => { $procedure->extra }) : (), }; } sub view_trigger { my $trigger = shift; return { 'order' => scalar $trigger->order, 'name' => scalar $trigger->name, 'perform_action_when' => scalar $trigger->perform_action_when, 'database_events' => scalar $trigger->database_events, 'fields' => scalar $trigger->fields, 'on_table' => scalar $trigger->on_table, 'action' => scalar $trigger->action, ( defined $trigger->scope ? ('scope' => scalar $trigger->scope,) : () ), keys %{ $trigger->extra } ? ('extra' => { $trigger->extra }) : (), }; } sub view_view { my $view = shift; return { 'order' => scalar $view->order, 'name' => scalar $view->name, 'sql' => scalar $view->sql, 'fields' => scalar $view->fields, keys %{ $view->extra } ? ('extra' => { $view->extra }) : (), }; } sub view_index { my $index = shift; return { 'name' => scalar $index->name, 'type' => scalar $index->type, 'fields' => [ map { ref($_) && $_->extra && keys %{ $_->extra } ? { name => $_->name, %{ $_->extra } } : "$_" } $index->fields ], 'options' => scalar $index->options, keys %{ $index->extra } ? ('extra' => { $index->extra }) : (), }; } 1; =head1 SEE ALSO SQL::Translator, JSON::MaybeXS, http://www.json.org/. =head1 AUTHORS darren chamberlain Edarren@cpan.orgE, Ken Youens-Clark Ekclark@cpan.orgE. Jon Jensen Ejonj@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/Storable.pm0000644000000000000000000000205414551163724023474 0ustar00rootroot00000000000000package SQL::Translator::Producer::Storable; =head1 NAME SQL::Translator::Producer::Storable - serializes the SQL::Translator::Schema object via the Storable module =head1 SYNOPSIS use SQL::Translator; my $translator = SQL::Translator->new; $translator->producer('Storable'); =head1 DESCRIPTION This module uses Storable to serialize a schema to a string so that it can be saved to disk. Serializing a schema and then calling producers on the stored can realize significant performance gains when parsing takes a long time. =cut use strict; use warnings; our ($DEBUG, @EXPORT_OK); $DEBUG = 0 unless defined $DEBUG; our $VERSION = '1.65'; use Storable; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); sub produce { my $t = shift; my $args = $t->producer_args; my $schema = $t->schema; my $serialized = Storable::nfreeze($schema); return $serialized; } 1; =pod =head1 AUTHOR Paul Harrington Eharringp@deshaw.comE. =head1 SEE ALSO SQL::Translator, SQL::Translator::Schema, Storable. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/SQLServer.pm0000644000000000000000000000435014551163724023550 0ustar00rootroot00000000000000package SQL::Translator::Producer::SQLServer; use strict; use warnings; our ($DEBUG, $WARN); our $VERSION = '1.65'; $DEBUG = 1 unless defined $DEBUG; use SQL::Translator::Schema::Constants; use SQL::Translator::Utils qw(debug header_comment); use SQL::Translator::Generator::DDL::SQLServer; sub produce { my $translator = shift; SQL::Translator::Generator::DDL::SQLServer->new( add_comments => !$translator->no_comments, add_drop_table => $translator->add_drop_table, )->schema($translator->schema); } 1; =head1 NAME SQL::Translator::Producer::SQLServer - MS SQLServer producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator; my $t = SQL::Translator->new( parser => '...', producer => 'SQLServer' ); $t->translate; =head1 DESCRIPTION This is currently a thin wrapper around the nextgen L DDL maker. =head1 Extra Attributes =over 4 =item field.list List of values for an enum field. =back =head1 TODO * !! Write some tests !! 
* Reserved words list needs updating to SQLServer. * Triggers, Procedures and Views DO NOT WORK # Text of view is already a 'create view' statement so no need to # be fancy foreach ( $schema->get_views ) { my $name = $_->name(); $output .= "\n\n"; $output .= "--\n-- View: $name\n--\n\n" unless $no_comments; my $text = $_->sql(); $text =~ s/\r//g; $output .= "$text\nGO\n"; } # Text of procedure already has the 'create procedure' stuff # so there is no need to do anything fancy. However, we should # think about doing fancy stuff with granting permissions and # so on. foreach ( $schema->get_procedures ) { my $name = $_->name(); $output .= "\n\n"; $output .= "--\n-- Procedure: $name\n--\n\n" unless $no_comments; my $text = $_->sql(); $text =~ s/\r//g; $output .= "$text\nGO\n"; } =head1 SEE ALSO L =head1 AUTHORS See the included AUTHORS file: L =head1 COPYRIGHT Copyright (c) 2012 the SQL::Translator L as listed above. =head1 LICENSE This code is free software and may be distributed under the same terms as Perl itself. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/Dumper.pm0000644000000000000000000001664014551163724023163 0ustar00rootroot00000000000000package SQL::Translator::Producer::Dumper; =head1 NAME SQL::Translator::Producer::Dumper - SQL Dumper producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator::Producer::Dumper; Options: db_user Database username db_password Database password dsn DSN for DBI mysql_loadfile Create MySQL's LOAD FILE syntax instead of INSERTs skip=t1[,t2] Skip tables in comma-separated list skiplike=regex Skip tables in comma-separated list =head1 DESCRIPTION This producer creates a Perl script that can connect to a database and dump the data as INSERT statements (a la mysqldump) or as a file suitable for MySQL's LOAD DATA command. If you enable "add-truncate" or specify tables to "skip" (also using the "skiplike" regular expression) then the generated dumper script will leave out those tables. However, these will also be options in the generated dumper, so you can wait to specify these options when you dump your database. The database username, password, and DSN can be hardcoded into the generated script, or part of the DSN can be intuited from the "database" argument. =cut use strict; use warnings; use Config; use SQL::Translator; use File::Temp 'tempfile'; use Template; use Data::Dumper; our $VERSION = '1.65'; sub produce { my $t = shift; my $args = $t->producer_args; my $schema = $t->schema; my $add_truncate = $args->{'add_truncate'} || 0; my $skip = $args->{'skip'} || ''; my $skiplike = $args->{'skiplike'} || ''; my $db_user = $args->{'db_user'} || 'db_user'; my $db_pass = $args->{'db_password'} || 'db_pass'; my $parser_name = $t->parser_type; my %skip = map { $_, 1 } map { s/^\s+|\s+$//; $_ } split(/,/, $skip); my $sqlt_version = $t->version; if ($parser_name =~ /Parser::(\w+)$/) { $parser_name = $1; } my %type_to_dbd = ( MySQL => 'mysql', Oracle => 'Oracle', PostgreSQL => 'Pg', SQLite => 'SQLite', Sybase => 'Sybase', ); my $dbd = $type_to_dbd{$parser_name} || 'DBD'; my $dsn = $args->{'dsn'} || "dbi:$dbd:"; if ($dbd eq 'Pg' && !$args->{'dsn'}) { $dsn .= 'dbname=dbname;host=hostname'; } elsif ($dbd eq 'Oracle' && !$args->{'dsn'}) { $db_user = "$db_user/$db_pass@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)" . 
"(HOST=hostname)(PORT=1521))(CONNECT_DATA=(SID=sid)))"; $db_pass = ''; } elsif ($dbd eq 'mysql' && !$args->{'dsn'}) { $dsn .= 'dbname'; } my $template = Template->new; my $template_text = template(); my $out; $template->process( \$template_text, { translator => $t, schema => $schema, db_user => $db_user, db_pass => $db_pass, dsn => $dsn, perl => $Config{'startperl'}, skip => \%skip, skiplike => $skiplike, }, \$out ) or die $template->error; return $out; } sub template { # # Returns the template to be processed by Template Toolkit # return <<'EOF'; [% perl || '#!/usr/bin/perl' %] [% USE date %] # # Generated by SQL::Translator [% translator.version %] # [% date.format( date.now, "%Y-%m-%d" ) %] # For more info, see http://sqlfairy.sourceforge.net/ # use strict; use Cwd; use DBI; use Getopt::Long; use File::Spec::Functions 'catfile'; my ( $help, $add_truncate, $skip, $skiplike, $no_comments, $takelike, $mysql_loadfile ); GetOptions( 'add-truncate' => \$add_truncate, 'h|help' => \$help, 'no-comments' => \$no_comments, 'mysql-loadfile' => \$mysql_loadfile, 'skip:s' => \$skip, 'skiplike:s' => \$skiplike, 'takelike:s' => \$takelike, ); if ( $help ) { print <<"USAGE"; Usage: $0 [options] > dump.sql Options: -h|--help Show help and exit --add-truncate Add "TRUNCATE TABLE" statements --mysql-loadfile Create MySQL's LOAD FILE syntax, not INSERTs --no-comments Suppress comments --skip=t1[,t2] Comma-separated list of tables to skip --skiplike=regex Regular expression of table names to skip --takelike=regex Regular expression of table names to take USAGE exit(0); } $no_comments = 1 if $mysql_loadfile; [%- SET table_defs = []; SET max_field = 0; FOREACH table IN schema.get_tables; SET table_name = table.name; NEXT IF skip.$table_name; NEXT IF skiplike AND table_name.match("(?:$skiplike)"); SET field_names = []; SET types = {}; FOR field IN table.get_fields; field_name = field.name; fname_len = field.name.length; max_field = fname_len > max_field ? fname_len : max_field; types.$field_name = field.data_type.match( '(char|str|long|text|enum|date)' ) ? 'string' : 'number'; field_names.push( field_name ); END; table_defs.push({ name => table_name, types => types, fields => field_names, }); END -%] my $db = DBI->connect( '[% dsn %]', '[% db_user %]', '[% db_pass %]', { RaiseError => 1 } ); my %skip = map { $_, 1 } map { s/^\s+|\s+$//; $_ } split (/,/, $skip); my @tables = ( [%- FOREACH t IN table_defs %] { table_name => '[% t.name %]', fields => [ qw/ [% t.fields.join(' ') %] / ], types => { [%- FOREACH fname IN t.types.keys %] '[% fname %]' => '[% t.types.$fname %]', [%- END %] }, }, [%- END %] ); for my $table ( @tables ) { my $table_name = $table->{'table_name'}; next if $skip{ $table_name }; next if $skiplike && $table_name =~ qr/$skiplike/; next if $takelike && $table_name !~ qr/$takelike/; my ( $out_fh, $outfile ); if ( $mysql_loadfile ) { $outfile = catfile( cwd(), "$table_name.txt" ); open $out_fh, ">$outfile" or die "Can't write LOAD FILE to '$table_name': $!\n"; } print "--\n-- Data for table '$table_name'\n--\n" unless $no_comments; if ( $add_truncate ) { print "TRUNCATE TABLE $table_name;\n"; } my $sql = 'select ' . join(', ', @{ $table->{'fields'} } ) . " from $table_name" ; my $sth = $db->prepare( $sql ); $sth->execute; while ( my $rec = $sth->fetchrow_hashref ) { my @vals; for my $fld ( @{ $table->{'fields'} } ) { my $val = $rec->{ $fld }; if ( defined $val ) { if ( $table->{'types'}{ $fld } eq 'string' ) { $val =~ s/'/\\'/g; $val = qq['$val'] } } else { $val = $mysql_loadfile ? 
'\N' : 'NULL'; } push @vals, $val; } if ( $mysql_loadfile ) { print $out_fh join("\t", @vals), "\n"; } else { print "INSERT INTO $table_name (". join(', ', @{ $table->{'fields'} }) . ') VALUES (', join(', ', @vals), ");\n"; } } if ( $out_fh ) { print "LOAD DATA INFILE '$outfile' INTO TABLE $table_name ", "FIELDS OPTIONALLY ENCLOSED BY '\\'';\n"; close $out_fh or die "Can't close filehandle: $!\n"; } else { print "\n"; } } EOF } 1; # ------------------------------------------------------------------- # To create a little flower is the labour of ages. # William Blake # ------------------------------------------------------------------- =pod =head1 AUTHOR Ken Youens-Clark Ekclark@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/HTML.pm0000644000000000000000000002142414551163724022467 0ustar00rootroot00000000000000package SQL::Translator::Producer::HTML; use strict; use warnings; use Data::Dumper; our $VERSION = '1.65'; our $NAME = __PACKAGE__; our $NOWRAP = 0 unless defined $NOWRAP; our $NOLINKTABLE = 0 unless defined $NOLINKTABLE; # Emit XHTML by default $CGI::XHTML = $CGI::XHTML = 42; use SQL::Translator::Schema::Constants; # ------------------------------------------------------------------- # Main entry point. Returns a string containing HTML. # ------------------------------------------------------------------- sub produce { my $t = shift; my $args = $t->producer_args; my $schema = $t->schema; my $schema_name = $schema->name || 'Schema'; my $title = $args->{'title'} || "Description of $schema_name"; my $wrap = !( defined $args->{'nowrap'} ? $args->{'nowrap'} : $NOWRAP ); my $linktable = !( defined $args->{'nolinktable'} ? $args->{'nolinktable'} : $NOLINKTABLE ); my %stylesheet = defined $args->{'stylesheet'} ? (-style => { src => $args->{'stylesheet'} }) : (); my @html; my $q = defined $args->{'pretty'} ? do { require CGI::Pretty; import CGI::Pretty; CGI::Pretty->new; } : do { require CGI; import CGI; CGI->new; }; my ($table, @table_names); if ($wrap) { push @html, $q->start_html({ -title => $title, %stylesheet, -meta => { generator => $NAME }, }), $q->h1({ -class => 'SchemaDescription' }, $title), $q->hr; } @table_names = grep { length $_->name } $schema->get_tables; if ($linktable) { # Generate top menu, with links to full table information my $count = scalar(@table_names); $count = sprintf "%d table%s", $count, $count == 1 ? '' : 's'; # Leading table of links push @html, $q->comment("Table listing ($count)"), $q->a({ -name => 'top' }), $q->start_table({ -width => '100%', -class => 'LinkTable' }), # XXX This needs to be colspan="$#{$table->fields}" class="LinkTableHeader" $q->Tr($q->td({ -class => 'LinkTableCell' }, $q->h2({ -class => 'LinkTableTitle' }, 'Tables'),),); for my $table (@table_names) { my $table_name = $table->name; push @html, $q->comment("Start link to table '$table_name'"), $q->Tr({ -class => 'LinkTableRow' }, $q->td({ -class => 'LinkTableCell' }, qq[$table_name])), $q->comment("End link to table '$table_name'"); } push @html, $q->end_table; } for my $table ($schema->get_tables) { my $table_name = $table->name or next; my @fields = $table->get_fields or next; push @html, $q->comment("Starting table '$table_name'"), $q->a({ -name => $table_name }), $q->table( { -class => 'TableHeader', -width => '100%' }, $q->Tr( { -class => 'TableHeaderRow' }, $q->td({ -class => 'TableHeaderCell' }, $q->h3($table_name)), qq[], $q->td({ -class => 'TableHeaderCell', -align => 'right' }, qq[Top]) ) ); if (my @comments = map { $_ ? 
$_ : () } $table->comments) { push @html, $q->b("Comments:"), $q->br, $q->em(map { $q->br, $_ } @comments); } # # Fields # push @html, $q->start_table({ -border => 1 }), $q->Tr($q->th( { -class => 'FieldHeader' }, [ 'Field Name', 'Data Type', 'Size', 'Default Value', 'Other', 'Foreign Key' ] )); my $i = 0; for my $field (@fields) { my $name = $field->name || ''; $name = qq[$name]; my $data_type = $field->data_type || ''; my $size = defined $field->size ? $field->size : ''; my $default = defined $field->default_value ? $field->default_value : ''; my $comment = $field->comments || ''; my $fk = ''; if ($field->is_foreign_key) { my $c = $field->foreign_key_reference; my $ref_table = $c->reference_table || ''; my $ref_field = ($c->reference_fields)[0] || ''; $fk = qq[$ref_table.$ref_field]; } my @other = (); push @other, 'PRIMARY KEY' if $field->is_primary_key; push @other, 'UNIQUE' if $field->is_unique; push @other, 'NOT NULL' unless $field->is_nullable; push @other, $comment if $comment; my $class = $i++ % 2 ? 'even' : 'odd'; push @html, $q->Tr( { -class => "tr-$class" }, $q->td({ -class => "FieldCellName" }, $name), $q->td({ -class => "FieldCellType" }, $data_type), $q->td({ -class => "FieldCellSize" }, $size), $q->td({ -class => "FieldCellDefault" }, $default), $q->td({ -class => "FieldCellOther" }, join(', ', @other)), $q->td({ -class => "FieldCellFK" }, $fk), ); } push @html, $q->end_table; # # Indices # if (my @indices = $table->get_indices) { push @html, $q->h3('Indices'), $q->start_table({ -border => 1 }), $q->Tr({ -class => 'IndexRow' }, $q->th([ 'Name', 'Fields' ])); for my $index (@indices) { my $name = $index->name || ''; my $fields = join(', ', $index->fields) || ''; push @html, $q->Tr({ -class => 'IndexCell' }, $q->td([ $name, $fields ])); } push @html, $q->end_table; } # # Constraints # my @constraints = grep { $_->type ne PRIMARY_KEY } $table->get_constraints; if (@constraints) { push @html, $q->h3('Constraints'), $q->start_table({ -border => 1 }), $q->Tr({ -class => 'IndexRow' }, $q->th([ 'Type', 'Fields' ])); for my $c (@constraints) { my $type = $c->type || ''; my $fields = join(', ', $c->fields) || ''; push @html, $q->Tr({ -class => 'IndexCell' }, $q->td([ $type, $fields ])); } push @html, $q->end_table; } push @html, $q->hr; } my $sqlt_version = $t->version; if ($wrap) { push @html, qq[Created by ], qq[SQL::Translator $sqlt_version], $q->end_html; } return join "\n", @html; } 1; # ------------------------------------------------------------------- # Always be ready to speak your mind, # and a base man will avoid you. # William Blake # ------------------------------------------------------------------- =head1 NAME SQL::Translator::Producer::HTML - HTML producer for SQL::Translator =head1 SYNOPSIS use SQL::Translator::Producer::HTML; =head1 DESCRIPTION Creates an HTML document describing the tables. The HTML produced is composed of a number of tables: =over 4 =item Links A link table sits at the top of the output, and contains anchored links to elements in the rest of the document. If the I producer arg is present, then this table is not produced. =item Tables Each table in the schema has its own HTML table. The top row is a row of EthE elements, with a class of B; these elements are I, I, I, I, I and I. Each successive row describes one field in the table, and has a class of B, where $item id corresponds to the label of the column. 
For example: id int 11 PRIMARY KEY, NOT NULL foo varchar 255 NOT NULL updated timestamp 0 =back Unless the I producer arg is present, the HTML will be enclosed in a basic HTML header and footer. If the I producer arg is present, the generated HTML will be nicely spaced and human-readable. Otherwise, it will have very little insignificant whitespace and be generally smaller. =head1 AUTHORS Ken Youens-Clark Ekclark@cpan.orgE, Darren Chamberlain Edarren@cpan.orgE. =cut SQL-Translator-1.65/lib/SQL/Translator/Producer/TT/0000755000000000000000000000000014551164244021707 5ustar00rootroot00000000000000SQL-Translator-1.65/lib/SQL/Translator/Producer/TT/Base.pm0000644000000000000000000002044314551163724023124 0ustar00rootroot00000000000000package SQL::Translator::Producer::TT::Base; =pod =head1 NAME SQL::Translator::Producer::TT::Base - TT (Template Toolkit) based Producer base class. =cut use strict; use warnings; our @EXPORT_OK; our $VERSION = '1.65'; use Template; use Data::Dumper; use IO::Handle; use Exporter; use base qw(Exporter); @EXPORT_OK = qw(produce); use SQL::Translator::Utils 'debug'; # Hack to convert the produce call into an object. ALL sub-classes need todo # this so that the correct class gets created. sub produce { return __PACKAGE__->new(translator => shift)->run; } sub new { my $proto = shift; my $class = ref $proto || $proto; my %args = @_; my $me = bless {}, $class; $me->{translator} = delete $args{translator} || die "Need a translator."; return $me; } sub translator { shift->{translator}; } sub schema { shift->{translator}->schema(@_); } # Util args access method. # No args - Return hashref (the actual hash in Translator) or hash of args. # 1 arg - Return that named args value. # Args - List of names. Return values of the given arg names in list context # or return as hashref in scalar context. Any names given that don't # exist in the args are returned as undef. sub args { my $me = shift; # No args unless (@_) { return wantarray ? %{ $me->{translator}->producer_args } : $me->{translator}->producer_args; } # 1 arg. Return the value whatever the context. return $me->{translator}->producer_args->{ $_[0] } if @_ == 1; # More args so return values list or hash ref my %args = %{ $me->{translator}->producer_args }; return wantarray ? @args{@_} : { map { ($_ => $args{$_}) } @_ }; } # Run the produce and return the result. sub run { my $me = shift; my $scma = $me->schema; my %args = %{ $me->args }; my $tmpl = $me->tt_schema or die "No template!"; debug "Processing template $tmpl\n"; my $out; my $tt = Template->new( #DEBUG => $me->translator->debug, ABSOLUTE => 1, # Set so we can use from the command line sensibly RELATIVE => 1, # Maybe the cmd line code should set it! Security! $me->tt_config, # Hook for sub-classes to add config %args, # Allow any TT opts to be passed in the producer_args ) || die "Failed to initialize Template object: " . Template->error; $tt->process( $tmpl, { $me->tt_default_vars, $me->tt_vars, # Sub-class hook for adding vars }, \$out ) or die "Error processing template '$tmpl': " . 
$tt->error; return $out; } # Sub class hooks #----------------------------------------------------------------------------- sub tt_config { () } sub tt_schema { my $me = shift; my $class = ref $me; my $file = $me->args("ttfile"); return $file if $file; no strict 'refs'; my $ref = *{"$class\:\:DATA"}{IO}; if ($ref->opened) { local $/ = undef; # Slurp mode return \<$ref>; } undef; } sub tt_default_vars { my $me = shift; return ( translator => $me->translator, schema => $me->pre_process_schema($me->translator->schema), ); } sub pre_process_schema { $_[1] } sub tt_vars { () } 1; =pod =head1 SYNOPSIS # Create a producer using a template in the __DATA__ section. package SQL::Translator::Producer::Foo; use base qw/SQL::Translator::Producer::TT::Base/; # Convert produce call into a method call on our new class sub produce { return __PACKAGE__->new( translator => shift )->run; }; # Configure the Template object. sub tt_config { ( INTERPOLATE => 1 ); } # Extra vars to add to the template sub tt_vars { ( foo => "bar" ); } # Put template in DATA section (or use file with ttfile producer arg) __DATA__ Schema Database: [% schema.database %] Foo: $foo ... =head1 DESCRIPTION A base class producer designed to be sub-classed to create new TT based producers cheaply - by simply giving the template to use and sprinkling in some extra template variables and config. You can find an introduction to this module in L. The 1st thing the module does is convert the produce sub routine call we get from SQL::Translator into a method call on an object, which we can then sub-class. This is done with the following code which needs to appear in B sub classes. # Convert produce call into an object method call sub produce { return __PACKAGE__->new( translator => shift )->run; }; See L below for details. The upshot of this is we can make new template producers by sub classing this base class, adding the above snippet and a template. The module also provides a number of hooks into the templating process, see L
for details. See the L above for an example of creating a simple producer using a single template stored in the producer's DATA section. =head1 SUB CLASS HOOKS Sub-classes can override these methods to control the templating by giving the template source, adding variables and giving config to the Template object. =head2 tt_config sub tt_config { ( INTERPOLATE => 1 ); } Return hash of Template config to add to that given to the L