SQL-Translator-1.65/Makefile.PL
use strict;
use warnings;
use ExtUtils::MakeMaker;
use File::ShareDir::Install;
do './maint/Makefile.PL.include' or die $@
unless -f 'META.yml';
my $eumm_version = eval $ExtUtils::MakeMaker::VERSION;
my %eumm_args = (
NAME => 'SQL::Translator',
ABSTRACT => 'SQL DDL transformations and more',
VERSION_FROM => 'lib/SQL/Translator.pm',
LICENSE => 'perl',
MIN_PERL_VERSION => '5.008001',
CONFIGURE_REQUIRES => {
'ExtUtils::MakeMaker' => '6.54', # to deal with x.y.z versions properly
'File::ShareDir::Install' => '0',
},
TEST_REQUIRES => {
'JSON::MaybeXS' => '1.003003',
'YAML' => '0.66',
'XML::Writer' => '0.500',
'Test::More' => '0.88',
'Test::Differences' => '0',
'Test::Exception' => '0.42',
'Text::ParseWords' => '0',
},
PREREQ_PM => {
'Digest::SHA' => '0',
'Carp::Clan' => '0',
'Parse::RecDescent' => '1.967009',
'DBI' => '1.54',
'File::ShareDir' => '1.0',
'Moo' => '1.000003',
'Package::Variant' => '1.001001',
'Sub::Quote' => '0',
'Try::Tiny' => '0.04',
'Scalar::Util' => '0',
'List::Util' => '1.33', # support for `any`
},
realclean => {
FILES => 't/data/roundtrip_autogen.yaml'
},
EXE_FILES => [
qw|
script/sqlt-diagram
script/sqlt-diff
script/sqlt-diff-old
script/sqlt-dumper
script/sqlt-graph
script/sqlt
|
],
META_MERGE => {
"meta-spec" => { version => 2 },
dynamic_config => 0,
resources => {
bugtracker => {
web => 'https://github.com/dbsrgits/sql-translator/issues',
},
repository => {
type => 'git',
url => 'git@github.com:dbsrgits/sql-translator.git',
web => 'https://github.com/dbsrgits/sql-translator/',
},
license => ['http://dev.perl.org/licenses/'],
x_IRC => 'irc://irc.perl.org/#sql-translator',
x_Ratings => 'http://cpanratings.perl.org/d/SQL-Translator',
},
x_authority => 'cpan:JROBINSON',
no_index => {
directory => [qw(maint share xt)],
},
prereqs => {
runtime => {
recommends => {
'Template' => '2.20',
'GD' => '0',
'GraphViz' => '0',
'Graph::Directed' => '0',
'Spreadsheet::ParseExcel' => '0.41',
'Text::RecordParser' => '0.02',
'XML::LibXML' => '1.69',
},
},
develop => {
requires => {
'Template' => '2.20',
'GD' => '0',
'DBD::SQLite' => '0',
'CGI' => '0',
'GraphViz' => '0',
'Graph::Directed' => '0',
'Spreadsheet::ParseExcel' => '0.41',
'Text::RecordParser' => '0.02',
'XML::LibXML' => '1.69',
'Test::EOL' => '1.1',
'Test::NoTabs' => '1.1',
'Software::LicenseUtils' => '0', # for Distar
},
},
},
},
);
install_share 'share';
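# Fold one prereq bucket into another (e.g. TEST_REQUIRES into
# BUILD_REQUIRES) so the version-dependent downgrades below still expose
# the dependencies to older ExtUtils::MakeMaker releases.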
sub _move_to {
my ($hash, $fromkey, $tokey) = @_;
$hash->{$tokey} = { %{ $hash->{$tokey} || {} }, %{ delete($hash->{$fromkey}) || {} }, };
}
delete $eumm_args{META_MERGE} if $eumm_version < 6.45_01;
delete $eumm_args{CONFIGURE_REQUIRES}
if $eumm_version < 6.51_03; # too late to use so just delete
_move_to(\%eumm_args, 'TEST_REQUIRES', 'BUILD_REQUIRES')
if $eumm_version < 6.63_03;
_move_to(\%eumm_args, 'BUILD_REQUIRES', 'PREREQ_PM')
if $eumm_version < 6.55_01;
$eumm_args{NO_MYMETA} = 1
if $eumm_version >= 6.57_02 and $eumm_version < 6.57_07;
WriteMakefile(%eumm_args);
package MY;
use File::ShareDir::Install qw(postamble);
SQL-Translator-1.65/share/PrecompiledParsers/Parse/RecDescent/DDL/SQLT/README
The contents of this directory are automatically regenerated when
invoking Makefile.PL in author mode.
SQL-Translator-1.65/share/DiaUml/diagram.tt2
#A4#
[% content %]
SQL-Translator-1.65/share/DiaUml/uml-class-start.tt2
[% # vim:ft=tt2
DEFAULT
visible_operations='true'
-%]
#[% name %]#
#[% stereotype %]#
#[% comment %]#
SQL-Translator-1.65/share/DiaUml/layer.tt2
[%-
DEFAULT name="Layer1" visible="true"
%]
[% content %]
SQL-Translator-1.65/share/DiaUml/uml-class-all.tt2
[%# vim:ft=tt2
-%]
#[% name %]#
#[% stereotype %]#
#[% comment %]#
[% FOREACH attributes %]
[% INCLUDE "uml-attribute.tt2" %]
[% END %]
SQL-Translator-1.65/share/DiaUml/uml-class-end.tt2
[%# vim:ft=tt2
-%]
SQL-Translator-1.65/share/DiaUml/schema.tt2
[%# vim:ft=tt2 -%]
[% WRAPPER diagram.tt2 %]
[% WRAPPER layer.tt2 name="Background" %]
[% FOREACH table IN schema.get_tables %]
[% INCLUDE 'uml-class-start.tt2'
name = table.name
stereotype = 'Table'
visible_operations = 'false'
%]
[% FOREACH field IN table.get_fields;
SET type = field.data_type;
SET type = "$type($field.size)" IF field.size;
INCLUDE "uml-attribute.tt2"
name = field.name
stereotype = 'Field'
type = type
value = field.default_value
;
END %]
[% INCLUDE 'uml-class-end.tt2' %]
[% END %]
[% END %]
[% END %]
SQL-Translator-1.65/share/DiaUml/uml-attribute.tt2
[%# vim:ft=tt2
-%]
[%-
DEFAULT visibility=0 abstract="false" class_scope="false"
%]
#[% name %]#
#[% type %]#
#[% value %]#
#[% comment %]#
SQL-Translator-1.65/share/DiaUml/uml-class.tt2
[%# vim:ft=tt2
-%]
[% INCLUDE 'uml-class-start.tt2' %]
[%- FOREACH attributes;
INCLUDE "uml-attribute.tt2";
END %]
[% INCLUDE 'uml-class-end.tt2' %]
SQL-Translator-1.65/lib/Test/SQL/Translator.pm
package Test::SQL::Translator;
=pod
=head1 NAME
Test::SQL::Translator - Test::More test functions for the Schema objects.
=cut
use strict;
use warnings;
use Test::More;
use SQL::Translator::Schema::Constants;
use base qw(Exporter);
our @EXPORT_OK;
our $VERSION = '1.65';
our @EXPORT = qw(
schema_ok
table_ok
field_ok
constraint_ok
index_ok
view_ok
trigger_ok
procedure_ok
maybe_plan
);
# $ATTRIBUTES{ <object_type> } = { <attrib_name> => <default_value>, ... }
my %ATTRIBUTES = (
field => {
name => undef,
data_type => '',
default_value => undef,
size => '0',
is_primary_key => 0,
is_unique => 0,
is_nullable => 1,
is_foreign_key => 0,
is_auto_increment => 0,
comments => '',
extra => {},
# foreign_key_reference,
is_valid => 1,
# order
},
constraint => {
name => '',
type => '',
deferrable => 1,
expression => '',
is_valid => 1,
fields => [],
match_type => '',
options => [],
on_delete => '',
on_update => '',
reference_fields => [],
reference_table => '',
extra => {},
},
index => {
fields => [],
is_valid => 1,
name => "",
options => [],
type => NORMAL,
extra => {},
},
view => {
name => "",
sql => "",
fields => [],
is_valid => 1,
extra => {},
},
trigger => {
name => '',
perform_action_when => undef,
database_events => undef,
on_table => undef,
action => undef,
is_valid => 1,
extra => {},
},
procedure => {
name => '',
sql => '',
parameters => [],
owner => '',
comments => '',
extra => {},
},
table => {
comments => undef,
name => '',
#primary_key => undef, # pkey constraint
options => [],
#order => 0,
fields => undef,
constraints => undef,
indices => undef,
is_valid => 1,
extra => {},
},
schema => {
name => '',
database => '',
procedures => undef, # [] when set
tables => undef, # [] when set
triggers => undef, # [] when set
views => undef, # [] when set
is_valid => 1,
extra => {},
}
);
# Given a test hash and schema object name set any attribute keys not present in
# the test hash to their default value for that schema object type.
# e.g. default_attribs( $test, "field" );
sub default_attribs {
my ($hashref, $object_type) = @_;
if (!exists $ATTRIBUTES{$object_type}) {
die "Can't add default attribs for unknown Schema " . "object type '$object_type'.";
}
for my $attr (
grep { !exists $hashref->{$_} }
keys %{ $ATTRIBUTES{$object_type} }
) {
$hashref->{$attr} = $ATTRIBUTES{$object_type}{$attr};
}
return $hashref;
}
# Format test name so it will prepend the test names used below.
sub t_name {
my $name = shift;
$name ||= "";
$name = "$name - " if $name;
return $name;
}
sub field_ok {
my ($f1, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "field");
unless ($f1) {
fail " Field '$test->{name}' doesn't exist!";
# TODO Do a skip on the following tests. Currently the test counts won't
# match at the end. So at least it fails.
return;
}
my $full_name = $f1->table->name . "." . $test->{name};
is($f1->name, $test->{name}, "${t_name}Field '$full_name'");
is($f1->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
is($f1->data_type, $test->{data_type}, "$t_name type is '$test->{data_type}'");
is($f1->size, $test->{size}, "$t_name size is '$test->{size}'");
is(
$f1->default_value,
$test->{default_value},
"$t_name default value is "
. (
defined($test->{default_value})
? "'$test->{default_value}'"
: "UNDEF"
)
);
is($f1->is_nullable, $test->{is_nullable}, "$t_name " . ($test->{is_nullable} ? 'can' : 'cannot') . ' be null');
is($f1->is_unique, $test->{is_unique}, "$t_name " . ($test->{is_unique} ? 'can' : 'cannot') . ' be unique');
is(
$f1->is_primary_key,
$test->{is_primary_key},
"$t_name is " . ($test->{is_primary_key} ? '' : 'not ') . 'a primary_key'
);
is(
$f1->is_foreign_key,
$test->{is_foreign_key},
"$t_name is " . ($test->{is_foreign_key} ? '' : 'not') . ' a foreign_key'
);
is(
$f1->is_auto_increment,
$test->{is_auto_increment},
"$t_name is " . ($test->{is_auto_increment} ? '' : 'not ') . 'an auto_increment'
);
is($f1->comments, $test->{comments}, "$t_name comments");
is_deeply({ $f1->extra }, $test->{extra}, "$t_name extra");
}
sub constraint_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "constraint");
is($obj->name, $test->{name}, "${t_name}Constraint '$test->{name}'");
is($obj->type, $test->{type}, "$t_name type is '$test->{type}'");
is($obj->deferrable, $test->{deferrable}, "$t_name " . ($test->{deferrable} ? 'can' : 'cannot') . ' be deferred');
is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
is($obj->table->name, $test->{table}, "$t_name table is '$test->{table}'");
is($obj->expression, $test->{expression}, "$t_name expression is '$test->{expression}'");
is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'");
is($obj->reference_table, $test->{reference_table}, "$t_name reference_table is '$test->{reference_table}'");
is_deeply(
[ $obj->reference_fields ],
$test->{reference_fields},
"$t_name reference_fields are '" . join(",", @{ $test->{reference_fields} }) . "'"
);
is($obj->match_type, $test->{match_type}, "$t_name match_type is '$test->{match_type}'");
is($obj->on_delete, $test->{on_delete}, "$t_name on_delete is '$test->{on_delete}'");
is($obj->on_update, $test->{on_update}, "$t_name on_update is '$test->{on_update}'");
is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
}
sub index_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "index");
is($obj->name, $test->{name}, "${t_name}Index '$test->{name}'");
is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
is($obj->type, $test->{type}, "$t_name type is '$test->{type}'");
is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'");
is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
}
sub trigger_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "trigger");
is($obj->name, $test->{name}, "${t_name}Trigger '$test->{name}'");
is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
is(
$obj->perform_action_when,
$test->{perform_action_when},
"$t_name perform_action_when is '$test->{perform_action_when}'"
);
is(
join(',', $obj->database_events),
$test->{database_events},
sprintf("%s database_events is '%s'", $t_name, $test->{'database_events'},)
);
is($obj->on_table, $test->{on_table}, "$t_name on_table is '$test->{on_table}'");
is($obj->scope, $test->{scope}, "$t_name scope is '$test->{scope}'")
if exists $test->{scope};
is($obj->action, $test->{action}, "$t_name action is '$test->{action}'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
}
sub view_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "view");
#isa_ok( $v, 'SQL::Translator::Schema::View', 'View' );
is($obj->name, $test->{name}, "${t_name}View '$test->{name}'");
is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
is($obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'");
is_deeply([ $obj->fields ], $test->{fields}, "$t_name fields are '" . join(",", @{ $test->{fields} }) . "'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
}
sub procedure_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "procedure");
#isa_ok( $v, 'SQL::Translator::Schema::View', 'View' );
is($obj->name, $test->{name}, "${t_name}Procedure '$test->{name}'");
is($obj->sql, $test->{sql}, "$t_name sql is '$test->{sql}'");
is_deeply([ $obj->parameters ],
$test->{parameters}, "$t_name parameters are '" . join(",", @{ $test->{parameters} }) . "'");
is($obj->comments, $test->{comments}, "$t_name comments is '$test->{comments}'");
is($obj->owner, $test->{owner}, "$t_name owner is '$test->{owner}'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
}
sub table_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "table");
my %arg = %$test;
my $tbl_name = $arg{name} || die "Need a table name to test.";
is($obj->{name}, $arg{name}, "${t_name}Table '$arg{name}'");
is_deeply([ $obj->options ], $test->{options}, "$t_name options are '" . join(",", @{ $test->{options} }) . "'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
# Fields
if ($arg{fields}) {
my @fldnames = map { $_->{name} } @{ $arg{fields} };
is_deeply([ map { $_->name } $obj->get_fields ],
[@fldnames], "${t_name} field names are " . join(", ", @fldnames));
foreach (@{ $arg{fields} }) {
my $f_name = $_->{name} || die "Need a field name to test.";
next unless my $fld = $obj->get_field($f_name);
field_ok($fld, $_, $name);
}
} else {
is(scalar($obj->get_fields), undef, "${t_name} has no fields.");
}
# Constraints and Indices
_test_kids(
$obj, $test, $name,
{
constraint => 'constraints',
index => 'indices',
}
);
}
sub _test_kids {
my ($obj, $test, $name, $kids) = @_;
my $t_name = t_name($name);
my $obj_name = ref $obj;
($obj_name) = $obj_name =~ m/^.*::(.*)$/;
while (my ($object_type, $plural) = each %$kids) {
next unless defined $test->{$plural};
if (my @tests = @{ $test->{$plural} }) {
my $meth = "get_$plural";
my @objects = $obj->$meth;
is(scalar(@objects), scalar(@tests), "${t_name}$obj_name has " . scalar(@tests) . " $plural");
for my $object (@objects) {
my $ans = { lc($obj_name) => $obj->name, %{ shift @tests } };
my $meth = "${object_type}_ok";
{
no strict 'refs';
$meth->($object, $ans, $name);
}
}
}
}
}
sub schema_ok {
my ($obj, $test, $name) = @_;
my $t_name = t_name($name);
default_attribs($test, "schema");
is($obj->name, $test->{name}, "${t_name}Schema name is '$test->{name}'");
is($obj->database, $test->{database}, "$t_name database is '$test->{database}'");
is_deeply({ $obj->extra }, $test->{extra}, "$t_name extra");
is($obj->is_valid, $test->{is_valid}, "$t_name is " . ($test->{is_valid} ? '' : 'not ') . 'valid');
# Tables
if ($test->{tables}) {
is_deeply(
[ map { $_->name } $obj->get_tables ],
[ map { $_->{name} } @{ $test->{tables} } ],
"${t_name} table names match"
);
foreach (@{ $test->{tables} }) {
my $t_name = $_->{name} || die "Need a table name to test.";
table_ok($obj->get_table($t_name), $_, $name);
}
} else {
is(scalar($obj->get_tables), undef, "${t_name} has no tables.");
}
# Procedures, Triggers, Views
_test_kids(
$obj, $test, $name,
{
procedure => 'procedures',
trigger => 'triggers',
view => 'views',
}
);
}
# maybe_plan($ntests, @modules)
#
# Calls plan $ntests if @modules can all be loaded; otherwise,
# calls skip_all with an explanation of why the tests were skipped.
sub maybe_plan {
my ($ntests, @modules) = @_;
my @errors;
for my $module (@modules) {
eval "use $module;";
next if !$@;
if ($@ =~ /Can't locate (\S+)/) {
my $mod = $1;
$mod =~ s/\.pm$//;
$mod =~ s#/#::#g;
push @errors, $mod;
} elsif ($@ =~ /([\w\:]+ version [\d\.]+) required.+?this is only version/) {
push @errors, $1;
} elsif ($@ =~ /Can't load .+? for module .+?DynaLoader\.pm/i) {
push @errors, $module;
} else {
(my $err = $@) =~ s/\n+/\\n/g; # Can't have newlines in the skip message
push @errors, "$module: $err";
}
}
if (@errors) {
my $msg = sprintf "Missing dependenc%s: %s", @errors == 1 ? 'y' : 'ies', join ", ", @errors;
plan skip_all => $msg;
}
return unless defined $ntests;
if ($ntests ne 'no_plan') {
plan tests => $ntests;
} else {
plan 'no_plan';
}
}
1; # compile please ===========================================================
__END__
=pod
=head1 SYNOPSIS
# t/magic.t
use FindBin '$Bin';
use Test::More;
use Test::SQL::Translator;
# Run parse
my $sqlt = SQL::Translator->new(
parser => "Magic",
filename => "$Bin/data/magic/test.magic",
...
);
...
my $schema = $sqlt->schema;
# Test the table it produced.
table_ok( $schema->get_table("Customer"), {
name => "Customer",
fields => [
{
name => "CustomerID",
data_type => "INT",
size => 12,
default_value => undef,
is_nullable => 0,
is_primary_key => 1,
},
{
name => "bar",
data_type => "VARCHAR",
size => 255,
is_nullable => 0,
},
],
constraints => [
{
type => "PRIMARY KEY",
fields => "CustomerID",
},
],
indices => [
{
name => "barindex",
fields => ["bar"],
},
],
});
=head1 DESCRIPTION
Provides a set of Test::More tests for Schema objects. Testing a parsed
schema is then as easy as writing a Perl data structure describing how you
expect the schema to look. Also provides C<maybe_plan> for conditionally running
tests based on their dependencies.
The data structures given to the test subs don't have to include all the
possible values, only the ones you expect to have changed. Any left out will be
tested to make sure they are still at their default value. This is a useful
check that your parser hasn't accidentally set schema values you didn't
expect it to.
For an example of the output run the F test.
=head1 Tests
All the tests take a first arg of the schema object to test, followed by a
hash ref describing how you expect that object to look (you only need give the
attributes you expect to have changed from the default).
The 3rd arg is an optional test name to prepend to all the generated test
names.
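For example, reusing the C<Customer> table from the SYNOPSIS above, a single
field can be checked on its own (the attribute values here are purely
illustrative):

  field_ok(
      $schema->get_table("Customer")->get_field("CustomerID"),
      {
          name           => "CustomerID",
          data_type      => "INT",
          is_primary_key => 1,
      },
      "Customer"
  );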
=head2 table_ok
=head2 field_ok
=head2 constraint_ok
=head2 index_ok
=head2 view_ok
=head2 trigger_ok
=head2 procedure_ok
=head1 CONDITIONAL TESTS
The C<maybe_plan> function handles conditionally running an individual
test. It is here to enable running the test suite even when dependencies
are missing; not having (for example) GraphViz installed should not keep
the test suite from passing.
C<maybe_plan> takes the number of tests to (maybe) run, and a list of
modules on which test execution depends:
maybe_plan(180, 'SQL::Translator::Parser::MySQL');
If one of C<SQL::Translator::Parser::MySQL>'s dependencies does not exist,
then the test will be skipped.
Instead of a number of tests, you can pass C<undef> if you're using
C<done_testing()>, or C<'no_plan'> if you don't want a plan at all.
=head1 EXPORTS
table_ok, field_ok, constraint_ok, index_ok, view_ok, trigger_ok, procedure_ok,
maybe_plan
=head1 TODO
=over 4
=item Test the tests!
=item Test Count Constants
Constants to give the number of tests each C<*_ok> sub uses, e.g. how many tests
does a given C<*_ok> sub run? Can then use these to set up the test plan easily.
=item Test skipping
As the test subs wrap up lots of tests in one call, you can't skip individual
tests, only whole sets, e.g. a whole table or field.
We could add C<skip_*> items to the test hashes to allow per-test skips, e.g.
skip_is_primary_key => "Need to fix primary key parsing.",
=item yaml test specs
Maybe have the test subs also accept YAML for the test hash ref, as it is much
nicer for writing big data structures. We could then define tests as an input
schema file and a test YAML file to compare it against.
=back
=head1 AUTHOR
Mark D. Addison E<lt>mark.addison@itn.co.ukE<gt>,
Darren Chamberlain E<lt>darren@cpan.orgE<gt>.
Thanks to Ken Y. Clark for the original table and field test code taken from
his mysql test.
=head1 SEE ALSO
perl(1), SQL::Translator, SQL::Translator::Schema, Test::More.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Generator/Role/Quote.pm
package SQL::Translator::Generator::Role::Quote;
use Moo::Role;
=head1 NAME
SQL::Translator::Generator::Role::Quote - Role for dealing with identifier
quoting.
=head1 DESCRIPTION
I
=cut
requires qw(quote_chars name_sep);
has escape_char => (
is => 'ro',
lazy => 1,
clearer => 1,
default => sub { $_[0]->quote_chars->[-1] },
);
sub quote {
my ($self, $label) = @_;
return '' unless defined $label;
return $$label if ref($label) eq 'SCALAR';
my @quote_chars = @{ $self->quote_chars };
return $label unless scalar @quote_chars;
my ($l, $r);
if (@quote_chars == 1) {
($l, $r) = (@quote_chars) x 2;
} elsif (@quote_chars == 2) {
($l, $r) = @quote_chars;
} else {
die 'too many quote chars!';
}
my $sep = $self->name_sep || '';
my $esc = $self->escape_char;
# parts containing * are naturally unquoted
join $sep, map { (my $n = $_) =~ s/\Q$r/$esc$r/g; "$l$n$r" } ($sep ? split(/\Q$sep\E/, $label) : $label);
}
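# Illustrative behaviour, using made-up identifiers: with quote_chars of
# [qw(` `)] and a name_sep of '.', quote('foo') returns `foo`, while
# quote('foo.bar') is split on the separator and each part is quoted,
# giving `foo`.`bar`. A scalar ref such as \'now()' is passed through
# unquoted, and any right quote chars embedded in a name are doubled up
# via escape_char.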
sub quote_string {
my ($self, $string) = @_;
return $string unless defined $string;
$string =~ s/'/''/g;
return qq{'$string'};
}
1;
=head1 AUTHORS
See the included AUTHORS file:
L
=head1 COPYRIGHT
Copyright (c) 2012 the SQL::Translator L as listed above.
=head1 LICENSE
This code is free software and may be distributed under the same terms as Perl
itself.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Generator/Role/DDL.pm
package SQL::Translator::Generator::Role::DDL;
=head1 NAME
SQL::Translator::Generator::Role::DDL - Role implementing common parts of
DDL generation.
=head1 DESCRIPTION
I
=cut
use Moo::Role;
use SQL::Translator::Utils qw(header_comment);
use Scalar::Util;
requires '_build_type_map';
requires '_build_numeric_types';
requires '_build_unquoted_defaults';
requires '_build_sizeless_types';
requires 'quote';
requires 'quote_string';
has type_map => (is => 'lazy',);
has numeric_types => (is => 'lazy',);
has sizeless_types => (is => 'lazy',);
has unquoted_defaults => (is => 'lazy',);
has add_comments => (is => 'ro',);
has add_drop_table => (is => 'ro',);
# would also be handy to have a required size set if there is such a thing
sub field_name { $_[0]->quote($_[1]->name) }
sub field_comments {
($_[1]->comments ? ('-- ' . $_[1]->comments . "\n ") : ())
}
sub table_comments {
my ($self, $table) = @_;
if ($self->add_comments) {
return ("", "--", "-- Table: " . $self->quote($table->name) . "", "--", map "-- $_", $table->comments);
} else {
return ();
}
}
sub field_nullable { ($_[1]->is_nullable ? $_[0]->nullable : 'NOT NULL') }
sub field_default {
my ($self, $field, $exceptions) = @_;
my $default = $field->default_value;
return () if !defined $default;
$default = \"$default"
if $exceptions
and !ref $default
and $exceptions->{$default};
if (ref $default) {
$default = $$default;
} elsif (!($self->numeric_types->{ lc($field->data_type) } && Scalar::Util::looks_like_number($default))) {
$default = $self->quote_string($default);
}
return ("DEFAULT $default");
}
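# Behaviour summary: defaults for types listed in numeric_types that look
# like numbers are emitted as-is (e.g. DEFAULT 0); other plain values are
# quoted via quote_string (e.g. DEFAULT 'active'); scalar refs, or values
# listed in the optional $exceptions hashref (such as CURRENT_TIMESTAMP),
# are emitted verbatim without quoting.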
sub field_type {
my ($self, $field) = @_;
my $field_type = $field->data_type;
($self->type_map->{$field_type} || $field_type) . $self->field_type_size($field);
}
sub field_type_size {
my ($self, $field) = @_;
(
$field->size && !$self->sizeless_types->{ $field->data_type }
? '(' . $field->size . ')'
: ''
);
}
sub fields {
my ($self, $table) = @_;
(map $self->field($_), $table->get_fields);
}
sub indices {
my ($self, $table) = @_;
(map $self->index($_), $table->get_indices);
}
sub nullable {'NULL'}
sub header_comments { header_comment() . "\n" if $_[0]->add_comments }
1;
=head1 AUTHORS
See the included AUTHORS file:
L
=head1 COPYRIGHT
Copyright (c) 2012 the SQL::Translator L as listed above.
=head1 LICENSE
This code is free software and may be distributed under the same terms as Perl
itself.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/PostgreSQL.pm
package SQL::Translator::Generator::DDL::PostgreSQL;
=head1 NAME
SQL::Translator::Generator::DDL::PostgreSQL - A Moo based PostgreSQL DDL generation
engine.
=head1 DESCRIPTION
I
=cut
use Moo;
has quote_chars => (
is => 'rw',
default => sub { +[qw(" ")] },
trigger => sub { $_[0]->clear_escape_char },
);
with 'SQL::Translator::Generator::Role::Quote';
sub name_sep {q(.)}
1;
=head1 AUTHORS
See the included AUTHORS file:
L
=head1 COPYRIGHT
Copyright (c) 2012 the SQL::Translator L as listed above.
=head1 LICENSE
This code is free software and may be distributed under the same terms as Perl
itself.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/SQLite.pm
package SQL::Translator::Generator::DDL::SQLite;
=head1 NAME
SQL::Translator::Generator::DDL::SQLite - A Moo based SQLite DDL generation
engine.
=head1 DESCRIPTION
I
=cut
use Moo;
has quote_chars => (is => 'ro', default => sub { +[qw(" ")] });
with 'SQL::Translator::Generator::Role::Quote';
with 'SQL::Translator::Generator::Role::DDL';
sub name_sep {q(.)}
sub _build_type_map {
+{
set => 'varchar',
bytea => 'blob',
};
}
sub _build_sizeless_types {
+{
text => 1,
blob => 1,
};
}
sub _build_numeric_types {
+{
int => 1,
integer => 1,
tinyint => 1,
smallint => 1,
mediumint => 1,
bigint => 1,
'unsigned big int' => 1,
int2 => 1,
int8 => 1,
numeric => 1,
decimal => 1,
boolean => 1,
real => 1,
double => 1,
'double precision' => 1,
float => 1,
};
}
sub _build_unquoted_defaults {
+{
NULL => 1,
'now()' => 1,
CURRENT_TIMESTAMP => 1,
};
}
sub nullable { () }
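# _ipk: true when the field is the sole column of the table's primary key
# and has an integer-ish data type, in which case the field() method below
# emits SQLite's special "INTEGER PRIMARY KEY" (rowid alias) form instead
# of the regular type plus a separate PK constraint.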
sub _ipk {
my ($self, $field) = @_;
my $pk = $field->table->primary_key;
my @pk_fields = $pk ? $pk->fields : ();
$field->is_primary_key
&& scalar @pk_fields == 1
&& ($field->data_type =~ /int(eger)?$/i
|| ($field->data_type =~ /^number?$/i && $field->size !~ /,/));
}
sub field_autoinc {
my ($self, $field) = @_;
return (
(
($field->extra->{auto_increment_type} || '') eq 'monotonic'
and $self->_ipk($field)
and $field->is_auto_increment
)
? 'AUTOINCREMENT'
: ''
);
}
sub field {
my ($self, $field) = @_;
return join ' ', $self->field_comments($field), $self->field_name($field),
(
$self->_ipk($field)
? ('INTEGER PRIMARY KEY')
: ($self->field_type($field))
),
($self->field_autoinc($field) || ()), $self->field_nullable($field),
$self->field_default(
$field,
{
NULL => 1,
'now()' => 1,
'CURRENT_TIMESTAMP' => 1,
}
),
;
}
1;
=head1 AUTHORS
See the included AUTHORS file:
L
=head1 COPYRIGHT
Copyright (c) 2012 the SQL::Translator L as listed above.
=head1 LICENSE
This code is free software and may be distributed under the same terms as Perl
itself.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/MySQL.pm
package SQL::Translator::Generator::DDL::MySQL;
=head1 NAME
SQL::Translator::Generator::DDL::MySQL - A Moo based MySQL DDL generation
engine.
=head1 DESCRIPTION
I
=cut
use Moo;
has quote_chars => (is => 'ro', default => sub { +[qw(` `)] });
with 'SQL::Translator::Generator::Role::Quote';
sub name_sep {q(.)}
1;
SQL-Translator-1.65/lib/SQL/Translator/Generator/DDL/SQLServer.pm
package SQL::Translator::Generator::DDL::SQLServer;
=head1 NAME
SQL::Translator::Generator::DDL::SQLServer - A Moo based MS SQL Server DDL
generation engine.
=head1 DESCRIPTION
I
=cut
use Moo;
use SQL::Translator::Schema::Constants;
with 'SQL::Translator::Generator::Role::Quote';
with 'SQL::Translator::Generator::Role::DDL';
sub quote_chars { [qw([ ])] }
sub name_sep {q(.)}
sub _build_numeric_types {
+{ int => 1, };
}
sub _build_unquoted_defaults {
+{ NULL => 1, };
}
sub _build_type_map {
+{
date => 'datetime',
'time' => 'datetime',
};
}
sub _build_sizeless_types {
+{ map { $_ => 1 } qw( tinyint smallint int integer bigint text bit image datetime ) };
}
sub field {
my ($self, $field) = @_;
return join ' ', $self->field_name($field),
($self->field_type($field) || die 'type is required'),
$self->field_autoinc($field),
$self->field_nullable($field),
$self->field_default($field),;
}
sub field_autoinc { ($_[1]->is_auto_increment ? 'IDENTITY' : ()) }
sub primary_key_constraint {
'CONSTRAINT '
. $_[0]->quote($_[1]->name || $_[1]->table->name . '_pk')
. ' PRIMARY KEY ('
. join(', ', map $_[0]->quote($_), $_[1]->fields) . ')';
}
sub index {
'CREATE INDEX '
. $_[0]->quote($_[1]->name || $_[1]->table->name . '_idx') . ' ON '
. $_[0]->quote($_[1]->table->name) . ' ('
. join(', ', map $_[0]->quote($_), $_[1]->fields) . ');';
}
sub unique_constraint_single {
my ($self, $constraint) = @_;
'CONSTRAINT '
. $self->unique_constraint_name($constraint)
. ' UNIQUE ('
. join(', ', map $self->quote($_), $constraint->fields) . ')';
}
sub unique_constraint_name {
my ($self, $constraint) = @_;
$self->quote($constraint->name || $constraint->table->name . '_uc');
}
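# Unique constraints involving nullable columns are emitted as filtered
# unique indexes (WHERE ... IS NOT NULL) rather than UNIQUE constraints,
# because SQL Server treats NULLs as equal for uniqueness and would
# otherwise allow at most one NULL row.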
sub unique_constraint_multiple {
my ($self, $constraint) = @_;
'CREATE UNIQUE NONCLUSTERED INDEX '
. $self->unique_constraint_name($constraint) . ' ON '
. $self->quote($constraint->table->name) . ' ('
. join(', ', map $self->quote($_), $constraint->fields) . ')'
. ' WHERE '
. join(' AND ', map $self->quote($_->name) . ' IS NOT NULL', grep { $_->is_nullable } $constraint->fields) . ';';
}
sub foreign_key_constraint {
my ($self, $constraint) = @_;
my $on_delete = uc($constraint->on_delete || '');
my $on_update = uc($constraint->on_update || '');
# The default implicit constraint action in MSSQL is RESTRICT
# but you can not specify it explicitly. Go figure :)
for (map uc $_ || '', $on_delete, $on_update) {
undef $_ if $_ eq 'RESTRICT';
}
'ALTER TABLE '
. $self->quote($constraint->table->name)
. ' ADD CONSTRAINT '
. $self->quote($constraint->name || $constraint->table->name . '_fk')
. ' FOREIGN KEY' . ' ('
. join(', ', map $self->quote($_), $constraint->fields)
. ') REFERENCES '
. $self->quote($constraint->reference_table) . ' ('
. join(', ', map $self->quote($_), $constraint->reference_fields) . ')'
. (
$on_delete && $on_delete ne "NO ACTION" ? ' ON DELETE ' . $on_delete
: ''
)
. (
$on_update && $on_update ne "NO ACTION" ? ' ON UPDATE ' . $on_update
: ''
) . ';';
}
sub enum_constraint_name {
my ($self, $field_name) = @_;
$self->quote($field_name . '_chk');
}
sub enum_constraint {
my ($self, $field_name, $vals) = @_;
return ('CONSTRAINT '
. $self->enum_constraint_name($field_name)
. ' CHECK ('
. $self->quote($field_name) . ' IN ('
. join(',', map $self->quote_string($_), @$vals)
. '))');
}
sub constraints {
my ($self, $table) = @_;
(
map $self->enum_constraint($_->name, { $_->extra }->{list} || []),
grep { 'enum' eq lc $_->data_type } $table->get_fields
),
(map $self->primary_key_constraint($_), grep { $_->type eq PRIMARY_KEY } $table->get_constraints),
(
map $self->unique_constraint_single($_),
grep {
$_->type eq UNIQUE
&& !grep { $_->is_nullable }
$_->fields
} $table->get_constraints
),
;
}
sub table {
my ($self, $table) = @_;
join("\n", $self->table_comments($table), '')
. join("\n\n",
'CREATE TABLE '
. $self->quote($table->name) . " (\n"
. join(",\n", map {" $_"} $self->fields($table), $self->constraints($table),) . "\n);",
$self->unique_constraints_multiple($table), $self->indices($table),);
}
sub unique_constraints_multiple {
my ($self, $table) = @_;
(
map $self->unique_constraint_multiple($_),
grep {
$_->type eq UNIQUE
&& grep { $_->is_nullable }
$_->fields
} $table->get_constraints
);
}
sub drop_table {
my ($self, $table) = @_;
my $name = $table->name;
my $q_name = $self->quote($name);
"IF EXISTS (SELECT name FROM sysobjects WHERE name = '$name' AND type = 'U')" . " DROP TABLE $q_name;";
}
sub remove_table_constraints {
my ($self, $table) = @_;
my $name = $table->name;
my $q_name = $self->quote($name);
"IF EXISTS (SELECT name FROM sysobjects WHERE name = '$name' AND type = 'U')"
. " ALTER TABLE $q_name NOCHECK CONSTRAINT all;";
}
sub drop_tables {
my ($self, $schema) = @_;
if ($self->add_drop_table) {
my @tables = sort { $b->order <=> $a->order } $schema->get_tables;
return join "\n",
(
(
$self->add_comments
? ('--', '-- Turn off constraints', '--', '',)
: ()
),
(map $self->remove_table_constraints($_), @tables),
($self->add_comments ? ('--', '-- Drop tables', '--', '',) : ()),
(map $self->drop_table($_), @tables),
);
}
return '';
}
sub foreign_key_constraints {
my ($self, $schema) = @_;
(
map $self->foreign_key_constraint($_), grep { $_->type eq FOREIGN_KEY }
map $_->get_constraints, $schema->get_tables
);
}
sub schema {
my ($self, $schema) = @_;
$self->header_comments
. $self->drop_tables($schema)
. join("\n\n", map $self->table($_), grep { $_->name } $schema->get_tables) . "\n"
. join "\n", $self->foreign_key_constraints($schema);
}
1;
=head1 AUTHORS
See the included AUTHORS file:
L
=head1 COPYRIGHT
Copyright (c) 2012 the SQL::Translator L as listed above.
=head1 LICENSE
This code is free software and may be distributed under the same terms as Perl
itself.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser.pm
package SQL::Translator::Parser;
use strict;
use warnings;
our $VERSION = '1.65';
sub parse {""}
1;
# ----------------------------------------------------------------------
# Enough! or Too much.
# William Blake
# ----------------------------------------------------------------------
=pod
=head1 NAME
SQL::Translator::Parser - describes how to write a parser
=head1 DESCRIPTION
Parser modules that get invoked by SQL::Translator need to implement a
single function: B<parse>. This function will be called by the
SQL::Translator instance as $class::parse($tr, $data_as_string), where
$tr is a SQL::Translator instance. Other than that, the classes are
free to define any helper functions, or use any design pattern
internally that makes the most sense.
When the parser has determined what exists, it will communicate the
structure to the producer through the SQL::Translator::Schema object.
This object can be retrieved from the translator (the first argument
passed to B<parse>) by calling the B<schema> method:
my $schema = $tr->schema;
The Schema object has methods for adding tables, fields, indices, etc.
For more information, consult the docs for SQL::Translator::Schema and
its related modules. For examples of how this works, examine the
source code for existing SQL::Translator::Parser::* modules.
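As a rough sketch only (the module name and the colon-separated input format
below are invented for illustration and are not part of this distribution), a
minimal parser might look like this:

  package SQL::Translator::Parser::MyFormat;
  use strict;
  use warnings;

  sub parse {
      my ($tr, $data) = @_;
      my $schema = $tr->schema;

      # One table per line: "table_name: col1, col2, ..."
      for my $line (split /\n/, $data) {
          my ($table_name, $cols) = split /:\s*/, $line, 2;
          my $table = $schema->add_table(name => $table_name)
              or die $schema->error;
          for my $col (split /,\s*/, ($cols || '')) {
              $table->add_field(
                  name      => $col,
                  data_type => 'varchar',
                  size      => 255,
              ) or die $table->error;
          }
      }
      return 1;
  }

  1;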
=head1 AUTHORS
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>,
Darren Chamberlain E<lt>darren@cpan.orgE<gt>.
=head1 SEE ALSO
perl(1), SQL::Translator, SQL::Translator::Schema.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Manual.pod
=head1 NAME
SQL::Translator::Manual - sqlfairy user manual
=head1 SYNOPSIS
SQL::Translator (AKA "SQLFairy") is a collection of modules for
transforming (mainly) SQL DDL files into a variety of other formats,
including other SQL dialects, documentation, images, and code. In
this manual, we will attempt to address how to use SQLFairy for common
tasks. For a lower-level discussion of how the code works, please
read the documentation for L<SQL::Translator>.
It may prove helpful to have a general understanding of the SQLFairy
code before continuing. The code can be broken into three conceptual
groupings:
=over 4
=item * Parsers
The parsers are responsible for reading the input files and describing
them to the Schema object middleware.
=item * Producers
The producers create the output as described by the Schema middleware.
=item * Schema objects
The Schema objects bridge the communication between the Parsers and
Producers by representing any parsed file through a standard set of
generic objects to represent concepts like Tables, Fields (columns),
Indices, Constraints, etc.
=back
It's not necessary to understand how to write or manipulate any
of these for most common tasks, but you should be aware of the concepts
as they will be referenced later in this document.
=head1 SQLFAIRY SCRIPTS
Most common tasks can be accomplished through the use of the script
interfaces to the SQL::Translator code. All SQLFairy scripts begin
with "sqlt." Here are the scripts and a description of what they each
do:
=over 4
=item * sqlt
This is the main interface for text-to-text translations, e.g.,
converting a MySQL schema to Oracle.
=item * sqlt-diagram
This is a tailored interface for the Diagram producer and its myriad
options.
=item * sqlt-diff
This script will examine two schemas and report the SQL commands
(ALTER, CREATE) needed to turn the first schema into the second.
=item * sqlt-dumper
This script generates a Perl script that can be used to connect to a
database and dump the data in each table in different formats, similar
to the "mysqldump" program.
=item * sqlt-graph
This is an interface to the GraphViz visualization tool and its myriad
options.
=item * sqlt.cgi
This is a CGI script that presents an HTML form for uploading or
pasting a schema and choosing an output and the output options.
=back
To read the full documentation for each script, use "perldoc" (or
execute any of the command-line scripts with the "--help" flag).
=head1 CONVERTING SQL DIALECTS
Probably the most common task SQLFairy is used for is to convert one
dialect of SQL to another. If you have a text description of an SQL
database (AKA a "DDL" -- "Data Definition Language"), then you should
use the "sqlt" script with switches to indicate the parser and
producer and the name of the text file as the final argument. For
example, to convert the "foo.sql" MySQL schema to a version suitable
for PostgreSQL, you would do the following:
$ sqlt -f MySQL -t PostgreSQL foo.sql > foo-pg.sql
The "from" and "to" options are case-sensitive and must match exactly
the names of the Parser and Producer classes in SQL::Translator. For
a complete listing of your options, execute "sqlt" with the "--list"
flag.
=head1 EXTRACT SQL SCHEMAS DIRECTLY FROM DATABASE
It is possible to extract some schemas directly from the database
without parsing a text file (the "foo.sql" in the above example).
This can prove significantly faster than parsing a text file. To
do this, use the "DBI" parser and provide the necessary arguments to
connect to the database and indicate the producer class, like so:
$ sqlt -f DBI --dsn dbi:mysql:FOO --db-user guest \
--db-password p4ssw0rd -t PostgreSQL > foo
The "--list" option to "sqlt" will show the databases supported by
DBI parsers.
=head1 HANDLING NON-SQL DATA
Certain structured document formats can be easily thought of as
tables. SQLFairy can parse Microsoft Excel spreadsheets and
arbitrarily delimited text files just as if they were schemas which
contained only one table definition. The column names are normalized
to something sane for most databases (whitespace is converted to
underscores and non-word characters are removed), and the data in each
field is scanned to determine the appropriate data type (character,
integer, or float) and size. For instance, to convert a
comma-separated file to an SQLite database, do the following:
$ sqlt -f xSV --fs ',' -t SQLite foo.csv > foo-sqlite.sql
Additionally, there is a non-SQL representation of relational schemas, namely
XML. At present, the only XML supported is our own version; however, it
would be fairly easy to add an XML parser for something like the TorqueDB
(http://db.apache.org/torque/) project. The actual parsing of XML should be
trivial given the number of XML parsers available, so all that would be left
would be to map the specific concepts in the source file to the Schema objects
in SQLFairy.
To convert a schema in SQLFairy's XML dialect to Oracle, do the following:
$ sqlt -f XML-SQLFairy -t Oracle foo.xml > foo-oracle.sql
=head1 SERIALIZING SCHEMAS
Parsing a schema is generally the most computationally expensive
operation performed by SQLFairy, so it may behoove you to serialize a
parsed schema if you need to perform repeated conversions. For
example, as part of a build process the author converts a MySQL schema
first to YAML, then to PostgreSQL, Oracle, SQLite and Sybase.
Additionally, a variety of documentation in HTML and images is produced.
This can be accomplished like so:
$ sqlt -f MySQL -t YAML schema-mysql.sql > schema.yaml
$ sqlt -f YAML -t Oracle schema.yaml > schema-oracle.sql
$ sqlt -f YAML -t PostgreSQL schema.yaml > schema-postgresql.sql
$ ...
SQLFairy has three serialization producers, none of which is superior
to the others in their description of a schema.
=over 4
=item * XML-SQLFairy
This is the aforementioned XML format. It is essentially a direct
mapping of the Schema objects into XML. This can also provide a very
convenient bridge to describing a schema to a non-Perl application.
Providing a producer argument to "sqlt" of just "XML" will default to
using "XML-SQLFairy."
=item * Storable
This producer stores the Schema object using Perl's Storable.pm module
available on CPAN.
=item * YAML
This producer serializes the Schema object with the very readable
structured data format of YAML (http://www.yaml.org/). Earlier
examples show serializing to YAML.
=back
=head1 VISUALIZING SQL SCHEMAS
The visualization tools in SQLFairy can graphically represent the
tables, fields, datatypes and sizes, constraints, and foreign key
relationships in a very compact and intuitive format. This can be
very beneficial in understanding and documenting large or small schemas.
Two producers in SQLFairy will create pseudo-E/R (entity-relationship)
diagrams:
=over 4
=item * Diagram
The first visualization tool in SQLFairy, this producer uses libgd to
draw a picture of the schema. The tables are evenly distributed in
definition order running in columns (i.e., no graphing algorithms are
used), so many of the lines showing the foreign key relationships
may cross over each other and the table boxes. Please read the
documentation of the "sqlt-diagram" script for all the options
available to this producer.
=item * GraphViz
The layout of the GraphViz producer is far superior to the Diagram
producer as it uses the Graphviz binary from Bell Labs to create very
professional-looking graphs. There are several different layout
algorithms and node shapes available. Please see the documentation of
the "sqlt-graph" script for more information.
=back
=head1 AUTOMATED CODE-GENERATION
Given that so many applications interact with SQL databases, it's no
wonder that people have automated code to deal with this interaction.
Class::DBI from CPAN is one such module that allows a developer to
describe the relationships between tables and fields in class
declarations and then generates all the SQL to interact (SELECT,
UPDATE, DELETE, INSERT statements) at runtime. Obviously, the schema
already describes itself, so it only makes sense that you should be
able to generate this kind of code directly from the schema. The
"ClassDBI" producer in SQLFairy does just this, creating a Perl module
that inherits from Class::DBI and sets up most of the code needed to
interact with the database. Here is an example of how to do this:
$ sqlt -f MySQL -t ClassDBI foo.sql > Foo.pm
Then simply edit Foo.pm as needed and include it in your code.
=head1 CREATING A DATA DUMPER SCRIPT
The Dumper producer creates a Perl script that can select the fields
in each table and then create "INSERT" statements for each record in
the database similar to the output generated by MySQL's "mysqldump"
program:
$ sqlt -f YAML -t Dumper --dumper-db-user guest \
> --dumper-db-pass p4ssw0rd --dumper-dsn dbi:mysql:FOO \
> foo.yaml > foo-dumper.pl
And then execute the resulting script to dump the data:
$ chmod +x foo-dumper.pl
$ ./foo-dumper.pl > foo-data.sql
The dumper script also has a number of options available. Execute the
script with the "--help" flag to read about them.
=head1 DOCUMENTING WITH SQL::TRANSLATOR
SQLFairy offers two producers to help document schemas:
=over 4
=item * HTML
This producer creates a single HTML document which uses HTML
formatting to describe the Schema objects and to create hyperlinks on
foreign key relationships. This can be a surprisingly useful
documentation aid as it creates a very readable format that allows one
to jump easily to specific tables and fields. It's also possible to
plug in your own CSS to further control the presentation of the HTML.
=item * POD
This is arguably not that useful of a producer by itself, but the
number of POD-conversion tools could be used to further transform the
POD into something more interesting. The schema is basically
represented in POD sections where tables are broken down into fields,
indices, constraints, foreign keys, etc.
=back
=head1 TEMPLATE-BASED MANIPULATION OF SCHEMA OBJECTS
All of the producers which create text output could have been coded
using a templating system to mix in the dynamic output with static
text. CPAN offers several diverse templating systems, but few are as
powerful as Template Toolkit (http://www.template-toolkit.org/). You
can easily create your own producer without writing any Perl code at
all simply by writing a template using Template Toolkit's syntax. The
template will be passed a reference to the Schema object briefly
described at the beginning of this document and mentioned many times
throughout. For example, you could create a template that simply
prints the name of each table and field that looks like this:
# file: schema.tt
[% FOREACH table IN schema.get_tables %]
Table: [% table.name %]
Fields:
[% FOREACH field IN table.get_fields -%]
[% field.name %]
[% END -%]
[% END %]
And then process it like so:
$ sqlt -f YAML -t TTSchema --template schema.tt foo.yaml
To create output like this:
Table: foo
Fields:
foo_id
foo_name
For more information on Template Toolkit, please install the
"Template" module and read the POD.
=head1 FINDING THE DIFFERENCES BETWEEN TWO SCHEMAS
As mentioned above, the "sqlt-diff" script examines two schemas and
creates SQL schema modification statements that can be used to
transform the first schema into the second. The flag syntax is
somewhat quirky:
$ sqlt-diff foo-v1.sql=MySQL foo-v2.sql=Oracle > diff.sql
As demonstrated, the schemas need not even be from the same vendor,
though this is likely to produce some spurious results as
datatypes are not currently viewed as equivalent unless they match
exactly, even if they would be converted to the same type. For example,
MySQL's "integer" data type would be converted to Oracle's "number,"
but the differ isn't quite smart enough yet to figure this out. Also,
as the SQL to ALTER a field definition varies from database vendor to
vendor, these statements are made using just the keyword "CHANGE" and
will likely need to be corrected for the target database.
=head1 A UNIFIED GRAPHICAL INTERFACE
Seeing all the above options and scripts, you may be pining for a
single, graphical interface to handle all these transformations and
choices. This is exactly what the "sqlt.cgi" script provides. Simply
drop this script into your web server's CGI directory and enable the
execute bit and you can point your web browser to an HTML form which
provides a simple interface to all the SQLFairy parsers and producers.
=head1 PLUGIN YOUR OWN PARSERS AND PRODUCERS
Now that you have seen how the parsers and producers interact via the
Schema objects, you may wish to create your own versions to plug in.
Producers are probably the easier concept to grok, so let's cover that
first. By far the easiest way to create custom output is to use the
TTSchema producer in conjunction with a Template Toolkit template as
described earlier. However, you can also easily pass a reference to a
subroutine that SQL::Translator can call for the production of the
output. This subroutine will be passed a single argument of the
SQL::Translator object which you can use to access the Schema objects.
Please read the POD for SQL::Translator and SQL::Translator::Schema to
learn the methods you can call. Here is a very simple example:
#!/usr/bin/perl
use strict;
use SQL::Translator;
my $input = q[
create table foo (
foo_id int not null default '0' primary key,
foo_name varchar(30) not null default ''
);
create table bar (
bar_id int not null default '0' primary key,
bar_value varchar(100) not null default ''
);
];
my $t = SQL::Translator->new;
$t->parser('MySQL') or die $t->error;
$t->producer( \&produce ) or die $t->error;
my $output = $t->translate( \$input ) or die $t->error;
print $output;
sub produce {
my $tr = shift;
my $schema = $tr->schema;
my $output = '';
for my $t ( $schema->get_tables ) {
$output .= join('', "Table = ", $t->name, "\n");
}
return $output;
}
Executing this script produces the following:
$ ./my-producer.pl
Table = foo
Table = bar
A custom parser will be passed two arguments: the SQL::Translator
object and the data to be parsed. In this example, the schema will be
represented in a simple text format. Each line is a table definition
where the fields are separated by colons. The first field is the
table name and the following fields are column definitions where the
column name, data type and size are separated by spaces. The
specifics of the example are unimportant -- what is being demonstrated
is that you have to decide how to parse the incoming data and then
map the concepts in the data to the Schema object.
#!/usr/bin/perl
use strict;
use SQL::Translator;
my $input =
"foo:foo_id int 11:foo_name varchar 30\n" .
"bar:bar_id int 11:bar_value varchar 30"
;
my $t = SQL::Translator->new;
$t->parser( \&parser ) or die $t->error;
$t->producer('Oracle') or die $t->error;
my $output = $t->translate( \$input ) or die $t->error;
print $output;
sub parser {
my ( $tr, $data ) = @_;
my $schema = $tr->schema;
for my $line ( split( /\n/, $data ) ) {
my ( $table_name, @fields ) = split( /:/, $line );
my $table = $schema->add_table( name => $table_name )
or die $schema->error;
for ( @fields ) {
my ( $f_name, $type, $size ) = split;
$table->add_field(
name => $f_name,
data_type => $type,
size => $size,
) or die $table->error;
}
}
return 1;
}
And here is the output produced by this script:
--
-- Created by SQL::Translator::Producer::Oracle
-- Created on Wed Mar 31 15:43:30 2004
--
--
-- Table: foo
--
CREATE TABLE foo (
foo_id number(11),
foo_name varchar2(30)
);
--
-- Table: bar
--
CREATE TABLE bar (
bar_id number(11),
bar_value varchar2(30)
);
If you create a useful parser or producer, you are encouraged to
submit your work to the SQLFairy project!
=head1 PLUGIN TEMPLATE TOOLKIT PRODUCERS
You may find that the TTSchema producer doesn't give you enough control over
templating and you want to play with the Template config or add your own
variables. Or maybe you just have a really good template you want to submit to
SQLFairy :) If so, the SQL::Translator::Producer::TT::Base producer may be
just for you! Instead of working like a normal producer it provides a base
class so you can cheaply build new producer modules based on templates.
Its simplest use is when we just want to put a single template in its own
module. So to create a Foo producer we create a F<Custom/Foo.pm> file as
follows, putting our template in the __DATA__ section.
package Custom::Foo;
use base qw/SQL::Translator::Producer::TT::Base/;
# Use our new class as the producer
sub produce { return __PACKAGE__->new( translator => shift )->run; };
__DATA__
[% FOREACH table IN schema.get_tables %]
Table: [% table.name %]
Fields:
[% FOREACH field IN table.get_fields -%]
[% field.name %]
[% END -%]
[% END %]
With that we get a producer called Custom::Foo that we can now call like a
normal producer (as long as the directory containing F<Custom/Foo.pm> is in our
@INC path):
$ sqlt -f YAML -t Custom-Foo foo.yaml
The template gets C<schema> and C<translator> variables to use in building
its output.
template generation.
B<tt_config> allows you to set the config options used by the Template object.
The Template Toolkit provides a huge number of options which allow you to do all
sorts of magic (see the L<Template> documentation for details). This method
provides a hook into them by returning a hash of options for the Template, e.g.
say you want to use the INTERPOLATE option to save some typing in your template:
sub tt_config { ( INTERPOLATE => 1 ); }
Another common use for this is to add your own filters to the template:
sub tt_config {(
INTERPOLATE => 1,
FILTERS => { foo_filter => \&foo_filter, }
);}
Another common extension is adding your own template variables. This is done
with B<tt_vars>:
sub tt_vars { ( foo => "bar" ); }
What about using template files instead of DATA sections? You can already - if
you give a template on the command line your new producer will use that instead
of reading the DATA section:
$ sqlt -f YAML -t Custom-Foo --template foo.tt foo.yaml
This is useful as you can set up a producer that adds a set of filters and
variables that you can then use in templates given on the command line. (There
is also a tt_schema method to override if you need even finer control over the
source of your template). Note that if you leave out the DATA section altogether
then your producer will require a template file name to be given.
See L<SQL::Translator::Producer::TT::Base> for more details.
=head1 AUTHOR
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
SQL-Translator-1.65/lib/SQL/Translator/Utils.pm
package SQL::Translator::Utils;
use strict;
use warnings;
use Digest::SHA qw( sha1_hex );
use File::Spec;
use Scalar::Util qw(blessed);
use Try::Tiny;
use Carp qw(carp croak);
use List::Util qw(any);
our $VERSION = '1.65';
use base qw(Exporter);
our @EXPORT_OK = qw(
debug normalize_name header_comment parse_list_arg truncate_id_uniquely
$DEFAULT_COMMENT parse_mysql_version parse_dbms_version
ddl_parser_instance batch_alter_table_statements
uniq throw ex2err carp_ro
normalize_quote_options
);
use constant COLLISION_TAG_LENGTH => 8;
our $DEFAULT_COMMENT = '--';
sub debug {
my ($pkg, $file, $line, $sub) = caller(0);
{
no strict qw(refs);
return unless ${"$pkg\::DEBUG"};
}
$sub =~ s/^$pkg\:://;
while (@_) {
my $x = shift;
chomp $x;
$x =~ s/\bPKG\b/$pkg/g;
$x =~ s/\bLINE\b/$line/g;
$x =~ s/\bSUB\b/$sub/g;
#warn '[' . $x . "]\n";
print STDERR '[' . $x . "]\n";
}
}
sub normalize_name {
my $name = shift or return '';
# The name can only begin with a-zA-Z_; if there's anything
# else, prefix with _
$name =~ s/^([^a-zA-Z_])/_$1/;
# anything other than a-zA-Z0-9_ in the non-first position
# needs to be turned into _
$name =~ tr/[a-zA-Z0-9_]/_/c;
# All duplicated _ need to be squashed into one.
$name =~ tr/_/_/s;
# Trim a trailing _
$name =~ s/_$//;
return $name;
}
sub normalize_quote_options {
my $config = shift;
my $quote;
if (defined $config->{quote_identifiers}) {
$quote = $config->{quote_identifiers};
for (qw/quote_table_names quote_field_names/) {
carp "Ignoring deprecated parameter '$_', since 'quote_identifiers' is supplied"
if defined $config->{$_};
}
}
# Legacy one set the other is not
elsif (defined $config->{'quote_table_names'} xor defined $config->{'quote_field_names'}) {
if (defined $config->{'quote_table_names'}) {
carp
"Explicitly disabling the deprecated 'quote_table_names' implies disabling 'quote_identifiers' which in turn implies disabling 'quote_field_names'"
unless $config->{'quote_table_names'};
$quote = $config->{'quote_table_names'} ? 1 : 0;
} else {
carp
"Explicitly disabling the deprecated 'quote_field_names' implies disabling 'quote_identifiers' which in turn implies disabling 'quote_table_names'"
unless $config->{'quote_field_names'};
$quote = $config->{'quote_field_names'} ? 1 : 0;
}
}
# Legacy both are set
elsif (defined $config->{'quote_table_names'}) {
croak 'Setting quote_table_names and quote_field_names to conflicting values is no longer supported'
if ($config->{'quote_table_names'} xor $config->{'quote_field_names'});
$quote = $config->{'quote_table_names'} ? 1 : 0;
}
return $quote;
}
sub header_comment {
my $producer = shift || caller;
my $comment_char = shift;
my $now = scalar localtime;
$comment_char = $DEFAULT_COMMENT
unless defined $comment_char;
my $header_comment = <<"HEADER_COMMENT";
${comment_char}
${comment_char} Created by $producer
${comment_char} Created on $now
${comment_char}
HEADER_COMMENT
# Any additional stuff passed in
for my $additional_comment (@_) {
$header_comment .= "${comment_char} ${additional_comment}\n";
}
return $header_comment;
}
sub parse_list_arg {
my $list = UNIVERSAL::isa($_[0], 'ARRAY') ? shift : [@_];
#
# This protects stringification of references.
#
if (any { ref $_ } @$list) {
return $list;
}
#
# This processes string-like arguments.
#
else {
return [
map { s/^\s+|\s+$//g; $_ }
map { split /,/ }
grep { defined && length } @$list
];
}
}
sub truncate_id_uniquely {
my ($desired_name, $max_symbol_length) = @_;
return $desired_name
unless defined $desired_name && length $desired_name > $max_symbol_length;
my $truncated_name = substr $desired_name, 0, $max_symbol_length - COLLISION_TAG_LENGTH - 1;
# Hex isn't the most space-efficient, but it skirts around allowed
# charset issues
my $digest = sha1_hex($desired_name);
my $collision_tag = substr $digest, 0, COLLISION_TAG_LENGTH;
return $truncated_name . '_' . $collision_tag;
}
sub parse_mysql_version {
my ($v, $target) = @_;
return undef unless $v;
$target ||= 'perl';
my @vers;
# X.Y.Z style
if ($v =~ / ^ (\d+) \. (\d{1,3}) (?: \. (\d{1,3}) )? $ /x) {
push @vers, $1, $2, $3;
}
# XYYZZ (mysql) style
elsif ($v =~ / ^ (\d) (\d{2}) (\d{2}) $ /x) {
push @vers, $1, $2, $3;
}
# XX.YYYZZZ (perl) style or simply X
elsif ($v =~ / ^ (\d+) (?: \. (\d{3}) (\d{3}) )? $ /x) {
push @vers, $1, $2, $3;
} else {
#how do I croak sanely here?
die "Unparseable MySQL version '$v'";
}
if ($target eq 'perl') {
return sprintf('%d.%03d%03d', map { $_ || 0 } (@vers));
} elsif ($target eq 'mysql') {
return sprintf('%d%02d%02d', map { $_ || 0 } (@vers));
} else {
#how do I croak sanely here?
die "Unknown version target '$target'";
}
}
sub parse_dbms_version {
my ($v, $target) = @_;
return undef unless $v;
my @vers;
# X.Y.Z style
if ($v =~ / ^ (\d+) \. (\d{1,3}) (?: \. (\d{1,3}) )? $ /x) {
push @vers, $1, $2, $3;
}
# XX.YYYZZZ (perl) style or simply X
elsif ($v =~ / ^ (\d+) (?: \. (\d{3}) (\d{3}) )? $ /x) {
push @vers, $1, $2, $3;
} else {
#how do I croak sanely here?
die "Unparseable database server version '$v'";
}
if ($target eq 'perl') {
return sprintf('%d.%03d%03d', map { $_ || 0 } (@vers));
} elsif ($target eq 'native') {
return join '.' => grep defined, @vers;
} else {
#how do I croak sanely here?
die "Unknown version target '$target'";
}
}
#my ($parsers_libdir, $checkout_dir);
sub ddl_parser_instance {
my $type = shift;
# it may differ from our caller, even though currently this is not the case
eval "require SQL::Translator::Parser::$type"
or die "Unable to load grammar-spec container SQL::Translator::Parser::$type:\n$@";
# handle DB2 in a special way, since the grammar source was lost :(
if ($type eq 'DB2') {
require SQL::Translator::Parser::DB2::Grammar;
return SQL::Translator::Parser::DB2::Grammar->new;
}
require Parse::RecDescent;
return Parse::RecDescent->new(do {
no strict 'refs';
${"SQL::Translator::Parser::${type}::GRAMMAR"}
|| die "No \$SQL::Translator::Parser::${type}::GRAMMAR defined, unable to instantiate PRD parser\n";
});
# this is disabled until RT#74593 is resolved
=begin sadness
unless ($parsers_libdir) {
# are we in a checkout?
if ($checkout_dir = _find_co_root()) {
$parsers_libdir = File::Spec->catdir($checkout_dir, 'share', 'PrecompiledParsers');
}
else {
require File::ShareDir;
$parsers_libdir = File::Spec->catdir(
File::ShareDir::dist_dir('SQL-Translator'),
'PrecompiledParsers'
);
}
unshift @INC, $parsers_libdir;
}
my $precompiled_mod = "Parse::RecDescent::DDL::SQLT::$type";
# FIXME FIXME FIXME
# Parse::RecDescent has horrible architecture where each precompiled parser
# instance shares global state with all its siblings
# What we do here is gross, but scarily efficient - the parser compilation
# is much much slower than an unload/reload cycle
require Class::Unload;
Class::Unload->unload($precompiled_mod);
# There is also a sub-namespace that P::RD uses, but simply unsetting
# $^W to stop redefine warnings seems to be enough
#Class::Unload->unload("Parse::RecDescent::$precompiled_mod");
eval "local \$^W; require $precompiled_mod" or do {
if ($checkout_dir) {
die "Unable to find precompiled grammar for $type - run Makefile.PL to generate it\n";
}
else {
die "Unable to load precompiled grammar for $type... this is not supposed to happen if you are not in a checkout, please file a bugreport:\n$@"
}
};
my $grammar_spec_fn = $INC{"SQL/Translator/Parser/$type.pm"};
my $precompiled_fn = $INC{"Parse/RecDescent/DDL/SQLT/$type.pm"};
if (
(stat($grammar_spec_fn))[9]
>
(stat($precompiled_fn))[9]
) {
die (
"Grammar spec '$grammar_spec_fn' is newer than precompiled parser '$precompiled_fn'"
. ($checkout_dir
? " - run Makefile.PL to regenerate stale versions\n"
: "... this is not supposed to happen if you are not in a checkout, please file a bugreport\n"
)
);
}
return $precompiled_mod->new;
=end sadness
=cut
}
# Try to determine the root of a checkout/untar if possible
# or return undef
sub _find_co_root {
my @mod_parts = split /::/, (__PACKAGE__ . '.pm');
my $rel_path = join('/', @mod_parts); # %INC stores paths with / regardless of OS
return undef unless ($INC{$rel_path});
# a bit convoluted, but what we do here essentially is:
# - get the file name of this particular module
# - do 'cd ..' as many times as necessary to get to lib/SQL/Translator/../../..
my $root = (File::Spec::Unix->splitpath($INC{$rel_path}))[1];
for (1 .. @mod_parts) {
$root = File::Spec->catdir($root, File::Spec->updir);
}
return (-f File::Spec->catfile($root, 'Makefile.PL'))
? $root
: undef;
}
{
package SQL::Translator::Utils::Error;
use overload
'""' => sub { ${ $_[0] } },
fallback => 1;
sub new {
my ($class, $msg) = @_;
bless \$msg, $class;
}
}
sub uniq {
my (%seen, $seen_undef, $numeric_preserving_copy);
grep { not(defined $_ ? $seen{ $numeric_preserving_copy = $_ }++ : $seen_undef++) } @_;
}
sub throw {
die SQL::Translator::Utils::Error->new($_[0]);
}
sub ex2err {
my ($orig, $self, @args) = @_;
return try {
$self->$orig(@args);
} catch {
die $_ unless blessed($_) && $_->isa("SQL::Translator::Utils::Error");
$self->error("$_");
};
}
sub carp_ro {
my ($name) = @_;
return sub {
my ($orig, $self) = (shift, shift);
carp "'$name' is a read-only accessor" if @_;
return $self->$orig;
};
}
sub batch_alter_table_statements {
my ($diff_hash, $options, @meths) = @_;
@meths = qw(
rename_table
alter_drop_constraint
alter_drop_index
drop_field
add_field
alter_field
rename_field
alter_create_index
alter_create_constraint
alter_table
) unless @meths;
my $package = caller;
return map {
my $meth = $package->can($_) or die "$package cant $_";
map { $meth->(ref $_ eq 'ARRAY' ? @$_ : $_, $options) } @{ $diff_hash->{$_} }
} grep { @{ $diff_hash->{$_} || [] } } @meths;
}
1;
=pod
=head1 NAME
SQL::Translator::Utils - SQL::Translator Utility functions
=head1 SYNOPSIS
use SQL::Translator::Utils qw(debug);
debug("PKG: Bad things happened");
=head1 DESCRIPTION
C<SQL::Translator::Utils> contains utility functions designed to be
used from the other modules within the C<SQL::Translator> distribution.
Nothing is exported by default.
=head1 EXPORTED FUNCTIONS AND CONSTANTS
=head2 debug
C<debug> takes 0 or more messages, which will be sent to STDERR.
Occurrences of the strings I<PKG>, I<SUB>, and I<LINE>
will be replaced by the calling package, subroutine, and line number,
respectively, as reported by C<caller()>.
For example, from within C<foo> in F<SQL/Translator.pm>, at line 666:
debug("PKG: Error reading file at SUB/LINE");
Will warn
[SQL::Translator: Error reading file at foo/666]
The entire message is enclosed within C<[> and C<]> for visual clarity
when STDERR is intermixed with STDOUT.
=head2 normalize_name
C<normalize_name> takes a string and ensures that it is suitable for
use as an identifier. This means: ensure that it starts with a letter
or underscore, and that the rest of the string consists of only
letters, numbers, and underscores. A string that begins with
something other than a letter or underscore will be prefixed with an underscore, and
all other characters in the string will be replaced with underscores.
Finally, a trailing underscore will be removed, because that's ugly.
normalize_name("Hello, world");
Produces:
Hello_world
A more useful example, from the SQL::Translator test
suite:
normalize_name("silly field (with random characters)");
returns:
silly_field_with_random_characters
=head2 header_comment
Create the header comment. Takes 1 mandatory argument (the producer
classname), an optional comment character (defaults to $DEFAULT_COMMENT),
and 0 or more additional comments, which will be appended to the header,
prefixed with the comment character. If additional comments are provided,
then a comment string must be provided ($DEFAULT_COMMENT is exported for
this use). For example, this:
package My::Producer;
use SQL::Translator::Utils qw(header_comment $DEFAULT_COMMENT);
print header_comment(__PACKAGE__,
$DEFAULT_COMMENT,
"Hi mom!");
produces:
--
-- Created by My::Producer
-- Created on Fri Apr 25 06:56:02 2003
--
-- Hi mom!
--
Note the gratuitous spacing.
=head2 parse_list_arg
Takes a string, list or arrayref (all of which could contain
comma-separated values) and returns an array reference of the values.
All of the following will return equivalent values:
parse_list_arg('id');
parse_list_arg('id', 'name');
parse_list_arg( 'id, name' );
parse_list_arg( [ 'id', 'name' ] );
parse_list_arg( qw[ id name ] );
=head2 truncate_id_uniquely
Takes a string ($desired_name) and int ($max_symbol_length). Truncates
$desired_name to $max_symbol_length by including part of the hash of
the full name at the end of the truncated name, giving a high
probability that the symbol will be unique. For example,
truncate_id_uniquely( 'a' x 100, 64 )
truncate_id_uniquely( 'a' x 99 . 'b', 64 );
truncate_id_uniquely( 'a' x 99, 64 )
Will give three different results; specifically:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_7f900025
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_6191e39a
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_8cd96af2
=head2 $DEFAULT_COMMENT
This is the default comment string ('--'). Useful for
C<header_comment>.
=head2 parse_mysql_version
Used by both L<SQL::Translator::Parser::MySQL> and
L<SQL::Translator::Producer::MySQL> in order to provide a
consistent format for both C<< parser_args->{mysql_parser_version} >> and
C<< producer_args->{mysql_version} >> respectively. Takes any of the following
version specifications:
5.0.3
4.1
3.23.2
5
5.001005 (perl style)
30201 (mysql style)
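For example (return values shown in comments; they follow the sprintf formats used internally):

 use SQL::Translator::Utils qw(parse_mysql_version);

 parse_mysql_version('5.0.3');            # '5.000003' (perl style is the default)
 parse_mysql_version('5.0.3', 'mysql');   # '50003'
 parse_mysql_version('50003', 'perl');    # '5.000003'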
=head2 parse_dbms_version
Takes a version string (X.Y.Z) or perl style (XX.YYYZZZ) and a target ('perl'
or 'native'), and transforms the string to the given target style.
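For example (a small sketch; return values shown in comments):

 use SQL::Translator::Utils qw(parse_dbms_version);

 parse_dbms_version('9.2.1', 'perl');     # '9.002001'
 parse_dbms_version('9.2.1', 'native');   # '9.2.1'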
=head2 throw
Throws the provided string as an object that will stringify back to the
original string. This stops it from being mangled by L<Moo>'s C<isa>
code.
=head2 ex2err
Wraps an attribute accessor to catch any exception raised using
L</throw> and store it in C<< $self->error() >>, finally returning
undef. A reference to this function can be passed directly to
L<Moo/around>.
around foo => \&ex2err;
around bar => sub {
my ($orig, $self) = (shift, shift);
return ex2err($orig, $self, @_) if @_;
...
};
=head2 carp_ro
Takes a field name and returns a reference to a function that can be used
C<around> a read-only accessor to make it C<carp>
instead of die when passed an argument.
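For example (a sketch; the C<My::Thing> class and its C<table> attribute are purely illustrative):

 package My::Thing;
 use Moo;
 use SQL::Translator::Utils qw(carp_ro);

 has table => ( is => 'ro' );
 around table => carp_ro('table');   # carps instead of dying if given an argument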
=head2 batch_alter_table_statements
Takes the diff and argument hashes as passed to a producer's
C<batch_alter_table> function,
and an optional list of producer functions to call on the calling package.
Returns the list of statements returned by the producer functions.
If no producer functions are specified, the following functions in the
calling package are called:
=over
=item 1. rename_table
=item 2. alter_drop_constraint
=item 3. alter_drop_index
=item 4. drop_field
=item 5. add_field
=item 6. alter_field
=item 7. rename_field
=item 8. alter_create_index
=item 9. alter_create_constraint
=item 10. alter_table
=back
If the corresponding array in the hash has any elements, but the
caller doesn't implement that function, an exception is thrown.
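A sketch of how a producer might use this from its own C<batch_alter_table> (assuming the producer already implements the individual functions listed above; the C<MyDB> name is hypothetical):

 package SQL::Translator::Producer::MyDB;
 use SQL::Translator::Utils qw(batch_alter_table_statements);

 sub batch_alter_table {
     my ($table, $diff_hash, $options) = @_;

     # Calls rename_table(), drop_field(), add_field(), ... in this package
     # for each populated entry in $diff_hash and collects the returned SQL.
     return batch_alter_table_statements($diff_hash, $options);
 }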
=head1 AUTHORS
Darren Chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Types.pm 0000644 0000000 0000000 00000004631 14541265163 021244 0 ustar 00root root 0000000 0000000 package SQL::Translator::Types;
use warnings;
use strict;
=head1 NAME
SQL::Translator::Types - Type checking functions
=head1 SYNOPSIS
package Foo;
use Moo;
use SQL::Translator::Types qw(schema_obj enum);
has foo => ( is => 'rw', isa => schema_obj('Trigger') );
has bar => ( is => 'rw', isa => enum([qw(baz quux quuz)], {
msg => "Invalid value for bar: '%s'", icase => 1,
}) );
=head1 DESCRIPTION
This module exports functions that return coderefs suitable for L<Moo>
C<isa> type checks.
Errors are reported using L<SQL::Translator::Utils/throw>.
=cut
use SQL::Translator::Utils qw(throw);
use Scalar::Util qw(blessed);
use Exporter qw(import);
our @EXPORT_OK = qw(schema_obj enum);
=head1 FUNCTIONS
=head2 schema_obj($type)
Returns a coderef that checks that its arguments is an object of the
class C<< SQL::Translator::Schema::I<$type> >>.
=cut
sub schema_obj {
my ($class) = @_;
my $name = lc $class;
$class = 'SQL::Translator::Schema' . ($class eq 'Schema' ? '' : "::$class");
return sub {
throw("Not a $name object")
unless blessed($_[0])
and $_[0]->isa($class);
};
}
=head2 enum(\@strings, [$msg | \%parameters])
Returns a coderef that checks that the argument is one of the provided
C<@strings>.
=head3 Parameters
=over
=item msg
A C<sprintf> format string for the error message.
If no other parameters are needed, this can be provided on its own,
instead of the C<%parameters> hashref.
The invalid value is passed as the only argument.
Defaults to C<Invalid value: '%s'>.
=item icase
If true, folds the values to lower case before checking for equality.
=item allow_undef
If true, allow C<undef> in addition to the specified strings.
=item allow_false
If true, allow any false value in addition to the specified strings.
=back
=cut
sub enum {
my ($values, $args) = @_;
$args ||= {};
$args = { msg => $args } unless ref($args) eq 'HASH';
my $icase = !!$args->{icase};
my %values = map { ($icase ? lc : $_) => undef } @{$values};
my $msg = $args->{msg} || "Invalid value: '%s'";
my $extra_test
= $args->{allow_undef} ? sub { defined $_[0] }
: $args->{allow_false} ? sub { !!$_[0] }
: undef;
return sub {
my $val = $icase ? lc $_[0] : $_[0];
throw(sprintf($msg, $val))
if (!defined($extra_test) || $extra_test->($val))
&& !exists $values{$val};
};
}
1;
SQL-Translator-1.65/lib/SQL/Translator/Role/ 0000755 0000000 0000000 00000000000 14551164244 020476 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Role/BuildArgs.pm 0000644 0000000 0000000 00000001223 14541265164 022710 0 ustar 00root root 0000000 0000000 package SQL::Translator::Role::BuildArgs;
=head1 NAME
SQL::Translator::Role::BuildArgs - Remove undefined constructor arguments
=head1 SYNOPSIS
package Foo;
use Moo;
with qw(SQL::Translator::Role::BuildArgs);
=head1 DESCRIPTION
This L<Moo::Role> wraps C<BUILDARGS> to remove C<undef> constructor
arguments for backwards compatibility with the old L<Class::Base>-based
L<SQL::Translator::Schema::Object>.
=cut
use Moo::Role;
around BUILDARGS => sub {
my $orig = shift;
my $self = shift;
my $args = $self->$orig(@_);
foreach my $arg (keys %{$args}) {
delete $args->{$arg} unless defined($args->{$arg});
}
return $args;
};
1;
SQL-Translator-1.65/lib/SQL/Translator/Role/Error.pm 0000644 0000000 0000000 00000003002 14541265164 022122 0 ustar 00root root 0000000 0000000 package SQL::Translator::Role::Error;
=head1 NAME
SQL::Translator::Role::Error - Error setter/getter for objects and classes
=head1 SYNOPSIS
In the class consuming the role:
package Foo;
use Moo;
with qw(SQL::Translator::Role::Error);
sub foo {
...
return $self->error("Something failed")
unless $some_condition;
...
}
In code using the class:
Foo->foo or die Foo->error;
# or
$foo->foo or die $foo->error;
=head1 DESCRIPTION
This L<Moo::Role> provides a method for getting and setting the error message
on a class or object.
=cut
use Moo::Role;
use Sub::Quote qw(quote_sub);
has _ERROR => (
is => 'rw',
accessor => 'error',
init_arg => undef,
default => quote_sub(q{ '' }),
);
=head1 METHODS
=head2 $object_or_class->error([$message])
If called with an argument, sets the error message and returns undef,
otherwise returns the message.
As an implementation detail, for compatibility with L<Class::Base>, the
message is stored in C<< $object->{_ERROR} >> or C<< $Class::ERROR >>,
depending on whether the invocant is an object.
=cut
around error => sub {
my ($orig, $self) = (shift, shift);
# Emulate horrible Class::Base API
unless (ref($self)) {
my $errref = do { no strict 'refs'; \${"${self}::ERROR"} };
return $$errref unless @_;
$$errref = $_[0];
return undef;
}
return $self->$orig unless @_;
$self->$orig(@_);
return undef;
};
=head1 SEE ALSO
=over
=item *
L<Class::Base>
=back
=cut
1;
SQL-Translator-1.65/lib/SQL/Translator/Role/Debug.pm 0000644 0000000 0000000 00000001414 14541265164 022064 0 ustar 00root root 0000000 0000000 package SQL::Translator::Role::Debug;
use Moo::Role;
use Sub::Quote qw(quote_sub);
has _DEBUG => (
is => 'rw',
accessor => 'debugging',
init_arg => 'debugging',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
lazy => 1,
builder => 1,
);
sub _build__DEBUG {
my ($self) = @_;
my $class = ref $self;
no strict 'refs';
return ${"${class}::DEBUG"};
}
around debugging => sub {
my ($orig, $self) = (shift, shift);
# Emulate horrible Class::Base API
unless (ref $self) {
my $dbgref = do { no strict 'refs'; \${"${self}::DEBUG"} };
$$dbgref = $_[0] if @_;
return $$dbgref;
}
return $self->$orig(@_);
};
sub debug {
my $self = shift;
return unless $self->debugging;
print STDERR '[', (ref $self || $self), '] ', @_, "\n";
}
1;
SQL-Translator-1.65/lib/SQL/Translator/Role/ListAttr.pm 0000644 0000000 0000000 00000005507 14541265164 022613 0 ustar 00root root 0000000 0000000 package SQL::Translator::Role::ListAttr;
use warnings;
use strict;
=head1 NAME
SQL::Translator::Role::ListAttr - context-sensitive list attributes
=head1 SYNOPSIS
package Foo;
use Moo;
use SQL::Translator::Role::ListAttr;
with ListAttr foo => ( uniq => 1, append => 1 );
=head1 DESCRIPTION
This package provides a variable L<Moo::Role> for context-sensitive list
attributes.
=cut
use SQL::Translator::Utils qw(parse_list_arg ex2err uniq);
use Sub::Quote qw(quote_sub);
use Package::Variant (
importing => {
'Moo::Role' => [],
},
subs => [qw(has around)],
);
=head1 FUNCTIONS
=head2 ListAttr $name => %parameters;
Returns a L<Moo::Role> providing an arrayref attribute named C<$name>,
and wrapping the accessor to provide context-sensitivity both for
setting and getting. If no C<builder> or C<default> is provided, the
default value is the empty list.
On setting, the arguments are parsed using
L<SQL::Translator::Utils/parse_list_arg>, and the accessor will return
an array reference or a list, depending on context.
=head3 Parameters
=over
=item append
If true, the setter will append arguments to the existing ones, rather
than replacing them.
=item uniq
If true, duplicate items will be removed, keeping the first one seen.
=item may_throw
If accessing the attribute might L<throw|SQL::Translator::Utils/throw>
an exception (e.g. from a C<builder> or C<isa> check), this should be
set to make the accessor store the exception using
L<SQL::Translator::Role::Error> and return undef.
=item undef_if_empty
If true, and the list is empty, the accessor will return C<undef>
instead of a reference to an empty list in scalar context.
=back
Unknown parameters are passed through to the C<has> call for
the attribute.
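For example (a sketch, assuming a class whose C<fields> attribute was declared via this role with C<uniq> turned on):

 # in the class:  with ListAttr fields => ( uniq => 1 );

 $obj->fields('id, name', 'name');   # arguments are parsed and de-duplicated

 my @list = $obj->fields;            # list context:   ('id', 'name')
 my $aref = $obj->fields;            # scalar context: ['id', 'name']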
=cut
sub make_variant {
my ($class, $target_package, $name, %arguments) = @_;
my $may_throw = delete $arguments{may_throw};
my $undef_if_empty = delete $arguments{undef_if_empty};
my $append = delete $arguments{append};
my $coerce
= delete $arguments{uniq}
? sub { [ uniq @{ parse_list_arg($_[0]) } ] }
: \&parse_list_arg;
has($name => (
is => 'rw',
(!$arguments{builder} ? (default => quote_sub(q{ [] }),) : ()),
coerce => $coerce,
%arguments,
));
around(
$name => sub {
my ($orig, $self) = (shift, shift);
my $list = parse_list_arg(@_);
$self->$orig([ @{ $append ? $self->$orig : [] }, @$list ])
if @$list;
my $return;
if ($may_throw) {
$return = ex2err($orig, $self) or return;
} else {
$return = $self->$orig;
}
my $scalar_return = !@{$return} && $undef_if_empty ? undef : $return;
return wantarray ? @{$return} : $scalar_return;
}
);
}
=head1 SEE ALSO
=over
=item L
=item L
=back
=cut
1;
SQL-Translator-1.65/lib/SQL/Translator/Producer.pm 0000644 0000000 0000000 00000005241 14551163724 021722 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer;
use strict;
use warnings;
use Scalar::Util ();
our $VERSION = '1.65';
sub produce {""}
# Do not rely on this if you are not bundled with SQL::Translator.
# -- rjbs, 2008-09-30
## $exceptions contains an arrayref of paired values
## Each pair contains a pattern match or string, and a value to be used as
## the default if matched.
## They are special per Producer, and provide support for the old 'now()'
## default value exceptions
sub _apply_default_value {
my ($self, $field, $field_ref, $exceptions) = @_;
my $default = $field->default_value;
return if !defined $default;
if ($exceptions and !ref $default) {
for (my $i = 0; $i < @$exceptions; $i += 2) {
my ($pat, $val) = @$exceptions[ $i, $i + 1 ];
if (ref $pat and $default =~ $pat) {
$default = $val;
last;
} elsif (lc $default eq lc $pat) {
$default = $val;
last;
}
}
}
my $type = lc $field->data_type;
my $is_numeric_datatype
= ($type =~ /^(?:(?:big|medium|small|tiny)?int(?:eger)?|decimal|double|float|num(?:ber|eric)?|real)$/);
if (ref $default) {
$$field_ref .= " DEFAULT $$default";
} elsif ($is_numeric_datatype && Scalar::Util::looks_like_number($default)) {
# we need to check the data itself in addition to the datatype, for basic safety
$$field_ref .= " DEFAULT $default";
} else {
$default = $self->_quote_string($default);
$$field_ref .= " DEFAULT $default";
}
}
sub _quote_string {
my ($self, $string) = @_;
$string =~ s/'/''/g;
return qq{'$string'};
}
1;
# -------------------------------------------------------------------
# A burnt child loves the fire.
# Oscar Wilde
# -------------------------------------------------------------------
=pod
=head1 NAME
SQL::Translator::Producer - describes how to write a producer
=head1 DESCRIPTION
Producer modules designed to be used with SQL::Translator need to
implement a single function, called B<produce>. B<produce> will be
called with the SQL::Translator object from which it is expected to
retrieve the SQL::Translator::Schema object which has been populated
by the parser. It is expected to return a string.
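For example, a bare-bones producer might look like this (a sketch only; the C<MyProducer> name and the generated DDL are illustrative, not part of this distribution):

 package SQL::Translator::Producer::MyProducer;
 use strict;
 use warnings;

 sub produce {
     my $translator = shift;
     my $schema     = $translator->schema;

     my @ddl;
     for my $table ($schema->get_tables) {
         my @cols = map { $_->name . ' ' . $_->data_type } $table->get_fields;
         push @ddl, 'CREATE TABLE ' . $table->name . " (\n  " . join(",\n  ", @cols) . "\n)";
     }
     return join(";\n\n", @ddl) . ";\n";
 }

 1;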
=head1 METHODS
=over 4
=item produce
=item create_table($table)
=item create_field($field)
=item create_view($view)
=item create_index($index)
=item create_constraint($constraint)
=item create_trigger($trigger)
=item alter_field($from_field, $to_field)
=item add_field($table, $new_field)
=item drop_field($table, $old_field)
=back
=head1 AUTHORS
Darren Chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
perl(1), SQL::Translator, SQL::Translator::Schema.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/ 0000755 0000000 0000000 00000000000 14551164244 021360 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Producer/Oracle.pm 0000644 0000000 0000000 00000062253 14551163724 023135 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::Oracle;
=head1 NAME
SQL::Translator::Producer::Oracle - Oracle SQL producer
=head1 SYNOPSIS
use SQL::Translator;
my $t = SQL::Translator->new( parser => '...', producer => 'Oracle' );
print $t->translate( $file );
=head1 DESCRIPTION
Creates an SQL DDL suitable for Oracle.
=head1 producer_args
=over
=item delay_constraints
This option removes the primary key and other key constraints from the
CREATE TABLE statement and adds equivalent ALTER TABLE statements at the end.
=item quote_field_names
Controls whether quotes are being used around column names in generated DDL.
=item quote_table_names
Controls whether quotes are being used around table, sequence and trigger names in
generated DDL.
=back
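For example (a sketch; the parser and the chosen arguments are illustrative):

 my $translator = SQL::Translator->new(
     parser        => 'MySQL',
     producer      => 'Oracle',
     producer_args => {
         delay_constraints => 1,
         quote_table_names => 1,
         quote_field_names => 1,
     },
 );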
=head1 NOTES
=head2 Autoincremental primary keys
This producer uses sequences and triggers to autoincrement primary key
columns, if necessary. SQLPlus and DBI expect a slightly different syntax
of CREATE TRIGGER statement. You might have noticed that this
producer returns a scalar containing all statements concatenated by
newlines or an array of single statements depending on the context
(scalar, array) it has been called in.
SQLPlus expects the following trigger syntax:
CREATE OR REPLACE TRIGGER ai_person_id
BEFORE INSERT ON person
FOR EACH ROW WHEN (
new.id IS NULL OR new.id = 0
)
BEGIN
SELECT sq_person_id.nextval
INTO :new.id
FROM dual;
END;
/
Whereas if you want to create the same trigger using L<DBI>, you need
to omit the last slash:
my $dbh = DBI->connect('dbi:Oracle:mysid', 'scott', 'tiger');
$dbh->do("
CREATE OR REPLACE TRIGGER ai_person_id
BEFORE INSERT ON person
FOR EACH ROW WHEN (
new.id IS NULL OR new.id = 0
)
BEGIN
SELECT sq_person_id.nextval
INTO :new.id
FROM dual;
END;
");
If you call this producer in array context, we expect you want to process
the returned array of statements using L<DBI> like
L<DBIx::Class> does.
To get this working we removed the slash in those statements in version
0.09002 of L<SQL::Translator> when called in array context. In scalar
context the slash will still be there to ensure compatibility with SQLPlus.
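A sketch of the two calling styles (the file name and C<$dbh> are placeholders for your own schema file and an already-connected database handle):

 my $sqlt = SQL::Translator->new( parser => 'MySQL', producer => 'Oracle' );

 # Scalar context: one string, triggers terminated with "/" for SQLPlus
 my $ddl = $sqlt->translate( filename => 'schema.sql' );

 # List context: individual statements without the trailing "/", suitable
 # for feeding to DBI one at a time
 my @statements = $sqlt->translate( filename => 'schema.sql' );
 $dbh->do($_) for @statements;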
=cut
use strict;
use warnings;
our ($DEBUG, $WARN);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use base 'SQL::Translator::Producer';
use SQL::Translator::Schema::Constants;
use SQL::Translator::Utils qw(debug header_comment);
use Data::Dumper;
my %translate = (
#
# MySQL types
#
bigint => 'number',
double => 'float',
decimal => 'number',
float => 'float',
int => 'number',
integer => 'number',
mediumint => 'number',
smallint => 'number',
tinyint => 'number',
char => 'char',
varchar => 'varchar2',
tinyblob => 'blob',
blob => 'blob',
mediumblob => 'blob',
longblob => 'blob',
tinytext => 'varchar2',
text => 'clob',
longtext => 'clob',
mediumtext => 'clob',
enum => 'varchar2',
set => 'varchar2',
date => 'date',
datetime => 'date',
time => 'date',
timestamp => 'date',
year => 'date',
#
# PostgreSQL types
#
numeric => 'number',
'double precision' => 'number',
serial => 'number',
bigserial => 'number',
money => 'number',
character => 'char',
'character varying' => 'varchar2',
bytea => 'BLOB',
interval => 'number',
boolean => 'number',
point => 'number',
line => 'number',
lseg => 'number',
box => 'number',
path => 'number',
polygon => 'number',
circle => 'number',
cidr => 'number',
inet => 'varchar2',
macaddr => 'varchar2',
bit => 'number',
'bit varying' => 'number',
#
# Oracle types
#
number => 'number',
varchar2 => 'varchar2',
long => 'clob',
);
#
# Oracle 8/9 max size of data types from:
# http://www.ss64.com/orasyntax/datatypes.html
#
my %max_size = (
char => 2000,
float => 126,
nchar => 2000,
nvarchar2 => 4000,
number => [ 38, 127 ],
raw => 2000,
varchar => 4000, # only synonym for varchar2
varchar2 => 4000,
);
my $max_id_length = 30;
my %used_identifiers = ();
my %global_names;
my %truncated;
# Quote used to escape table, field, sequence and trigger names
my $quote_char = '"';
sub produce {
my $translator = shift;
$DEBUG = $translator->debug;
$WARN = $translator->show_warnings || 0;
my $no_comments = $translator->no_comments;
my $add_drop_table = $translator->add_drop_table;
my $schema = $translator->schema;
my $oracle_version = $translator->producer_args->{oracle_version} || 0;
my $delay_constraints = $translator->producer_args->{delay_constraints};
my ($output, $create, @table_defs, @fk_defs, @trigger_defs, @index_defs, @constraint_defs);
debug("ORA: Beginning production");
$create .= header_comment unless ($no_comments);
my $qt = 1 if $translator->quote_table_names;
my $qf = 1 if $translator->quote_field_names;
if ($translator->parser_type =~ /mysql/i) {
$create
.= "-- We assume that default NLS_DATE_FORMAT has been changed\n"
. "-- but we set it here anyway to be self-consistent.\n"
unless $no_comments;
$create .= "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\n\n";
}
for my $table ($schema->get_tables) {
debug("ORA: Producing for table " . $table->name);
my ($table_def, $fk_def, $trigger_def, $index_def, $constraint_def) = create_table(
$table,
{
add_drop_table => $add_drop_table,
show_warnings => $WARN,
no_comments => $no_comments,
delay_constraints => $delay_constraints,
quote_table_names => $qt,
quote_field_names => $qf,
}
);
push @table_defs, @$table_def;
push @fk_defs, @$fk_def;
push @trigger_defs, @$trigger_def;
push @index_defs, @$index_def;
push @constraint_defs, @$constraint_def;
}
my (@view_defs);
foreach my $view ($schema->get_views) {
my ($view_def) = create_view(
$view,
{
add_drop_view => $add_drop_table,
quote_table_names => $qt,
}
);
push @view_defs, @$view_def;
}
if (wantarray) {
return defined $create ? $create : (), @table_defs, @view_defs,
@fk_defs, @trigger_defs, @index_defs, @constraint_defs;
} else {
$create .= join(";\n\n", @table_defs, @view_defs, @fk_defs, @index_defs, @constraint_defs);
$create .= ";\n\n";
# If wantarray is not set we have to add "/" in this statement
# DBI->do() needs them omitted
# triggers may NOT end with a semicolon but a "/" instead
$create .= "$_/\n\n" for @trigger_defs;
return $create;
}
}
sub create_table {
my ($table, $options) = @_;
my $qt = $options->{quote_table_names};
my $qf = $options->{quote_field_names};
my $table_name = $table->name;
my $table_name_q = quote($table_name, $qt);
my $item = '';
my $drop;
my (@create, @field_defs, @constraint_defs, @fk_defs, @trigger_defs);
push @create, "--\n-- Table: $table_name\n--"
unless $options->{no_comments};
push @create, qq[DROP TABLE $table_name_q CASCADE CONSTRAINTS]
if $options->{add_drop_table};
my (%field_name_scope, @field_comments);
for my $field ($table->get_fields) {
debug("ORA: Creating field " . $field->name . "(" . $field->data_type . ")");
my ($field_create, $field_defs, $trigger_defs, $field_comments)
= create_field($field, $options, \%field_name_scope);
push @create, @$field_create if ref $field_create;
push @field_defs, @$field_defs if ref $field_defs;
push @trigger_defs, @$trigger_defs if ref $trigger_defs;
push @field_comments, @$field_comments if ref $field_comments;
}
#
# Table options
#
my @table_options;
for my $opt ($table->options) {
if (ref $opt eq 'HASH') {
my ($key, $value) = each %$opt;
if (ref $value eq 'ARRAY') {
push @table_options,
"$key\n(\n"
. join(
"\n", map {" $_->[0]\t$_->[1]"}
map { [ each %$_ ] } @$value
) . "\n)";
} elsif (!defined $value) {
push @table_options, $key;
} else {
push @table_options, "$key $value";
}
}
}
#
# Table constraints
#
for my $c ($table->get_constraints) {
my $constr = create_constraint($c, $options);
if ($constr) {
if ($c->type eq FOREIGN_KEY) { # FK defs always come later as alters
push @fk_defs, sprintf("ALTER TABLE %s ADD %s", $table_name_q, $constr);
} else {
push @constraint_defs, $constr;
}
}
}
#
# Index Declarations
#
my @index_defs = ();
for my $index ($table->get_indices) {
my $index_name = $index->name || '';
my $index_type = $index->type || NORMAL;
my @fields = map { quote($_, $qf) } $index->fields;
next unless @fields;
debug("ORA: Creating $index_type index on fields (" . join(', ', @fields) . ") named $index_name");
my @index_options;
for my $opt ($index->options) {
if (ref $opt eq 'HASH') {
my ($key, $value) = each %$opt;
if (ref $value eq 'ARRAY') {
push @table_options,
"$key\n(\n"
. join(
"\n", map {" $_->[0]\t$_->[1]"}
map { [ each %$_ ] } @$value
) . "\n)";
} elsif (!defined $value) {
push @index_options, $key;
} else {
push @index_options, "$key $value";
}
}
}
my $index_options = @index_options ? "\n" . join("\n", @index_options) : '';
if ($index_type eq PRIMARY_KEY) {
$index_name
= $index_name
? mk_name($index_name)
: mk_name($table_name, 'pk');
$index_name = quote($index_name, $qf);
push @field_defs, 'CONSTRAINT ' . $index_name . ' PRIMARY KEY ' . '(' . join(', ', @fields) . ')';
} elsif ($index_type eq NORMAL or $index_type eq UNIQUE) {
push @index_defs, create_index($index, $options, $index_options);
} else {
warn "Unknown index type ($index_type) on table $table_name.\n"
if $WARN;
}
}
if (my @table_comments = $table->comments) {
for my $comment (@table_comments) {
next unless $comment;
$comment = __PACKAGE__->_quote_string($comment);
push @field_comments, "COMMENT ON TABLE $table_name_q is\n $comment"
unless $options->{no_comments};
}
}
my $table_options = @table_options ? "\n" . join("\n", @table_options) : '';
push @create,
"CREATE TABLE $table_name_q (\n"
. join(",\n", map {" $_"} @field_defs, ($options->{delay_constraints} ? () : @constraint_defs))
. "\n)$table_options";
@constraint_defs = map {"ALTER TABLE $table_name_q ADD $_"} @constraint_defs;
if ($WARN) {
if (%truncated) {
warn "Truncated " . keys(%truncated) . " names:\n";
warn "\t" . join("\n\t", sort keys %truncated) . "\n";
}
}
return \@create, \@fk_defs, \@trigger_defs, \@index_defs, ($options->{delay_constraints} ? \@constraint_defs : []);
}
sub alter_field {
my ($from_field, $to_field, $options) = @_;
my $qt = $options->{quote_table_names};
my ($field_create, $field_defs, $trigger_defs, $field_comments)
= create_field($to_field, $options, {});
# Fix ORA-01442
if (!$from_field->is_nullable && $to_field->is_nullable) {
if ($from_field->data_type =~ /text/) {
die 'Cannot alter CLOB field in this way';
} else {
@$field_defs = map { $_ .= ' NULL' } @$field_defs;
}
} elsif (!$from_field->is_nullable && !$to_field->is_nullable) {
@$field_defs = map { s/ NOT NULL//; $_ } @$field_defs;
}
my $table_name = quote($to_field->table->name, $qt);
return 'ALTER TABLE ' . $table_name . ' MODIFY ( ' . join('', @$field_defs) . ' )';
}
sub drop_field {
my ($old_field, $options) = @_;
my $qi = $options->{quote_identifiers};
my $table_name = quote($old_field->table->name, $qi);
my $out = sprintf('ALTER TABLE %s DROP COLUMN %s', $table_name, quote($old_field->name, $qi));
return $out;
}
sub add_field {
my ($new_field, $options) = @_;
my $qt = $options->{quote_table_names};
my ($field_create, $field_defs, $trigger_defs, $field_comments)
= create_field($new_field, $options, {});
my $table_name = quote($new_field->table->name, $qt);
my $out = sprintf('ALTER TABLE %s ADD ( %s )', $table_name, join('', @$field_defs));
return $out;
}
sub create_field {
my ($field, $options, $field_name_scope) = @_;
my $qf = $options->{quote_field_names};
my $qt = $options->{quote_table_names};
my (@create, @field_defs, @trigger_defs, @field_comments);
my $table_name = $field->table->name;
my $table_name_q = quote($table_name, $qt);
#
# Field name
#
my $field_name = mk_name($field->name, '', $field_name_scope, 1);
my $field_name_q = quote($field_name, $qf);
my $field_def = quote($field_name, $qf);
$field->name($field_name);
#
# Datatype
#
my $check;
my $data_type = lc $field->data_type;
my @size = $field->size;
my %extra = $field->extra;
my $list = $extra{'list'} || [];
my $commalist = join(', ', map { __PACKAGE__->_quote_string($_) } @$list);
if ($data_type eq 'enum') {
$check = "CHECK ($field_name_q IN ($commalist))";
$data_type = 'varchar2';
} elsif ($data_type eq 'set') {
# XXX add a CHECK constraint maybe
# (trickier and slower, than enum :)
$data_type = 'varchar2';
} else {
if (defined $translate{$data_type}) {
if (ref $translate{$data_type} eq "ARRAY") {
($data_type, $size[0]) = @{ $translate{$data_type} };
} else {
$data_type = $translate{$data_type};
}
}
$data_type ||= 'varchar2';
}
# ensure size is not bigger than max size oracle allows for data type
if (defined $max_size{$data_type}) {
for (my $i = 0; $i < scalar @size; $i++) {
my $max
= ref($max_size{$data_type}) eq 'ARRAY'
? $max_size{$data_type}->[$i]
: $max_size{$data_type};
$size[$i] = $max if $size[$i] > $max;
}
}
#
# Fixes ORA-02329: column of datatype LOB cannot be
# unique or a primary key
#
if ($data_type eq 'clob' && $field->is_primary_key) {
$data_type = 'varchar2';
$size[0] = 4000;
warn "CLOB cannot be a primary key, changing to VARCHAR2\n"
if $WARN;
}
if ($data_type eq 'clob' && $field->is_unique) {
$data_type = 'varchar2';
$size[0] = 4000;
warn "CLOB cannot be a unique key, changing to VARCHAR2\n"
if $WARN;
}
#
# Fixes ORA-00907: missing right parenthesis
#
if ($data_type =~ /(date|clob)/i) {
undef @size;
}
#
# Fixes ORA-00906: missing right parenthesis
# if size is 0 or undefined
#
for (qw/varchar2/) {
if ($data_type =~ /^($_)$/i) {
$size[0] ||= $max_size{$_};
}
}
$field_def .= " $data_type";
if (defined $size[0] && $size[0] > 0) {
$field_def .= '(' . join(',', @size) . ')';
}
#
# Default value
#
my $default = $field->default_value;
if (defined $default) {
debug("ORA: Handling default value: $default");
#
# Wherein we try to catch a string being used as
# a default value for a numerical field. If "true/false,"
# then sub "1/0," otherwise just test the truthity of the
# argument and use that (naive?).
#
if (ref $default and defined $$default) {
$default = $$default;
} elsif (ref $default) {
$default = 'NULL';
} elsif ($data_type =~ /^number$/i
&& $default !~ /^-?\d+$/
&& $default !~ m/null/i) {
if ($default =~ /^true$/i) {
$default = "'1'";
} elsif ($default =~ /^false$/i) {
$default = "'0'";
} else {
$default = $default ? "'1'" : "'0'";
}
} elsif (
$data_type =~ /date/
&& ( $default eq 'current_timestamp'
|| $default eq 'now()')
) {
$default = 'SYSDATE';
} else {
$default
= $default =~ m/null/i
? 'NULL'
: __PACKAGE__->_quote_string($default);
}
$field_def .= " DEFAULT $default",;
}
#
# Not null constraint
#
unless ($field->is_nullable) {
debug("ORA: Field is NOT NULL");
$field_def .= ' NOT NULL';
}
$field_def .= " $check" if $check;
#
# Auto_increment
#
if ($field->is_auto_increment) {
debug("ORA: Handling auto increment");
my $base_name = $table_name . "_" . $field_name;
my $seq_name = quote(mk_name($base_name, 'sq'), $qt);
my $trigger_name = quote(mk_name($base_name, 'ai'), $qt);
push @create, qq[DROP SEQUENCE $seq_name] if $options->{add_drop_table};
push @create, "CREATE SEQUENCE $seq_name";
my $trigger
= "CREATE OR REPLACE TRIGGER $trigger_name\n"
. "BEFORE INSERT ON $table_name_q\n"
. "FOR EACH ROW WHEN (\n"
. " new.$field_name_q IS NULL"
. " OR new.$field_name_q = 0\n" . ")\n"
. "BEGIN\n"
. " SELECT $seq_name.nextval\n"
. " INTO :new."
. $field_name_q . "\n"
. " FROM dual;\n"
. "END;\n";
push @trigger_defs, $trigger;
}
push @field_defs, $field_def;
if (my $comment = $field->comments) {
debug("ORA: Handling comment");
$comment = __PACKAGE__->_quote_string($comment);
push @field_comments, "COMMENT ON COLUMN $table_name_q.$field_name_q is\n $comment;"
unless $options->{no_comments};
}
return \@create, \@field_defs, \@trigger_defs, \@field_comments;
}
sub drop_table {
my ($table, $options) = @_;
my $qi = $options->{quote_identifiers};
my @foreign_key_constraints = grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints;
my @statements;
for my $constraint (@foreign_key_constraints) {
push @statements, alter_drop_constraint($constraint, $options);
}
return @statements, 'DROP TABLE ' . quote($table, $qi);
}
sub alter_create_index {
my ($index, $options) = @_;
return create_index($index, $options);
}
sub create_index {
my ($index, $options, $index_options) = @_;
$index_options = $index_options || '';
my $qf = $options->{quote_field_names} || $options->{quote_identifiers};
my $qt = $options->{quote_table_names} || $options->{quote_identifiers};
my $index_name = $index->name || '';
$index_name
= $index_name
? mk_name($index_name)
: mk_name($index->table, $index_name || 'i');
return join(' ',
map { $_ || () } 'CREATE',
lc $index->type eq 'normal' ? 'INDEX' : $index->type . ' INDEX',
$index_name ? quote($index_name, $qf) : '',
'ON',
quote($index->table, $qt),
'(' . join(', ', map { quote($_, $qf) } $index->fields) . ")$index_options");
}
sub alter_drop_index {
my ($index, $options) = @_;
return 'DROP INDEX ' . $index->name;
}
sub alter_drop_constraint {
my ($c, $options) = @_;
my $qi = $options->{quote_identifiers};
my $table_name = quote($c->table->name, $qi);
my @out = ('ALTER', 'TABLE', $table_name, 'DROP',);
if ($c->name) {
push @out, ('CONSTRAINT', quote($c->name, $qi));
} elsif ($c->type eq PRIMARY_KEY) {
push @out, 'PRIMARY KEY';
}
return join(' ', @out);
}
sub alter_create_constraint {
my ($c, $options) = @_;
my $qi = $options->{quote_identifiers};
my $table_name = quote($c->table->name, $qi);
return join(' ', 'ALTER TABLE', $table_name, 'ADD', create_constraint($c, $options));
}
sub create_constraint {
my ($c, $options) = @_;
my $qt = $options->{quote_table_names};
my $qf = $options->{quote_field_names};
my $table = $c->table;
my $table_name = $table->name;
my $table_name_q = quote($table_name, $qt);
my $name = $c->name || '';
my @fields = map { quote($_, $qf) } $c->fields;
my @rfields = map { quote($_, $qf) } $c->reference_fields;
return undef if !@fields && $c->type ne 'CHECK';
my $definition;
if ($c->type eq PRIMARY_KEY) {
debug("ORA: Creating PK constraint on fields (" . join(', ', @fields) . ")");
# create a name if delay_constraints
$name ||= mk_name($table_name, 'pk')
if $options->{delay_constraints};
$name = quote($name, $qf);
$definition = ($name ? "CONSTRAINT $name " : '') . 'PRIMARY KEY (' . join(', ', @fields) . ')';
} elsif ($c->type eq UNIQUE) {
# Don't create UNIQUE constraints identical to the primary key
if (my $pk = $table->primary_key) {
my $u_fields = join(":", @fields);
my $pk_fields = join(":", $pk->fields);
next if $u_fields eq $pk_fields;
}
if ($name) {
# Force prepend of table_name as ORACLE doesn't allow duplicate
# CONSTRAINT names even for different tables (ORA-02264)
$name = mk_name("${table_name}_$name", 'u')
unless $name =~ /^$table_name/;
} else {
$name = mk_name($table_name, 'u');
}
debug("ORA: Creating UNIQUE constraint on fields (" . join(', ', @fields) . ") named $name");
$name = quote($name, $qf);
for my $f ($c->fields) {
my $field_def = $table->get_field($f) or next;
my $dtype = $translate{
ref $field_def->data_type eq "ARRAY"
? $field_def->data_type->[0]
: $field_def->data_type
}
or next;
if ($WARN && $dtype =~ /clob/i) {
warn "Oracle will not allow UNIQUE constraints on "
. "CLOB field '"
. $field_def->table->name . '.'
. $field_def->name . ".'\n";
}
}
$definition = "CONSTRAINT $name UNIQUE " . '(' . join(', ', @fields) . ')';
} elsif ($c->type eq CHECK_C) {
$name ||= mk_name($name || $table_name, 'ck');
$name = quote($name, $qf);
my $expression = $c->expression || '';
debug("ORA: Creating CHECK constraint on fields (" . join(', ', @fields) . ") named $name");
$definition = "CONSTRAINT $name CHECK ($expression)";
} elsif ($c->type eq FOREIGN_KEY) {
$name = mk_name(join('_', $table_name, $c->fields) . '_fk');
$name = quote($name, $qf);
my $on_delete = uc($c->on_delete || '');
$definition = "CONSTRAINT $name FOREIGN KEY ";
if (@fields) {
$definition .= '(' . join(', ', @fields) . ')';
}
my $ref_table = quote($c->reference_table, $qt);
debug("ORA: Creating FK constraint on fields (" . join(', ', @fields) . ") named $name referencing $ref_table");
$definition .= " REFERENCES $ref_table";
if (@rfields) {
$definition .= ' (' . join(', ', @rfields) . ')';
}
if ($c->match_type) {
$definition .= ' MATCH ' . (($c->match_type =~ /full/i) ? 'FULL' : 'PARTIAL');
}
if ($on_delete && $on_delete ne "RESTRICT") {
$definition .= ' ON DELETE ' . $c->on_delete;
}
}
return $definition ? $definition : undef;
}
sub create_view {
my ($view, $options) = @_;
my $qt = $options->{quote_table_names};
my $view_name = quote($view->name, $qt);
my $extra = $view->extra;
my $view_type = 'VIEW';
my $view_options = '';
if (my $materialized = $extra->{materialized}) {
$view_type = 'MATERIALIZED VIEW';
$view_options .= ' ' . $materialized;
}
my @create;
push @create, qq[DROP $view_type $view_name]
if $options->{add_drop_view};
push @create, sprintf("CREATE %s %s%s AS\n%s", $view_type, $view_name, $view_options, $view->sql);
return \@create;
}
sub mk_name {
my $basename = shift || '';
my $type = shift || '';
$type = '' if $type =~ /^\d/;
my $scope = shift || '';
my $critical = shift || '';
my $basename_orig = $basename;
my $max_name
= $type
? $max_id_length - (length($type) + 1)
: $max_id_length;
$basename = substr($basename, 0, $max_name)
if length($basename) > $max_name;
my $name = $type ? "${type}_$basename" : $basename;
if ($basename ne $basename_orig and $critical) {
my $show_type = $type ? "+'$type'" : "";
warn "Truncating '$basename_orig'$show_type to $max_id_length ", "character limit to make '$name'\n"
if $WARN;
$truncated{$basename_orig} = $name;
}
$scope ||= \%global_names;
if (my $prev = $scope->{$name}) {
my $name_orig = $name;
substr($name, $max_id_length - 2) = ""
if length($name) >= $max_id_length - 1;
$name .= sprintf("%02d", $prev++);
warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n"
if $WARN;
$scope->{$name_orig}++;
}
$scope->{$name}++;
return $name;
}
1;
sub quote {
my ($name, $q) = @_;
return $name unless $q && $name;
$name =~ s/\Q$quote_char/$quote_char$quote_char/g;
return "$quote_char$name$quote_char";
}
# -------------------------------------------------------------------
# All bad art is the result of good intentions.
# Oscar Wilde
# -------------------------------------------------------------------
=pod
=head1 CREDITS
Mad props to Tim Bunce for much of the logic stolen from his "mysql2ora"
script.
=head1 AUTHORS
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>,
Alexander Hartmaier E<lt>abraxxa@cpan.orgE<gt>,
Fabien Wernli E<lt>faxmodem@cpan.orgE<gt>.
=head1 SEE ALSO
SQL::Translator, DDL::Oracle, mysql2ora.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/PostgreSQL.pm 0000644 0000000 0000000 00000106041 14551163724 023725 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::PostgreSQL;
=head1 NAME
SQL::Translator::Producer::PostgreSQL - PostgreSQL producer for SQL::Translator
=head1 SYNOPSIS
my $t = SQL::Translator->new( parser => '...', producer => 'PostgreSQL' );
$t->translate;
=head1 DESCRIPTION
Creates a DDL suitable for PostgreSQL. Very heavily based on the Oracle
producer.
Now handles PostGIS Geometry and Geography data types on table definitions.
Does not yet support PostGIS Views.
=head2 Producer Args
You can change the global behavior of the producer by passing the following options to the
C<producer_args> attribute of L<SQL::Translator> (a minimal example follows this list).
=over 4
=item postgres_version
The version of postgres to generate DDL for. Turns on features only available in later versions. The following features are supported
=over 4
=item IF EXISTS
If your postgres_version is higher than 8.003 (I should hope it is by now), then the DDL
generated for dropping objects in the database will contain IF EXISTS.
=back
=item attach_comments
Generates table and column comments via the COMMENT command rather than as a comment in
the DDL. You could then look it up with \dt+ or \d+ (for tables and columns respectively)
in psql. The comment is dollar quoted with $comment$ so you can include ' in it. Just to clarify: you get this
CREATE TABLE foo ...;
COMMENT on TABLE foo IS $comment$hi there$comment$;
instead of this
-- comment
CREATE TABLE foo ...;
=back
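For example (a sketch; the parser and version number are illustrative):

 my $translator = SQL::Translator->new(
     parser        => 'MySQL',
     producer      => 'PostgreSQL',
     producer_args => {
         postgres_version => '8.4.0',   # enables IF EXISTS on drop statements
         attach_comments  => 1,         # emit COMMENT ON statements
     },
 );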
=head2 Extra args
Various schema types support various options via the C<extra> attribute; a short sketch follows the list below.
=over 2
=item Tables
=over 2
=item temporary
Produces a temporary table.
=back
=item Views
=over 2
=item temporary
Produces a temporary view.
=item materialized
Produces a materialized view.
=back
=item Fields
=over 2
=item list, custom_type_name
For enum types, list is the list of valid values, and custom_type_name is the name that
the type should have. Defaults to C<< <table>_<field>_type >>.
=item geometry_type, srid, dimensions, geography_type
Fields for use with PostGIS types.
=back
=back
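A sketch of how these might be set when building a schema by hand (the field names and values are illustrative):

 # An enum column that should become a named PostgreSQL type
 $table->add_field(
     name      => 'status',
     data_type => 'enum',
     extra     => {
         list             => [qw(pending active retired)],
         custom_type_name => 'order_status_type',
     },
 );

 # A PostGIS geometry column
 $table->add_field(
     name      => 'location',
     data_type => 'geometry',
     extra     => {
         geometry_type => 'POINT',
         srid          => 4326,
         dimensions    => 2,
     },
 );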
=cut
use strict;
use warnings;
our ($DEBUG, $WARN);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use base qw(SQL::Translator::Producer);
use SQL::Translator::Schema::Constants;
use SQL::Translator::Utils
qw(debug header_comment parse_dbms_version batch_alter_table_statements normalize_quote_options);
use SQL::Translator::Generator::DDL::PostgreSQL;
use Data::Dumper;
use constant MAX_ID_LENGTH => 62;
{
my ($quoting_generator, $nonquoting_generator);
sub _generator {
my $options = shift;
return $options->{generator} if exists $options->{generator};
return normalize_quote_options($options)
? $quoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new
: $nonquoting_generator ||= SQL::Translator::Generator::DDL::PostgreSQL->new(quote_chars => [],);
}
}
my (%translate);
BEGIN {
%translate = (
#
# MySQL types
#
double => 'double precision',
decimal => 'numeric',
int => 'integer',
mediumint => 'integer',
tinyint => 'smallint',
char => 'character',
varchar => 'character varying',
longtext => 'text',
mediumtext => 'text',
tinytext => 'text',
tinyblob => 'bytea',
blob => 'bytea',
mediumblob => 'bytea',
longblob => 'bytea',
enum => 'character varying',
set => 'character varying',
datetime => 'timestamp',
year => 'date',
#
# Oracle types
#
number => 'integer',
varchar2 => 'character varying',
long => 'text',
clob => 'text',
#
# Sybase types
#
comment => 'text',
#
# MS Access types
#
memo => 'text',
);
}
my %truncated;
=pod
=head1 PostgreSQL Create Table Syntax
CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name (
{ column_name data_type [ DEFAULT default_expr ] [ column_constraint [, ... ] ]
| table_constraint } [, ... ]
)
[ INHERITS ( parent_table [, ... ] ) ]
[ WITH OIDS | WITHOUT OIDS ]
where column_constraint is:
[ CONSTRAINT constraint_name ]
{ NOT NULL | NULL | UNIQUE | PRIMARY KEY |
CHECK (expression) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
and table_constraint is:
[ CONSTRAINT constraint_name ]
{ UNIQUE ( column_name [, ... ] ) |
PRIMARY KEY ( column_name [, ... ] ) |
CHECK ( expression ) |
EXCLUDE [USING acc_method] (expression) [INCLUDE (column [, ...])] [WHERE (predicate)]
FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
=head1 Create Index Syntax
CREATE [ UNIQUE ] INDEX index_name ON table
[ USING acc_method ] ( column [ ops_name ] [, ...] )
[ INCLUDE ( column [, ...] ) ]
[ WHERE predicate ]
CREATE [ UNIQUE ] INDEX index_name ON table
[ USING acc_method ] ( func_name( column [, ... ]) [ ops_name ] )
[ WHERE predicate ]
=cut
sub produce {
my $translator = shift;
local $DEBUG = $translator->debug;
local $WARN = $translator->show_warnings;
my $no_comments = $translator->no_comments;
my $add_drop_table = $translator->add_drop_table;
my $schema = $translator->schema;
my $pargs = $translator->producer_args;
my $postgres_version = parse_dbms_version($pargs->{postgres_version}, 'perl');
my $generator = _generator({ quote_identifiers => $translator->quote_identifiers });
my @output;
push @output, header_comment unless ($no_comments);
my (@table_defs, @fks);
my %type_defs;
for my $table ($schema->get_tables) {
my ($table_def, $fks) = create_table(
$table,
{
generator => $generator,
no_comments => $no_comments,
postgres_version => $postgres_version,
add_drop_table => $add_drop_table,
type_defs => \%type_defs,
attach_comments => $pargs->{attach_comments}
}
);
push @table_defs, $table_def;
push @fks, @$fks;
}
for my $view ($schema->get_views) {
push @table_defs,
create_view(
$view,
{
postgres_version => $postgres_version,
add_drop_view => $add_drop_table,
generator => $generator,
no_comments => $no_comments,
}
);
}
for my $trigger ($schema->get_triggers) {
push @table_defs,
create_trigger(
$trigger,
{
add_drop_trigger => $add_drop_table,
generator => $generator,
no_comments => $no_comments,
}
);
}
push @output, map {"$_;\n\n"} values %type_defs;
push @output, map {"$_;\n\n"} @table_defs;
if (@fks) {
push @output, "--\n-- Foreign Key Definitions\n--\n\n"
unless $no_comments;
push @output, map {"$_;\n\n"} @fks;
}
if ($WARN) {
if (%truncated) {
warn "Truncated " . keys(%truncated) . " names:\n";
warn "\t" . join("\n\t", sort keys %truncated) . "\n";
}
}
return wantarray
? @output
: join('', @output);
}
{
my %global_names;
sub mk_name {
my $basename = shift || '';
my $type = shift || '';
my $scope = shift || '';
my $critical = shift || '';
my $basename_orig = $basename;
my $max_name
= $type
? MAX_ID_LENGTH - (length($type) + 1)
: MAX_ID_LENGTH;
$basename = substr($basename, 0, $max_name)
if length($basename) > $max_name;
my $name = $type ? "${type}_$basename" : $basename;
if ($basename ne $basename_orig and $critical) {
my $show_type = $type ? "+'$type'" : "";
warn "Truncating '$basename_orig'$show_type to ", MAX_ID_LENGTH, " character limit to make '$name'\n"
if $WARN;
$truncated{$basename_orig} = $name;
}
$scope ||= \%global_names;
if (my $prev = $scope->{$name}) {
my $name_orig = $name;
$name .= sprintf("%02d", ++$prev);
substr($name, MAX_ID_LENGTH - 3) = "00"
if length($name) > MAX_ID_LENGTH;
warn "The name '$name_orig' has been changed to ", "'$name' to make it unique.\n"
if $WARN;
$scope->{$name_orig}++;
}
$scope->{$name}++;
return $name;
}
}
sub is_geometry {
my $field = shift;
return 1 if $field->data_type eq 'geometry';
}
sub is_geography {
my $field = shift;
return 1 if $field->data_type eq 'geography';
}
sub create_table {
my ($table, $options) = @_;
my $generator = _generator($options);
my $no_comments = $options->{no_comments} || 0;
my $add_drop_table = $options->{add_drop_table} || 0;
my $postgres_version = $options->{postgres_version} || 0;
my $type_defs = $options->{type_defs} || {};
my $attach_comments = $options->{attach_comments};
my $table_name = $table->name or next;
my $table_name_qt = $generator->quote($table_name);
my (@comments, @field_defs, @index_defs, @constraint_defs, @fks);
push @comments, "--\n-- Table: $table_name\n--\n" unless $no_comments;
my @comment_statements;
if (my $comments = $table->comments) {
if ($attach_comments) {
# this follows the example in the MySQL producer, where all comments are added as
# table comments, even though they could have originally been parsed as DDL comments
# quoted via $$ string so there can be 'quotes' inside the comments
my $comment_ddl = "COMMENT on TABLE $table_name_qt IS \$comment\$$comments\$comment\$";
push @comment_statements, $comment_ddl;
} elsif (!$no_comments) {
$comments =~ s/^/-- /mg;
push @comments, "-- Comments:\n$comments\n--\n";
}
}
#
# Fields
#
for my $field ($table->get_fields) {
push @field_defs,
create_field(
$field,
{
generator => $generator,
postgres_version => $postgres_version,
type_defs => $type_defs,
constraint_defs => \@constraint_defs,
attach_comments => $attach_comments
}
);
if ($attach_comments) {
my $field_comments = $field->comments;
next unless $field_comments;
my $field_name_qt = $generator->quote($field->name);
my $comment_ddl = "COMMENT on COLUMN $table_name_qt.$field_name_qt IS \$comment\$$field_comments\$comment\$";
push @comment_statements, $comment_ddl;
}
}
#
# Index Declarations
#
for my $index ($table->get_indices) {
my ($idef, $constraints) = create_index(
$index,
{
generator => $generator,
postgres_version => $postgres_version,
}
);
$idef and push @index_defs, $idef;
push @constraint_defs, @$constraints;
}
#
# Table constraints
#
for my $c ($table->get_constraints) {
my ($cdefs, $fks) = create_constraint(
$c,
{
generator => $generator,
}
);
push @constraint_defs, @$cdefs;
push @fks, @$fks;
}
my $create_statement = join("\n", @comments);
if ($add_drop_table) {
if ($postgres_version >= 8.002) {
$create_statement .= "DROP TABLE IF EXISTS $table_name_qt CASCADE;\n";
} else {
$create_statement .= "DROP TABLE $table_name_qt CASCADE;\n";
}
}
my $temporary = $table->extra->{temporary} ? "TEMPORARY " : "";
$create_statement .= "CREATE ${temporary}TABLE $table_name_qt (\n"
. join(",\n", map {" $_"} @field_defs, @constraint_defs) . "\n)";
$create_statement .= @index_defs ? ';' : q{};
$create_statement .= ($create_statement =~ /;$/ ? "\n" : q{}) . join(";\n", @index_defs);
#
# Geometry
#
if (my @geometry_columns = grep { is_geometry($_) } $table->get_fields) {
$create_statement .= join(";\n", '', map { drop_geometry_column($_, $options) } @geometry_columns)
if $options->{add_drop_table};
$create_statement .= join(";\n", '', map { add_geometry_column($_, $options) } @geometry_columns);
}
if (@comment_statements) {
$create_statement .= join(";\n", '', @comment_statements);
}
return $create_statement, \@fks;
}
sub create_view {
my ($view, $options) = @_;
my $generator = _generator($options);
my $postgres_version = $options->{postgres_version} || 0;
my $add_drop_view = $options->{add_drop_view};
my $view_name = $view->name;
debug("PKG: Looking at view '${view_name}'\n");
my $create = '';
$create .= "--\n-- View: " . $generator->quote($view_name) . "\n--\n"
unless $options->{no_comments};
if ($add_drop_view) {
if ($postgres_version >= 8.002) {
$create .= "DROP VIEW IF EXISTS " . $generator->quote($view_name) . ";\n";
} else {
$create .= "DROP VIEW " . $generator->quote($view_name) . ";\n";
}
}
$create .= 'CREATE';
my $extra = $view->extra;
$create .= " TEMPORARY"
if exists($extra->{temporary}) && $extra->{temporary};
$create .= " MATERIALIZED"
if exists($extra->{materialized}) && $extra->{materialized};
$create .= " VIEW " . $generator->quote($view_name);
if (my @fields = $view->fields) {
my $field_list = join ', ', map { $generator->quote($_) } @fields;
$create .= " ( ${field_list} )";
}
if (my $sql = $view->sql) {
$create .= " AS\n ${sql}\n";
}
if ($extra->{check_option}) {
$create .= ' WITH ' . uc $extra->{check_option} . ' CHECK OPTION';
}
return $create;
}
# Returns an enum custom type name and list of values iff the field looks like an enum.
sub _enum_typename_and_values {
my $field = shift;
if (ref $field->extra->{list} eq 'ARRAY') { # can't do anything unless we know the list
if ($field->extra->{custom_type_name}) {
return ($field->extra->{custom_type_name}, $field->extra->{list});
} elsif ($field->data_type eq 'enum') {
my $name = $field->table->name . '_' . $field->name . '_type';
return ($name, $field->extra->{list});
}
}
return ();
}
{
my %field_name_scope;
sub create_field {
my ($field, $options) = @_;
my $generator = _generator($options);
my $table_name = $field->table->name;
my $constraint_defs = $options->{constraint_defs} || [];
my $postgres_version = $options->{postgres_version} || 0;
my $type_defs = $options->{type_defs} || {};
my $attach_comments = $options->{attach_comments};
$field_name_scope{$table_name} ||= {};
my $field_name = $field->name;
my $field_comments = '';
if (!$attach_comments and my $comments = $field->comments) {
$comments =~ s/(?<!\A)^/  -- /mg;
$field_comments = "-- comments:\n  -- $comments\n";
}
my $field_def = $field_comments . $generator->quote($field_name);
#
# Datatype
#
my $data_type = lc $field->data_type;
my %extra = $field->extra;
my ($enum_typename, $list) = _enum_typename_and_values($field);
if ($postgres_version >= 8.003 && $enum_typename) {
my $commalist = join(', ', map { __PACKAGE__->_quote_string($_) } @$list);
$field_def .= ' ' . $enum_typename;
my $new_type_def
= "DROP TYPE IF EXISTS $enum_typename CASCADE;\n" . "CREATE TYPE $enum_typename AS ENUM ($commalist)";
if (!exists $type_defs->{$enum_typename}) {
$type_defs->{$enum_typename} = $new_type_def;
} elsif ($type_defs->{$enum_typename} ne $new_type_def) {
die "Attempted to redefine type name '$enum_typename' as a different type.\n";
}
} else {
$field_def .= ' ' . convert_datatype($field);
}
#
# Default value
#
__PACKAGE__->_apply_default_value(
$field,
\$field_def,
[
'NULL' => \'NULL',
'now()' => 'now()',
'CURRENT_TIMESTAMP' => 'CURRENT_TIMESTAMP',
],
);
#
# Not null constraint
#
$field_def .= ' NOT NULL' unless $field->is_nullable;
#
# Geometry constraints
#
if (is_geometry($field)) {
foreach (create_geometry_constraints($field, $options)) {
my ($cdefs, $fks) = create_constraint($_, $options);
push @$constraint_defs, @$cdefs;
push @$fks, @$fks;
}
}
return $field_def;
}
}
sub create_geometry_constraints {
my ($field, $options) = @_;
my $fname = _generator($options)->quote($field);
my @constraints;
push @constraints,
SQL::Translator::Schema::Constraint->new(
name => "enforce_dims_" . $field->name,
expression => "(ST_NDims($fname) = " . $field->extra->{dimensions} . ")",
table => $field->table,
type => CHECK_C,
);
push @constraints,
SQL::Translator::Schema::Constraint->new(
name => "enforce_srid_" . $field->name,
expression => "(ST_SRID($fname) = " . $field->extra->{srid} . ")",
table => $field->table,
type => CHECK_C,
);
push @constraints,
SQL::Translator::Schema::Constraint->new(
name => "enforce_geotype_" . $field->name,
expression => "(GeometryType($fname) = "
. __PACKAGE__->_quote_string($field->extra->{geometry_type})
. "::text OR $fname IS NULL)",
table => $field->table,
type => CHECK_C,
);
return @constraints;
}
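# Walk the options of an index or constraint; for every hashref option,
# each defined value is handed to the callback registered for its
# lower-cased key in the $dispatcher hashref.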
sub _extract_extras_from_options {
my ($options_haver, $dispatcher) = @_;
for my $opt ($options_haver->options) {
if (ref $opt eq 'HASH') {
for my $key (keys %$opt) {
my $val = $opt->{$key};
next unless defined $val;
$dispatcher->{ lc $key }->($val);
}
}
}
}
{
my %index_name;
sub create_index {
my ($index, $options) = @_;
my $generator = _generator($options);
my $table_name = $index->table->name;
my $postgres_version = $options->{postgres_version} || 0;
my ($index_def, @constraint_defs);
my $name = $index->name
|| join('_', $table_name, 'idx', ++$index_name{$table_name});
my $type = $index->type || NORMAL;
my @fields = $index->fields;
return unless @fields;
my %index_extras;
_extract_extras_from_options(
$index,
{
using => sub { $index_extras{using} = "USING $_[0]" },
where => sub { $index_extras{where} = "WHERE $_[0]" },
include => sub {
my ($value) = @_;
return unless $postgres_version >= 11;
die 'Include list must be an arrayref'
unless ref $value eq 'ARRAY';
my $value_list = join ', ', @$value;
$index_extras{include} = "INCLUDE ($value_list)";
}
}
);
my $def_start = 'CONSTRAINT ' . $generator->quote($name) . ' ';
my $field_names = '(' . join(", ", (map { $_ =~ /\(.*\)/ ? $_ : ($generator->quote($_)) } @fields)) . ')';
if ($type eq PRIMARY_KEY) {
push @constraint_defs, "${def_start}PRIMARY KEY " . $field_names;
} elsif ($type eq UNIQUE) {
push @constraint_defs, "${def_start}UNIQUE " . $field_names;
} elsif ($type eq NORMAL) {
$index_def
= 'CREATE INDEX ' . $generator->quote($name) . ' on ' . $generator->quote($table_name) . ' ' . join ' ',
grep {defined} $index_extras{using}, $field_names,
@index_extras{ 'include', 'where' };
} else {
warn "Unknown index type ($type) on table $table_name.\n"
if $WARN;
}
return $index_def, \@constraint_defs;
}
}
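# Render one constraint. Returns an arrayref of clauses to embed in the
# CREATE TABLE body (PRIMARY KEY, UNIQUE, CHECK, EXCLUDE) and an arrayref
# of standalone ALTER TABLE ... ADD FOREIGN KEY statements.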
sub create_constraint {
my ($c, $options) = @_;
my $generator = _generator($options);
my $postgres_version = $options->{postgres_version} || 0;
my $table_name = $c->table->name;
my (@constraint_defs, @fks);
my %constraint_extras;
_extract_extras_from_options(
$c,
{
using => sub { $constraint_extras{using} = "USING $_[0]" },
where => sub { $constraint_extras{where} = "WHERE ( $_[0] )" },
include => sub {
my ($value) = @_;
return unless $postgres_version >= 11;
die 'Include list must be an arrayref'
unless ref $value eq 'ARRAY';
my $value_list = join ', ', @$value;
$constraint_extras{include} = "INCLUDE ( $value_list )";
},
}
);
my $name = $c->name || '';
my @fields = grep {defined} $c->fields;
my @rfields = grep {defined} $c->reference_fields;
return if !@fields && ($c->type ne CHECK_C && $c->type ne EXCLUDE);
my $def_start = $name ? 'CONSTRAINT ' . $generator->quote($name) : '';
my $field_names = '(' . join(", ", (map { $_ =~ /\(.*\)/ ? $_ : ($generator->quote($_)) } @fields)) . ')';
my $include = $constraint_extras{include} || '';
if ($c->type eq PRIMARY_KEY) {
push @constraint_defs, join ' ', grep $_, $def_start, "PRIMARY KEY", $field_names, $include;
} elsif ($c->type eq UNIQUE) {
push @constraint_defs, join ' ', grep $_, $def_start, "UNIQUE", $field_names, $include;
} elsif ($c->type eq CHECK_C) {
my $expression = $c->expression;
push @constraint_defs, join ' ', grep $_, $def_start, "CHECK ($expression)";
} elsif ($c->type eq FOREIGN_KEY) {
my $def .= join ' ', grep $_, "ALTER TABLE",
$generator->quote($table_name), 'ADD', $def_start,
"FOREIGN KEY $field_names";
$def .= "\n REFERENCES " . $generator->quote($c->reference_table);
if (@rfields) {
$def .= ' (' . join(', ', map { $generator->quote($_) } @rfields) . ')';
}
if ($c->match_type) {
$def .= ' MATCH ' . (($c->match_type =~ /full/i) ? 'FULL' : 'PARTIAL');
}
if ($c->on_delete) {
$def .= ' ON DELETE ' . $c->on_delete;
}
if ($c->on_update) {
$def .= ' ON UPDATE ' . $c->on_update;
}
if ($c->deferrable) {
$def .= ' DEFERRABLE';
}
push @fks, "$def";
} elsif ($c->type eq EXCLUDE) {
my $using = $constraint_extras{using} || '';
my $expression = $c->expression;
my $where = $constraint_extras{where} || '';
push @constraint_defs, join ' ', grep $_, $def_start, 'EXCLUDE', $using, "( $expression )", $include, $where;
}
return \@constraint_defs, \@fks;
}
sub create_trigger {
my ($trigger, $options) = @_;
my $generator = _generator($options);
my @statements;
push @statements, sprintf('DROP TRIGGER IF EXISTS %s', $generator->quote($trigger->name))
if $options->{add_drop_trigger};
my $scope = $trigger->scope || '';
$scope = " FOR EACH $scope" if $scope;
push @statements,
sprintf(
'CREATE TRIGGER %s %s %s ON %s%s %s',
$generator->quote($trigger->name),
$trigger->perform_action_when,
join(' OR ', @{ $trigger->database_events }),
$generator->quote($trigger->on_table),
$scope, $trigger->action,
);
return @statements;
}
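# Map a field's source data type and size onto a PostgreSQL type:
# enum/set collapse to 'character varying', auto-increment integers become
# serial/bigserial, %translate covers the remaining renames, and the size
# is kept only for types that actually accept one.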
sub convert_datatype {
my ($field) = @_;
my @size = $field->size;
my $data_type = lc $field->data_type;
my $array = $data_type =~ s/\[\]$//;
if ($data_type eq 'enum') {
# my $len = 0;
# $len = ($len < length($_)) ? length($_) : $len for (@$list);
# my $chk_name = mk_name( $table_name.'_'.$field_name, 'chk' );
# push @$constraint_defs,
# 'CONSTRAINT "$chk_name" CHECK (' . $generator->quote(field_name) .
# qq[IN ($commalist))];
$data_type = 'character varying';
} elsif ($data_type eq 'set') {
$data_type = 'character varying';
} elsif ($field->is_auto_increment) {
if ((defined $size[0] && $size[0] > 11) or $data_type eq 'bigint') {
$data_type = 'bigserial';
} else {
$data_type = 'serial';
}
undef @size;
} else {
$data_type
= defined $translate{ lc $data_type }
? $translate{ lc $data_type }
: $data_type;
}
if ($data_type =~ /^time/i || $data_type =~ /^interval/i) {
if (defined $size[0] && $size[0] > 6) {
$size[0] = 6;
}
}
if ($data_type eq 'integer') {
if (defined $size[0] && $size[0] > 0) {
if ($size[0] > 10) {
$data_type = 'bigint';
} elsif ($size[0] < 5) {
$data_type = 'smallint';
} else {
$data_type = 'integer';
}
} else {
$data_type = 'integer';
}
}
my $type_with_size = join('|',
'bit', 'varbit', 'character', 'bit varying', 'character varying',
'time', 'timestamp', 'interval', 'numeric', 'float');
if ($data_type !~ /$type_with_size/) {
@size = ();
}
if (defined $size[0] && $size[0] > 0 && $data_type =~ /^time/i) {
$data_type =~ s/^(time.*?)( with.*)?$/$1($size[0])/;
$data_type .= $2 if (defined $2);
} elsif (defined $size[0] && $size[0] > 0) {
$data_type .= '(' . join(',', @size) . ')';
}
if ($array) {
$data_type .= '[]';
}
#
# Geography
#
if ($data_type eq 'geography') {
$data_type .= '(' . $field->extra->{geography_type} . ',' . $field->extra->{srid} . ')';
}
return $data_type;
}
sub alter_field {
my ($from_field, $to_field, $options) = @_;
die "Can't alter field in another table"
if ($from_field->table->name ne $to_field->table->name);
my $generator = _generator($options);
my @out;
# drop geometry column and constraints
push @out, drop_geometry_column($from_field, $options), drop_geometry_constraints($from_field, $options),
if is_geometry($from_field);
# it's necessary to start with rename column cause this would affect
# all of the following statements which would be broken if do the
# rename later
# BUT: drop geometry is done before the rename, cause it work's on the
# $from_field directly
push @out,
sprintf('ALTER TABLE %s RENAME COLUMN %s TO %s',
map($generator->quote($_), $to_field->table->name, $from_field->name, $to_field->name,),)
if ($from_field->name ne $to_field->name);
push @out,
sprintf('ALTER TABLE %s ALTER COLUMN %s SET NOT NULL',
map($generator->quote($_), $to_field->table->name, $to_field->name),)
if (!$to_field->is_nullable and $from_field->is_nullable);
push @out,
sprintf('ALTER TABLE %s ALTER COLUMN %s DROP NOT NULL',
map($generator->quote($_), $to_field->table->name, $to_field->name),)
if (!$from_field->is_nullable and $to_field->is_nullable);
my $from_dt = convert_datatype($from_field);
my $to_dt = convert_datatype($to_field);
push @out,
sprintf('ALTER TABLE %s ALTER COLUMN %s TYPE %s',
map($generator->quote($_), $to_field->table->name, $to_field->name), $to_dt,)
if ($to_dt ne $from_dt);
my ($from_enum_typename, $from_list) = _enum_typename_and_values($from_field);
my ($to_enum_typename, $to_list) = _enum_typename_and_values($to_field);
if ( $from_enum_typename
&& $to_enum_typename
&& $from_enum_typename eq $to_enum_typename) {
# See if new enum values were added, and update the enum
my %existing_vals = map +($_ => 1), @$from_list;
my %desired_vals = map +($_ => 1), @$to_list;
my @add_vals = grep !$existing_vals{$_}, keys %desired_vals;
my @del_vals = grep !$desired_vals{$_}, keys %existing_vals;
my $pg_ver_ok = ($options->{postgres_version} || 0) >= 9.001;
push @out, '-- Set $sqlt->producer_args->{postgres_version} >= 9.001 to alter enums'
if !$pg_ver_ok && @add_vals;
for (@add_vals) {
push @out, sprintf '%sALTER TYPE %s ADD VALUE IF NOT EXISTS %s',
($pg_ver_ok ? '' : '-- '), $to_enum_typename,
$generator->quote_string($_);
}
push @out, "-- Unimplemented: delete values from enum type '$to_enum_typename': " . join(", ", @del_vals)
if @del_vals;
}
my $old_default = $from_field->default_value;
my $new_default = $to_field->default_value;
my $default_value = $to_field->default_value;
# fixes bug where output like this was created:
# ALTER TABLE users ALTER COLUMN column SET DEFAULT ThisIsUnescaped;
if (ref $default_value eq "SCALAR") {
$default_value = $$default_value;
} elsif (defined $default_value
&& $to_dt =~ /^(character|text|timestamp|date)/xsmi) {
$default_value = __PACKAGE__->_quote_string($default_value);
}
push @out,
sprintf(
'ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s',
map($generator->quote($_), $to_field->table->name, $to_field->name,),
$default_value,
)
if (defined $new_default
&& (!defined $old_default || $old_default ne $new_default));
# fixes bug where removing the DEFAULT statement of a column
# would result in no change
push @out,
sprintf('ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT',
map($generator->quote($_), $to_field->table->name, $to_field->name,),)
if (!defined $new_default && defined $old_default);
# add geometry column and constraints
push @out, add_geometry_column($to_field, $options), add_geometry_constraints($to_field, $options),
if is_geometry($to_field);
return wantarray ? @out : join(";\n", @out);
}
sub rename_field { alter_field(@_) }
sub add_field {
my ($new_field, $options) = @_;
my $out = sprintf(
'ALTER TABLE %s ADD COLUMN %s',
_generator($options)->quote($new_field->table->name),
create_field($new_field, $options)
);
$out .= ";\n" . add_geometry_column($new_field, $options) . ";\n" . add_geometry_constraints($new_field, $options)
if is_geometry($new_field);
return $out;
}
sub drop_field {
my ($old_field, $options) = @_;
my $generator = _generator($options);
my $out = sprintf(
'ALTER TABLE %s DROP COLUMN %s',
$generator->quote($old_field->table->name),
$generator->quote($old_field->name)
);
$out .= ";\n" . drop_geometry_column($old_field, $options)
if is_geometry($old_field);
return $out;
}
sub add_geometry_column {
my ($field, $options) = @_;
return sprintf(
"INSERT INTO geometry_columns VALUES (%s,%s,%s,%s,%s,%s,%s)",
map(__PACKAGE__->_quote_string($_),
'',
$field->table->schema->name,
$options->{table} ? $options->{table} : $field->table->name,
$field->name,
$field->extra->{dimensions},
$field->extra->{srid},
$field->extra->{geometry_type},
),
);
}
sub drop_geometry_column {
my ($field) = @_;
return
sprintf("DELETE FROM geometry_columns WHERE f_table_schema = %s AND f_table_name = %s AND f_geometry_column = %s",
map(__PACKAGE__->_quote_string($_), $field->table->schema->name, $field->table->name, $field->name,),);
}
sub add_geometry_constraints {
my ($field, $options) = @_;
return join(";\n", map { alter_create_constraint($_, $options) } create_geometry_constraints($field, $options));
}
sub drop_geometry_constraints {
my ($field, $options) = @_;
return join(";\n", map { alter_drop_constraint($_, $options) } create_geometry_constraints($field, $options));
}
sub alter_table {
my ($to_table, $options) = @_;
my $generator = _generator($options);
my $out = sprintf('ALTER TABLE %s %s', $generator->quote($to_table->name), $options->{alter_table_action});
$out .= ";\n" . $options->{geometry_changes}
if $options->{geometry_changes};
return $out;
}
sub rename_table {
my ($old_table, $new_table, $options) = @_;
my $generator = _generator($options);
$options->{alter_table_action} = "RENAME TO " . $generator->quote($new_table);
my @geometry_changes
= map { drop_geometry_column($_, $options), add_geometry_column($_, { %{$options}, table => $new_table }), }
grep { is_geometry($_) } $old_table->get_fields;
$options->{geometry_changes} = join(";\n", @geometry_changes)
if @geometry_changes;
return alter_table($old_table, $options);
}
sub alter_create_index {
my ($index, $options) = @_;
my $generator = _generator($options);
my ($idef, $constraints) = create_index($index, $options);
return $index->type eq NORMAL
? $idef
: sprintf('ALTER TABLE %s ADD %s', $generator->quote($index->table->name), join(q{}, @$constraints));
}
sub alter_drop_index {
my ($index, $options) = @_;
return 'DROP INDEX ' . _generator($options)->quote($index->name);
}
sub alter_drop_constraint {
my ($c, $options) = @_;
my $generator = _generator($options);
# NOT NULL constraint does not require a DROP CONSTRAINT statement
if ($c->type eq NOT_NULL) {
return;
}
# attention: Postgres has a very special naming structure for naming
# foreign keys and primary keys. It names them using the name of the
# table as prefix and fkey or pkey as suffix, concatenated by an underscore
my $c_name;
if ($c->name) {
# Already has a name, just use it
$c_name = $c->name;
} else {
# if the name is dotted we need the table, not schema nor database
my ($tablename) = reverse split /[.]/, $c->table->name;
if ($c->type eq FOREIGN_KEY) {
# Doesn't have a name, and is foreign key, append '_fkey'
$c_name = $tablename . '_' . ($c->fields)[0] . '_fkey';
} elsif ($c->type eq PRIMARY_KEY) {
# Doesn't have a name, and is primary key, append '_pkey'
$c_name = $tablename . '_pkey';
}
}
return sprintf('ALTER TABLE %s DROP CONSTRAINT %s', map { $generator->quote($_) } $c->table->name, $c_name,);
}
sub alter_create_constraint {
my ($index, $options) = @_;
my $generator = _generator($options);
my ($defs, $fks) = create_constraint(@_);
# return if there are no constraint definitions so we don't run
# into output like this:
# ALTER TABLE users ADD ;
return unless (@{$defs} || @{$fks});
return $index->type eq FOREIGN_KEY
? join(q{}, @{$fks})
: join(' ', 'ALTER TABLE', $generator->quote($index->table->name), 'ADD', join(q{}, @{$defs}, @{$fks}));
}
sub drop_table {
my ($table, $options) = @_;
my $generator = _generator($options);
my $out = "DROP TABLE " . $generator->quote($table) . " CASCADE";
my @geometry_drops = map { drop_geometry_column($_); }
grep { is_geometry($_) } $table->get_fields;
$out .= join(";\n", '', @geometry_drops) if @geometry_drops;
return $out;
}
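# When the diff renames a table, drops must run against the old name,
# then the table is renamed, and only then are the remaining changes
# applied against the new name; without a rename the statements can be
# emitted in the default order.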
sub batch_alter_table {
my ($table, $diff_hash, $options) = @_;
# as long as we're not renaming the table we don't need to be here
if (@{ $diff_hash->{rename_table} } == 0) {
return batch_alter_table_statements($diff_hash, $options);
}
# first we need to perform drops which are on old table
my @sql = batch_alter_table_statements(
$diff_hash, $options, qw(
alter_drop_constraint
alter_drop_index
drop_field
)
);
# next comes the rename_table
my $old_table = $diff_hash->{rename_table}[0][0];
push @sql, rename_table($old_table, $table, $options);
# for alter_field (and so also rename_field) we need to make sure old
# field has table name set to new table otherwise calling alter_field dies
$diff_hash->{alter_field} = [ map { $_->[0]->table($table) && $_ } @{ $diff_hash->{alter_field} } ];
$diff_hash->{rename_field} = [ map { $_->[0]->table($table) && $_ } @{ $diff_hash->{rename_field} } ];
# now add everything else
push @sql, batch_alter_table_statements(
$diff_hash, $options, qw(
add_field
alter_field
rename_field
alter_create_index
alter_create_constraint
alter_table
)
);
return @sql;
}
1;
# -------------------------------------------------------------------
# Life is full of misery, loneliness, and suffering --
# and it's all over much too soon.
# Woody Allen
# -------------------------------------------------------------------
=pod
=head1 SEE ALSO
SQL::Translator, SQL::Translator::Producer::Oracle.
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/YAML.pm 0000644 0000000 0000000 00000013124 14551163724 022463 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::YAML;
=head1 NAME
SQL::Translator::Producer::YAML - A YAML producer for SQL::Translator
=head1 SYNOPSIS
use SQL::Translator;
my $translator = SQL::Translator->new(producer => 'YAML');
=head1 DESCRIPTION
This module uses YAML to serialize a schema to a string so that it
can be saved to disk. Serializing a schema and then calling producers
on the stored copy can realize significant performance gains when parsing
takes a long time.
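For example, a slow-to-parse schema can be translated to YAML once and the
stored copy used for later producer runs (a minimal sketch; the file names
and the target producer are illustrative):
use SQL::Translator;
# One-off: parse the original SQL and save the schema as YAML
my $to_yaml = SQL::Translator->new(from => 'MySQL', to => 'YAML');
open my $fh, '>', 'schema.yml' or die $!;
print {$fh} $to_yaml->translate('schema.sql');
close $fh;
# Later runs parse the much cheaper YAML instead of the original SQL
my $from_yaml = SQL::Translator->new(from => 'YAML', to => 'PostgreSQL');
print $from_yaml->translate('schema.yml');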
=cut
use strict;
use warnings;
our $VERSION = '1.65';
use YAML qw(Dump);
sub produce {
my $translator = shift;
my $schema = $translator->schema;
return Dump({
schema => {
tables => { map { ($_->name => view_table($_)) } $schema->get_tables, },
views => { map { ($_->name => view_view($_)) } $schema->get_views, },
triggers => { map { ($_->name => view_trigger($_)) } $schema->get_triggers, },
procedures => { map { ($_->name => view_procedure($_)) } $schema->get_procedures, },
},
translator => {
add_drop_table => $translator->add_drop_table,
filename => $translator->filename,
no_comments => $translator->no_comments,
parser_args => $translator->parser_args,
producer_args => $translator->producer_args,
parser_type => $translator->parser_type,
producer_type => $translator->producer_type,
show_warnings => $translator->show_warnings,
trace => $translator->trace,
version => $translator->version,
},
keys %{ $schema->extra } ? ('extra' => { $schema->extra }) : (),
});
}
sub view_table {
my $table = shift;
return {
'name' => $table->name,
'order' => $table->order,
'options' => $table->options || [],
$table->comments ? ('comments' => [ $table->comments ]) : (),
'constraints' => [
map { view_constraint($_) } $table->get_constraints
],
'indices' => [
map { view_index($_) } $table->get_indices
],
'fields' => {
map { ($_->name => view_field($_)) }
$table->get_fields
},
keys %{ $table->extra } ? ('extra' => { $table->extra }) : (),
};
}
sub view_constraint {
my $constraint = shift;
return {
'deferrable' => scalar $constraint->deferrable,
'expression' => scalar $constraint->expression,
'fields' => [ map { ref $_ ? $_->name : $_ } $constraint->field_names ],
'match_type' => scalar $constraint->match_type,
'name' => scalar $constraint->name,
'options' => scalar $constraint->options,
'on_delete' => scalar $constraint->on_delete,
'on_update' => scalar $constraint->on_update,
'reference_fields' => [ map { ref $_ ? $_->name : $_ } $constraint->reference_fields ],
'reference_table' => scalar $constraint->reference_table,
'type' => scalar $constraint->type,
keys %{ $constraint->extra }
? ('extra' => { $constraint->extra })
: (),
};
}
sub view_field {
my $field = shift;
return {
'order' => scalar $field->order,
'name' => scalar $field->name,
'data_type' => scalar $field->data_type,
'size' => [ $field->size ],
'default_value' => scalar $field->default_value,
'is_nullable' => scalar $field->is_nullable,
'is_primary_key' => scalar $field->is_primary_key,
'is_unique' => scalar $field->is_unique,
$field->is_auto_increment ? ('is_auto_increment' => 1) : (),
$field->comments ? ('comments' => [ $field->comments ]) : (),
keys %{ $field->extra } ? ('extra' => { $field->extra }) : (),
};
}
sub view_procedure {
my $procedure = shift;
return {
'order' => scalar $procedure->order,
'name' => scalar $procedure->name,
'sql' => scalar $procedure->sql,
'parameters' => scalar $procedure->parameters,
'owner' => scalar $procedure->owner,
$procedure->comments ? ('comments' => [ $procedure->comments ]) : (),
keys %{ $procedure->extra } ? ('extra' => { $procedure->extra }) : (),
};
}
sub view_trigger {
my $trigger = shift;
return {
'order' => scalar $trigger->order,
'name' => scalar $trigger->name,
'perform_action_when' => scalar $trigger->perform_action_when,
'database_events' => scalar $trigger->database_events,
'fields' => scalar $trigger->fields,
'on_table' => scalar $trigger->on_table,
'action' => scalar $trigger->action,
(
defined $trigger->scope
? ('scope' => scalar $trigger->scope,)
: ()
),
keys %{ $trigger->extra } ? ('extra' => { $trigger->extra }) : (),
};
}
sub view_view {
my $view = shift;
return {
'order' => scalar $view->order,
'name' => scalar $view->name,
'sql' => scalar $view->sql,
'fields' => scalar $view->fields,
keys %{ $view->extra } ? ('extra' => { $view->extra }) : (),
};
}
sub view_index {
my $index = shift;
return {
'name' => scalar $index->name,
'type' => scalar $index->type,
# If the index has extra properties, make sure these are written too
'fields' => [
map { ref($_) && $_->extra && keys %{ $_->extra } ? { name => $_->name, %{ $_->extra } } : "$_" }
$index->fields
],
'options' => scalar $index->options,
keys %{ $index->extra } ? ('extra' => { $index->extra }) : (),
};
}
1;
=head1 SEE ALSO
SQL::Translator, YAML, http://www.yaml.org/.
=head1 AUTHORS
darren chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/GraphViz.pm 0000644 0000000 0000000 00000041644 14551163724 023463 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::GraphViz;
=pod
=head1 NAME
SQL::Translator::Producer::GraphViz - GraphViz producer for SQL::Translator
=head1 SYNOPSIS
use SQL::Translator;
my $trans = SQL::Translator->new(
from => 'MySQL', # or your db of choice
to => 'GraphViz',
producer_args => {
out_file => 'schema.png',
bgcolor => 'lightgoldenrodyellow',
show_constraints => 1,
show_datatypes => 1,
show_sizes => 1
}
) or die SQL::Translator->error;
$trans->translate or die $trans->error;
=head1 DESCRIPTION
Creates a graph of a schema using the amazing graphviz
(see http://www.graphviz.org/) application (via
the L<GraphViz> module). It's nifty--you should try it!
=head1 PRODUCER ARGS
All L<GraphViz> constructor attributes are accepted and passed
through to the L<GraphViz> constructor. The following defaults are assumed
for some attributes:
layout => 'dot',
overlap => 'false',
node => {
shape => 'record',
style => 'filled',
fillcolor => 'white',
},
# in inches
width => 8.5,
height => 11,
See the documentation of L<GraphViz> for more info on these
and other attributes.
In addition this producer accepts the following arguments:
=over 4
=item * skip_tables
An arrayref or a comma-separated list of table names that should be
skipped. Note that a skipped table node may still appear if another
table has foreign key constraints pointing to the skipped table. If
this happens no table field/index information will be included.
=item * skip_tables_like
An arrayref or a comma-separated list of regular expressions matching
table names that should be skipped.
=item * cluster
Clustering of tables allows you to group and box tables according to
function or domain or whatever criteria you choose. The syntax for
clustering tables is:
cluster => 'cluster1=table1,table2;cluster2=table3,table4'
Or pass it as an arrayref like so:
cluster => [ 'cluster1=table1,table2', 'cluster2=table3,table4' ]
Or like so:
cluster => [
{ name => 'cluster1', tables => [ 'table1', 'table2' ] },
{ name => 'cluster2', tables => [ 'table3', 'table4' ] },
]
=item * out_file
The name of the file where the resulting GraphViz output will be
written. Alternatively an open filehandle can be supplied. If
undefined (the default) - the result is returned as a string.
=item * output_type (DEFAULT: 'png')
This determines which
L for details.
See the L</SYNOPSIS> above for an example of creating a simple producer using
a single template stored in the producer's DATA section.
=head1 SUB CLASS HOOKS
Sub-classes can override these methods to control the templating by giving
the template source, adding variables and giving config to the Template object.
=head2 tt_config
sub tt_config { ( INTERPOLATE => 1 ); }
Return hash of Template config to add to that given to the L<Template> C<new>
method.
=head2 tt_schema
sub tt_schema { "foo.tt"; }
sub tt_schema { local $/ = undef; \<DATA>; }
The template to use: return a file name, a scalar ref of TT
source, or an L<IO::Handle>. See L<Template> for details, as the return from
this is passed on to its C<process> method.
The default implementation uses the producer arg C<ttfile> as a filename to read
the template from. If the arg isn't there it will look for a C<__DATA__> section
in the class, reading it as template source if found. Returns undef if both
these fail, causing the produce call to fail with a 'no template!' error.
=head2 tt_vars
sub tt_vars { ( foo => "bar" ); }
Return hash of template vars to use in the template. Nothing added here
by default, but see L</tt_default_vars> for the variables you get for free.
=head2 tt_default_vars
Return a hash-ref of the default vars given to the template.
You wouldn't normally over-ride this, just inherit the default implementation,
to get the C<schema> & C<translator> variables, then over-ride L</tt_vars> to add
your own.
The current default variables are:
=over 4
=item schema
The schema to template.
=item translator
The L object.
=back
=head2 pre_process_schema
WARNING: This method is Experimental so may change!
Called with the L<SQL::Translator::Schema> object and should return one (it
doesn't have to be the same one) that will become the C<schema> variable used
in the template.
Gets called from tt_default_vars.
=head1 PRODUCER OBJECT
The rest of the methods in the class set up a sub-classable producer object.
You normally just inherit them.
=head2 new
my $tt_producer = TT::Base->new( translator => $translator );
Construct a new TT Producer object. Takes a single, named arg of the
L<SQL::Translator> object running the translation. Dies if this is not given.
=head2 translator
Return the L<SQL::Translator> object.
=head2 schema
Return the L<SQL::Translator::Schema> we are translating. This is equivalent
to C<< $tt_producer->translator->schema >>.
=head2 run
Called to actually produce the output, calling the sub class hooks. Returns the
produced text.
=head2 args
Util wrapper method around C<< TT::Base->translator->producer_args >> for
(mostly) readonly access to the producer args. How it works depends on the
number of arguments you give it and the context.
No args - Return hashref (the actual hash in Translator) or hash of args.
1 arg - Return value of the arg with the passed name.
2+ args - List of names. In list context returns values of the given arg
names, returns as a hashref in scalar context. Any names given
that don't exist in the args are returned as undef.
This is still a bit messy but is a handy way to access the producer args when
you use your own to drive the templating.
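For example, based on the behaviour described above (a usage sketch with
illustrative arg names):
# one name - the value of that producer arg
my $file = $tt_producer->args('ttfile');
# several names - values as a list in list context ...
my ($tmpl, $vars) = $tt_producer->args(qw(ttfile tt_vars));
# ... or a name/value hashref in scalar context
my $opts = $tt_producer->args(qw(ttfile tt_vars));
# no names - the producer args themselves
my $all_args = $tt_producer->args;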
=head1 SEE ALSO
L,
L,
L.
=head1 TODO
- Add support for a sqlf template repository, set as an INCLUDE_PATH,
so that sub-classes can easily include file based templates using relative
paths.
- Pass in template vars from the producer args and command line.
- Merge in L.
- Hooks to pre-process the schema and post-process the output.
=head1 AUTHOR
Mark Addison E<lt>grommit@users.sourceforge.netE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/TT/Table.pm 0000644 0000000 0000000 00000021047 14551163724 023302 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::TT::Table;
=pod
=head1 NAME
SQL::Translator::Producer::TT::Table -
Produces output using the Template Toolkit from a SQL schema, per table.
=head1 SYNOPSIS
# Normal STDOUT version
#
my $translator = SQL::Translator->new(
from => 'MySQL',
filename => 'foo_schema.sql',
to => 'TT::Table',
producer_args => {
tt_table => 'foo_table.tt',
},
);
print $translator->translate;
# To generate a file per table
#
my $translator = SQL::Translator->new(
from => 'MySQL',
filename => 'foo_schema.sql',
to => 'TT::Table',
producer_args => {
tt_table => 'foo_table.tt.html',
mk_files => 1,
mk_files_base => "./doc/tables",
mk_file_ext => ".html",
on_exists => "replace",
},
);
#
# ./doc/tables/ now contains the templated tables as $tablename.html
#
=head1 DESCRIPTION
Produces schema output using a given Template Toolkit template,
processing that template for each table in the schema. Optionally
allows you to write the result for each table to a separate file.
It needs one additional producer_arg of C<tt_table> which is the file
name of the template to use. This template will be passed a template
var of C<table>, which is the current
L<SQL::Translator::Schema::Table> table we are producing,
which you can then use to walk the schema via the methods documented
in that module. You also get C<schema> as a shortcut to the
L<SQL::Translator::Schema> for the table and C<translator>,
the L<SQL::Translator> object for this parse in case you want to get
access to any of the options etc set here.
Here's a brief example of what the template could look like:
[% table.name %]
================
[% FOREACH field = table.get_fields %]
[% field.name %] [% field.data_type %]([% field.size %])
[% END -%]
See F for a more complete example.
You can also set any of the options used to initialize the Template
object by adding them to your producer_args. See Template Toolkit docs
for details of the options.
$translator = SQL::Translator->new(
to => 'TT',
producer_args => {
ttfile => 'foo_template.tt',
INCLUDE_PATH => '/foo/templates/tt',
INTERPOLATE => 1,
},
);
If you set C<mk_files> and its additional options the producer will
write a separate file for each table in the schema. This is useful for
producing things like HTML documentation where every table gets its
own page (you could also use TTSchema producer to add an index page).
It's also particularly good for code generation where you want to
produce a class file per table.
=head1 OPTIONS
=over 4
=item tt_table
File name of the template to run for each table.
=item mk_files
Set to true to output a file for each table in the schema (as well as
returning the whole lot back to the Translator and hence STDOUT). The
file will be named after the table, with the optional C<mk_file_ext>
added and placed in the directory C<mk_files_base>.
=item mk_file_ext
Extension (without the dot) to add to the filename when using mk_files.
=item mk_files_base = DIR
Dir to build the table files into when using mk_files. Defaults to the
current directory.
=item mk_file_dir
Set true and if the file needs to be written to a directory that doesn't
exist, it will be created first.
=item on_exists [Default:die]
What to do if we are running with mk_files and a file already exists
where we want to write our output. One of "skip", "die", "replace",
"insert". The default is die.
B<replace> - Over-write the existing file with the new one, clobbering
anything already there.
B<skip> - Leave the original file as it was and don't write the new
version anywhere.
B<die> - Die with an existing file error.
B<insert> - Insert the generated output into the file between a set of
special comments (defined by the following options.) Any code between
the comments will be overwritten (ie the results from a previous
produce) but the rest of the file is left alone (your custom code).
This is particularly useful for code generation as it allows you to
generate schema derived code and then add your own custom code
to the file. Then when the schema changes you just re-produce to
insert the new code.
=item insert_comment_start
The comment to look for in the file when on_exists is C<insert>. Default
is C<SQLF_INSERT_START>. Must appear on its own line, with only
whitespace either side, to be recognised.
=item insert_comment_end
The end comment to look for in the file when on_exists is C<insert>.
Default is C<SQLF_INSERT_END>. Must appear on its own line, with only
whitespace either side, to be recognised. See the example below.
=back
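For example, with the default markers a file written with C<on_exists> set
to C<insert> could look like this between produces (an illustrative sketch;
only the region between the two markers is rewritten):
Hand written introduction - left untouched by re-produces.
SQLF_INSERT_START
...the generated output for this table lands here and is replaced
on every subsequent produce...
SQLF_INSERT_END
Hand written notes after the generated block - also left untouched.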
=cut
use strict;
use warnings;
our ($DEBUG, @EXPORT_OK);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use File::Path;
use Template;
use Data::Dumper;
use Exporter;
use base qw(Exporter);
@EXPORT_OK = qw(produce);
use SQL::Translator::Utils 'debug';
my $Translator;
sub produce {
$Translator = shift;
local $DEBUG = $Translator->debug;
my $scma = $Translator->schema;
my $pargs = $Translator->producer_args;
my $file = $pargs->{'tt_table'} or die "No template file given!";
$pargs->{on_exists} ||= "die";
debug "Processing template $file\n";
my $out;
my $tt = Template->new(
DEBUG => $DEBUG,
ABSOLUTE => 1, # Set so we can use from the command line sensibly
RELATIVE => 1, # Maybe the cmd line code should set it! Security!
%$pargs, # Allow any TT opts to be passed in the producer_args
) || die "Failed to initialize Template object: " . Template->error;
for my $tbl (sort { $a->order <=> $b->order } $scma->get_tables) {
my $outtmp;
$tt->process(
$file,
{
translator => $Translator,
schema => $scma,
table => $tbl,
},
\$outtmp
) or die "Error processing template '$file' for table '" . $tbl->name . "': " . $tt->error;
$out .= $outtmp;
# Write out the file...
write_file(table_file($tbl), $outtmp) if $pargs->{mk_files};
}
return $out;
}
# Work out the filename for a given table.
sub table_file {
my ($tbl) = shift;
my $pargs = $Translator->producer_args;
my $root = $pargs->{mk_files_base};
my $ext = $pargs->{mk_file_ext};
return "$root/$tbl.$ext";
}
# Write the src given to the file given, handling the on_exists arg.
sub write_file {
my ($file, $src) = @_;
my $pargs = $Translator->producer_args;
my $root = $pargs->{mk_files_base};
if (-e $file) {
if ($pargs->{on_exists} eq "skip") {
warn "Skipping existing $file\n";
return 1;
} elsif ($pargs->{on_exists} eq "die") {
die "File $file already exists.\n";
} elsif ($pargs->{on_exists} eq "replace") {
warn "Replacing $file.\n";
} elsif ($pargs->{on_exists} eq "insert") {
warn "Inserting into $file.\n";
$src = insert_code($file, $src);
} else {
die "Unknown on_exists action: $pargs->{on_exists}\n";
}
} else {
if (my $interactive = -t STDIN && -t STDOUT) {
warn "Creating $file.\n";
}
}
my ($dir) = $file =~ m!^(.*)/!; # Want greedy, everything before the last /
if ($dir and not -d $dir and $pargs->{mk_file_dir}) { mkpath($dir); }
debug "Writing to $file\n";
open(FILE, ">$file") or die "Error opening file $file : $!\n";
print FILE $src;
close(FILE);
}
# Reads file and inserts code between the insert comments and returns the new
# source.
sub insert_code {
my ($file, $src) = @_;
my $pargs = $Translator->producer_args;
my $cstart = $pargs->{insert_comment_start} || "SQLF_INSERT_START";
my $cend = $pargs->{insert_comment_end} || "SQLF_INSERT_END";
# Slurp in the original file
open(FILE, "<", "$file") or die "Error opening file $file : $!\n";
local $/ = undef;
my $orig = <FILE>;
close(FILE);
# Insert the new code between the insert comments
unless ($orig =~ s/^\s*?$cstart\s*?\n.*?^\s*?$cend\s*?\n/\n$cstart\n$src\n$cend\n/ms) {
warn "No insert done\n";
}
return $orig;
}
1;
=pod
=head1 AUTHOR
Mark Addison E<lt>grommit@users.sourceforge.netE<gt>.
=head1 TODO
- Some tests for the various on exists options (they have been tested
implicitly through use in a project but need some proper tests).
- More docs on code generation strategies.
- Better hooks for filename generation.
- Integrate with L and
L.
=head1 SEE ALSO
SQL::Translator.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Producer/DB2.pm 0000644 0000000 0000000 00000033720 14551163724 022274 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::DB2;
=head1 NAME
SQL::Translator::Producer::DB2 - DB2 SQL producer
=head1 SYNOPSIS
use SQL::Translator;
my $t = SQL::Translator->new( parser => '...', producer => 'DB2' );
print $translator->translate( $file );
=head1 DESCRIPTION
Creates an SQL DDL suitable for DB2.
=cut
use strict;
use warnings;
our ($DEBUG, $WARN);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use SQL::Translator::Schema::Constants;
use SQL::Translator::Utils qw(header_comment);
# http://publib.boulder.ibm.com/infocenter/db2help/topic/com.ibm.db2.udb.doc/ad/r0006844.htm
# This is a terrible WTDI, each Parser should parse down to some standard set
# of SQL data types, with field->extra entries being used to convert back to
# weird types like "polygon" if needed (IMO anyway)
my %dt_translate;
BEGIN {
%dt_translate = (
#
# MySQL types
#
int => 'integer',
mediumint => 'integer',
tinyint => 'smallint',
char => 'char',
tinyblob => 'blob',
mediumblob => 'blob',
longblob => 'long varchar for bit data',
tinytext => 'varchar',
text => 'varchar',
longtext => 'varchar',
mediumtext => 'varchar',
enum => 'varchar',
set => 'varchar',
date => 'date',
datetime => 'timestamp',
time => 'time',
year => 'date',
#
# PostgreSQL types
#
'double precision' => 'double',
serial => 'integer',
bigserial => 'integer',
money => 'double',
character => 'char',
'character varying' => 'varchar',
bytea => 'BLOB',
interval => 'integer',
boolean => 'smallint',
point => 'integer',
line => 'integer',
lseg => 'integer',
box => 'integer',
path => 'integer',
polygon => 'integer',
circle => 'integer',
cidr => 'integer',
inet => 'varchar',
macaddr => 'varchar',
bit => 'number',
'bit varying' => 'number',
#
# DB types
#
number => 'integer',
varchar2 => 'varchar',
long => 'clob',
);
}
my %db2_reserved = map { $_ => 1 } qw/
ADD DETERMINISTIC LEAVE RESTART
AFTER DISALLOW LEFT RESTRICT
ALIAS DISCONNECT LIKE RESULT
ALL DISTINCT LINKTYPE RESULT_SET_LOCATOR
ALLOCATE DO LOCAL RETURN
ALLOW DOUBLE LOCALE RETURNS
ALTER DROP LOCATOR REVOKE
AND DSNHATTR LOCATORS RIGHT
ANY DSSIZE LOCK ROLLBACK
APPLICATION DYNAMIC LOCKMAX ROUTINE
AS EACH LOCKSIZE ROW
ASSOCIATE EDITPROC LONG ROWS
ASUTIME ELSE LOOP RRN
AUDIT ELSEIF MAXVALUE RUN
AUTHORIZATION ENCODING MICROSECOND SAVEPOINT
AUX END MICROSECONDS SCHEMA
AUXILIARY END-EXEC MINUTE SCRATCHPAD
BEFORE END-EXEC1 MINUTES SECOND
BEGIN ERASE MINVALUE SECONDS
BETWEEN ESCAPE MODE SECQTY
BINARY EXCEPT MODIFIES SECURITY
BUFFERPOOL EXCEPTION MONTH SELECT
BY EXCLUDING MONTHS SENSITIVE
CACHE EXECUTE NEW SET
CALL EXISTS NEW_TABLE SIGNAL
CALLED EXIT NO SIMPLE
CAPTURE EXTERNAL NOCACHE SOME
CARDINALITY FENCED NOCYCLE SOURCE
CASCADED FETCH NODENAME SPECIFIC
CASE FIELDPROC NODENUMBER SQL
CAST FILE NOMAXVALUE SQLID
CCSID FINAL NOMINVALUE STANDARD
CHAR FOR NOORDER START
CHARACTER FOREIGN NOT STATIC
CHECK FREE NULL STAY
CLOSE FROM NULLS STOGROUP
CLUSTER FULL NUMPARTS STORES
COLLECTION FUNCTION OBID STYLE
COLLID GENERAL OF SUBPAGES
COLUMN GENERATED OLD SUBSTRING
COMMENT GET OLD_TABLE SYNONYM
COMMIT GLOBAL ON SYSFUN
CONCAT GO OPEN SYSIBM
CONDITION GOTO OPTIMIZATION SYSPROC
CONNECT GRANT OPTIMIZE SYSTEM
CONNECTION GRAPHIC OPTION TABLE
CONSTRAINT GROUP OR TABLESPACE
CONTAINS HANDLER ORDER THEN
CONTINUE HAVING OUT TO
COUNT HOLD OUTER TRANSACTION
COUNT_BIG HOUR OVERRIDING TRIGGER
CREATE HOURS PACKAGE TRIM
CROSS IDENTITY PARAMETER TYPE
CURRENT IF PART UNDO
CURRENT_DATE IMMEDIATE PARTITION UNION
CURRENT_LC_CTYPE IN PATH UNIQUE
CURRENT_PATH INCLUDING PIECESIZE UNTIL
CURRENT_SERVER INCREMENT PLAN UPDATE
CURRENT_TIME INDEX POSITION USAGE
CURRENT_TIMESTAMP INDICATOR PRECISION USER
CURRENT_TIMEZONE INHERIT PREPARE USING
CURRENT_USER INNER PRIMARY VALIDPROC
CURSOR INOUT PRIQTY VALUES
CYCLE INSENSITIVE PRIVILEGES VARIABLE
DATA INSERT PROCEDURE VARIANT
DATABASE INTEGRITY PROGRAM VCAT
DAY INTO PSID VIEW
DAYS IS QUERYNO VOLUMES
DB2GENERAL ISOBID READ WHEN
DB2GENRL ISOLATION READS WHERE
DB2SQL ITERATE RECOVERY WHILE
DBINFO JAR REFERENCES WITH
DECLARE JAVA REFERENCING WLM
DEFAULT JOIN RELEASE WRITE
DEFAULTS KEY RENAME YEAR
DEFINITION LABEL REPEAT YEARS
DELETE LANGUAGE RESET
DESCRIPTOR LC_CTYPE RESIGNAL
/;
sub produce {
my ($translator) = @_;
$DEBUG = $translator->debug;
$WARN = $translator->show_warnings;
my $no_comments = $translator->no_comments;
my $add_drop_table = $translator->add_drop_table;
my $schema = $translator->schema;
my $output = '';
my $indent = ' ';
$output .= header_comment unless ($no_comments);
my (@table_defs, @fks, @index_defs);
foreach my $table ($schema->get_tables) {
push @table_defs, 'DROP TABLE ' . $table->name . ";" if $add_drop_table;
my ($table_def, $fks) = create_table(
$table,
{
no_comments => $no_comments
}
);
push @table_defs, $table_def;
push @fks, @$fks;
foreach my $index ($table->get_indices) {
push @index_defs, create_index($index);
}
}
my (@view_defs);
foreach my $view ($schema->get_views) {
push @view_defs, create_view($view);
}
my (@trigger_defs);
foreach my $trigger ($schema->get_triggers) {
push @trigger_defs, create_trigger($trigger);
}
return wantarray
? (@table_defs, @fks, @index_defs, @view_defs, @trigger_defs)
: $output . join("\n\n", @table_defs, @fks, @index_defs, @view_defs, @trigger_defs) . "\n";
}
{
my %objnames;
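# Warn (under $WARN) when a proposed name exceeds DB2's length limit for
# the object type or matches a DB2 reserved word; the name itself is
# currently returned unchanged.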
sub check_name {
my ($name, $type, $length) = @_;
my $newname = $name;
if (length($name) > $length) ## Maximum table name length is 18
{
warn "Table name $name is longer than $length characters, truncated"
if $WARN;
# if(grep {$_ eq substr($name, 0, $length) }
# values(%{$objnames{$type}}))
# {
# die "Got multiple matching table names when truncated";
# }
# $objnames{$type}{$name} = substr($name, 0,$length);
# $newname = $objnames{$type}{$name};
}
if ($db2_reserved{ uc($newname) }) {
warn "$newname is a reserved word in DB2!" if $WARN;
}
# return sprintf("%-*s", $length-5, $newname);
return $newname;
}
}
sub create_table {
my ($table, $options) = @_;
my $table_name = check_name($table->name, 'tables', 128);
# this limit is 18 in older DB2s ! (<= 8)
my (@field_defs, @comments);
push @comments, "--\n-- Table: $table_name\n--"
unless $options->{no_comments};
foreach my $field ($table->get_fields) {
push @field_defs, create_field($field);
}
my (@con_defs, @fks);
foreach my $con ($table->get_constraints) {
my ($cdefs, $fks) = create_constraint($con);
push @con_defs, @$cdefs;
push @fks, @$fks;
}
my $tablespace = $table->extra()->{'TABLESPACE'} || '';
my $table_def = "CREATE TABLE $table_name (\n";
$table_def .= join(",\n", map {" $_"} @field_defs, @con_defs);
$table_def .= "\n)";
$table_def .= $tablespace ? " IN $tablespace;" : ';';
return $table_def, \@fks;
}
sub create_field {
my ($field) = @_;
my $field_name = check_name($field->name, 'fields', 30);
# use Data::Dumper;
# print Dumper(\%dt_translate);
# print $field->data_type, " ", $dt_translate{lc($field->data_type)}, "\n";
my $data_type = uc($dt_translate{ lc($field->data_type) } || $field->data_type);
my $size = $field->size();
my $field_def = "$field_name $data_type";
$field_def
.= $field->is_auto_increment
? ' GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)'
: '';
$field_def .= $data_type =~ /(CHAR|CLOB|NUMERIC|DECIMAL)/i ? "(${size})" : '';
$field_def .= !$field->is_nullable ? ' NOT NULL' : '';
# $field_def .= $field->is_primary_key ? ' PRIMARY KEY':'';
$field_def
.= !defined $field->default_value
? ''
: $field->default_value =~ /current( |_)timestamp/i
|| $field->default_value =~ /\Qnow()\E/i ? ' DEFAULT CURRENT TIMESTAMP'
: defined $field->default_value ? (
" DEFAULT "
. (
$data_type =~ /(INT|DOUBLE)/i
? $field->default_value
: "'" . $field->default_value . "'"
)
)
: '';
return $field_def;
}
sub create_index {
my ($index) = @_;
my $out = sprintf(
'CREATE %sINDEX %s ON %s ( %s );',
$index->type() =~ /^UNIQUE$/i ? 'UNIQUE' : '',
$index->name, $index->table->name, join(', ', $index->fields)
);
return $out;
}
sub create_constraint {
my ($constraint) = @_;
my (@con_defs, @fks);
my $ctype
= $constraint->type =~ /^PRIMARY(_|\s)KEY$/i ? 'PRIMARY KEY'
: $constraint->type =~ /^UNIQUE$/i ? 'UNIQUE'
: $constraint->type =~ /^CHECK_C$/i ? 'CHECK'
: $constraint->type =~ /^FOREIGN(_|\s)KEY$/i ? 'FOREIGN KEY'
: '';
my $expr
= $constraint->type =~ /^CHECK_C$/i
? $constraint->expression
: '';
my $ref
= $constraint->type =~ /^FOREIGN(_|\s)KEY$/i
? ('REFERENCES ' . $constraint->reference_table . '(' . join(', ', $constraint->reference_fields) . ')')
: '';
my $update = $constraint->on_update ? $constraint->on_update : '';
my $delete = $constraint->on_delete ? $constraint->on_delete : '';
my $out = join(' ',
grep {$_} $constraint->name ? ('CONSTRAINT ' . $constraint->name) : '',
$ctype,
'(' . join(', ', $constraint->fields) . ')',
$expr ? $expr : $ref,
$update, $delete);
if ($constraint->type eq FOREIGN_KEY) {
my $table_name = $constraint->table->name;
$out = "ALTER TABLE $table_name ADD $out;";
push @fks, $out;
} else {
push @con_defs, $out;
}
return \@con_defs, \@fks;
}
sub create_view {
my ($view) = @_;
my $out = sprintf("CREATE VIEW %s AS\n%s;", $view->name, $view->sql);
return $out;
}
sub create_trigger {
my ($trigger) = @_;
# create: CREATE TRIGGER trigger_name before type /ON/i table_name reference_b(?) /FOR EACH ROW/i 'MODE DB2SQL' triggered_action
my $db_events = join ', ', $trigger->database_events;
my $out = sprintf(
'CREATE TRIGGER %s %s %s ON %s %s %s MODE DB2SQL %s',
$trigger->name,
$trigger->perform_action_when || 'AFTER',
$db_events =~ /update_on/i
? ('UPDATE OF ' . join(', ', $trigger->fields))
: $db_events || 'UPDATE',
$trigger->table->name,
$trigger->extra->{reference} || 'REFERENCING OLD AS oldrow NEW AS newrow',
$trigger->extra->{granularity} || 'FOR EACH ROW',
$trigger->action
);
return $out;
}
sub alter_field {
my ($from_field, $to_field) = @_;
my $data_type = uc($dt_translate{ lc($to_field->data_type) } || $to_field->data_type);
my $size = $to_field->size();
$data_type .= $data_type =~ /CHAR/i ? "(${size})" : '';
# DB2 will only allow changing of varchar/vargraphic datatypes
# to extend their lengths. Or changing of text types to other
# texttypes, and numeric types to larger numeric types. (v8)
# We can also drop/add keys, checks and constraints, but not
# columns !?
my $out = sprintf('ALTER TABLE %s ALTER %s SET DATATYPE %s', $to_field->table->name, $to_field->name, $data_type);
}
sub add_field {
my ($new_field) = @_;
my $out = sprintf('ALTER TABLE %s ADD COLUMN %s', $new_field->table->name, create_field($new_field));
return $out;
}
sub drop_field {
my ($field) = @_;
return '';
}
1;
SQL-Translator-1.65/lib/SQL/Translator/Producer/XML.pm 0000644 0000000 0000000 00000001244 14551163724 022361 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::XML;
=pod
=head1 NAME
SQL::Translator::Producer::XML - Alias to XML::SQLFairy producer
=head1 DESCRIPTION
Previous versions of SQL::Translator included an XML producer, but the
namespace has since been further subdivided. Therefore, this module is
now just an alias to the XML::SQLFairy producer.
=head1 SEE ALSO
SQL::Translator::Producer::XML::SQLFairy.
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
use strict;
use warnings;
our $DEBUG;
our $VERSION = '1.65';
$DEBUG = 1 unless defined $DEBUG;
use SQL::Translator::Producer::XML::SQLFairy;
*produce = \&SQL::Translator::Producer::XML::SQLFairy::produce;
1;
SQL-Translator-1.65/lib/SQL/Translator/Producer/TTSchema.pm 0000644 0000000 0000000 00000011504 14551163724 023371 0 ustar 00root root 0000000 0000000 package SQL::Translator::Producer::TTSchema;
=pod
=head1 NAME
SQL::Translator::Producer::TTSchema -
Produces output using the Template Toolkit from a SQL schema
=head1 SYNOPSIS
use SQL::Translator;
my $translator = SQL::Translator->new(
from => 'MySQL',
filename => 'foo_schema.sql',
to => 'TTSchema',
producer_args => {
ttfile => 'foo_template.tt', # Template file to use
# Extra template variables
tt_vars => {
author => "Mr Foo",
},
# Template config options
tt_conf => {
INCLUDE_PATH => '/foo/templates',
},
},
);
print $translator->translate;
=head1 DESCRIPTION
Produces schema output using a given Template Toolkit template.
It needs one additional producer arg of C<ttfile> which is the file
name of the template to use. This template will be passed a variable
called C<schema>, which is the C<SQL::Translator::Schema> object
created by the parser. You can then use it to walk the schema via the
methods documented in that module.
Here's a brief example of what the template could look like:
database: [% schema.database %]
tables:
[% FOREACH table = schema.get_tables %]
[% table.name %]
================
[% FOREACH field = table.get_fields %]
[% field.name %] [% field.data_type %]([% field.size %])
[% END -%]
[% END %]
See F for a more complete example.
The template will also get the set of extra variables given as a
hashref via the C<tt_vars> producer arg. (Note that the old style of
passing this config in the C<ttargs> producer arg has been
deprecated).
You can set any of the options used to initialize the Template object by
adding a C<tt_conf> producer arg. See Template Toolkit docs for details of
the options.
(Note that the old style of passing this config directly in the producer args
has been deprecated).
$translator = SQL::Translator->new(
to => 'TT',
producer_args => {
ttfile => 'foo_template.tt',
tt_vars => {},
tt_conf => {
INCLUDE_PATH => '/foo/templates/tt',
INTERPOLATE => 1,
}
},
);
You can use this producer to create any type of text output you like,
even using it to create your own versions of what the other producers
make. For example, you could create a template that translates the
schema into MySQL's syntax, your own HTML documentation, your own
Class::DBI classes (or some other code) -- the opportunities are
limitless!
=head2 Producer Args
=over 4
=item ttfile
The template file to generate the output with.
=item tt_vars
A hash ref of extra variables you want to add to the template.
=item tt_conf
A hash ref of configuration options to pass to the L<Template> object's
constructor.
=back
=cut
use strict;
use warnings;
our ($DEBUG, @EXPORT_OK);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use Template;
use Data::Dumper;
use Exporter;
use base qw(Exporter);
@EXPORT_OK = qw(produce);
use SQL::Translator::Utils 'debug';
sub produce {
my $translator = shift;
local $DEBUG = $translator->debug;
my $scma = $translator->schema;
my $args = $translator->producer_args;
my $file = delete $args->{'ttfile'} or die "No template file!";
my $tt_vars = delete $args->{'tt_vars'} || {};
if (exists $args->{ttargs}) {
warn "Use of 'ttargs' producer arg is deprecated." . " Please use 'tt_vars' instead.\n";
%$tt_vars = (%{ $args->{ttargs} }, %$tt_vars);
}
my %tt_conf = exists $args->{tt_conf} ? %{ $args->{tt_conf} } : ();
# sqlt passes the producer args for _all_ producers in, so we use this
# grep hack to test for the old usage.
debug(Dumper(\%tt_conf)) if $DEBUG;
if (grep /^[A-Z_]+$/, keys %$args) {
warn "Template config directly in the producer args is deprecated." . " Please use 'tt_conf' instead.\n";
%tt_conf = (%tt_conf, %$args);
}
debug "Processing template $file\n";
my $out;
my $tt = Template->new(
DEBUG => $DEBUG,
ABSOLUTE => 1, # Set so we can use from the command line sensibly
RELATIVE => 1, # Maybe the cmd line code should set it! Security!
%tt_conf,
);
debug("Template ERROR: " . Template->error . "\n") if (!$tt);
$tt || die "Failed to initialize Template object: " . Template->error;
my $ttproc = $tt->process($file, { schema => $scma, %$tt_vars }, \$out);
debug("ERROR: " . $tt->error . "\n") if (!$ttproc);
$ttproc or die "Error processing template '$file': " . $tt->error;
return $out;
}
1;
=pod
=head1 AUTHOR
Mark Addison E<lt>grommit@users.sourceforge.netE<gt>.
=head1 TODO
B<More template vars?> e.g. [% tables %] as a shortcut for
[% schema.get_tables %].
=head1 SEE ALSO
SQL::Translator.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Filter/ 0000755 0000000 0000000 00000000000 14551164244 021022 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Filter/Names.pm 0000644 0000000 0000000 00000006712 14551163724 022433 0 ustar 00root root 0000000 0000000 package SQL::Translator::Filter::Names;
=head1 NAME
SQL::Translator::Filter::Names - Tweak the names of schema objects.
=head1 SYNOPSIS
#! /usr/bin/perl -w
use SQL::Translator;
# Lowercase all table names and upper case the first letter of all field
# names. (MySql style!)
#
my $sqlt = SQL::Translator->new(
filename => \@ARGV,
from => 'MySQL',
to => 'MySQL',
filters => [
Names => {
'tables' => 'lc',
'fields' => 'ucfirst',
},
],
) || die "SQLFairy error : ".SQL::Translator->error;
print($sqlt->translate) || die "SQLFairy error : ".$sqlt->error;
=cut
use strict;
use warnings;
our $VERSION = '1.65';
sub filter {
my $schema = shift;
my %args = %{ $_[0] };
# Tables
#if ( my $func = $args{tables} ) {
# _filtername($_,$func) foreach ( $schema->get_tables );
#}
# Filter names for tables, procedures, triggers and views
foreach my $type (qw/tables procedures triggers views/) {
if (my $func = $args{$type}) {
my $meth = "get_$type";
_filtername($_, $func) foreach $schema->$meth;
}
}
# Fields
if (my $func = $args{fields}) {
_filtername($_, $func) foreach map { $_->get_fields } $schema->get_tables;
}
}
# _filtername( OBJ, FUNC_NAME )
# Update the name attribute on the schema object given using the named filter.
# Objects with no name are skipped.
# Returns true if the name was changed. Dies if there is an error running func.
sub _filtername {
my ($obj, $func) = @_;
return unless my $name = $obj->name;
$func = _getfunc($func);
my $newname = eval { $func->($name) };
die "$@" if $@; # TODO - Better message!
return if $name eq $newname;
$obj->name($newname);
}
# _getfunc( NAME ) - Returns code ref to func NAME or dies.
sub _getfunc {
my ($name) = @_;
no strict 'refs';
my $func = "SQL::Translator::Filter::Names::$name";
die "Table name filter - unknown function '$name'\n" unless exists &$func;
\&$func;
}
# The name munging functions
#=============================================================================
# Get called with name to munge as first arg and return the new name. Die on
# errors.
sub lc { lc shift; }
sub uc { uc shift; }
sub ucfirst { ucfirst shift; }
1; #==========================================================================
__END__
=head1 DESCRIPTION
Tweak the names of schema objects by providing functions that filter the
names from their given form into the desired form.
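Because the filter resolves function names to subs living in this package
(see C<_getfunc>), a rough sketch of using a munging style that is not
shipped here is to define your own sub in this namespace before translating.
The C<snake_case> function below is purely hypothetical and not part of this
module:
  use SQL::Translator;
  # Hypothetical extra name-munging function; _getfunc() finds it by name.
  sub SQL::Translator::Filter::Names::snake_case {
    my $name = shift;
    $name =~ s/([a-z0-9])([A-Z])/${1}_$2/g;
    return lc $name;
  }
  my $sqlt = SQL::Translator->new(
    filename => \@ARGV,
    from     => 'MySQL',
    to       => 'MySQL',
    filters  => [ Names => { tables => 'snake_case' } ],
  ) || die "SQLFairy error : " . SQL::Translator->error;
  print($sqlt->translate) || die "SQLFairy error : " . $sqlt->error;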
=head1 SEE ALSO
C, L
=head1 TODO
=over 4
=item Name Groups
Define a bunch of useful groups to run the name filters over. e.g. all, fkeys,
pkeys etc.
=item More Functions
e.g. camelcase, titlecase, single word etc.
Also a way to pass in a regexp.
May also want a way to pass in arguments for the func e.g. prefix.
=item Multiple Filters on the same name (filter order)?
Do we actually need this? You could just run lots of filters. It would make
adding func args to the interface easier.
filters => [
[ 'Names', { all => 'lc' } ],
[ 'Names', {
tables => 'lc',
fields => 'ucfirst',
} ],
],
Mind you, if you could give the filter a list this wouldn't be a problem!
filters => [
[ 'Names',
all => 'lc',
fields => 'ucfirst',
],
],
Which is nice. Might have to change the calling conventions for filters.
Would also provide an order to run the filters in, rather than having to
hard-code it into the filter itself.
=back
=cut
SQL-Translator-1.65/lib/SQL/Translator/Filter/Globals.pm 0000644 0000000 0000000 00000011511 14551163724 022744 0 ustar 00root root 0000000 0000000 package SQL::Translator::Filter::Globals;
=head1 NAME
SQL::Translator::Filter::Globals - Add global fields and indices to all tables.
=head1 SYNOPSIS
# e.g. Add timestamp field to all tables.
use SQL::Translator;
my $sqlt = SQL::Translator->new(
from => 'MySQL',
to => 'MySQL',
filters => [
Globals => {
fields => [
{
name => 'modified',
data_type => 'TIMESTAMP',
}
],
indices => [
{
fields => 'modified',
},
],
constraints => [
{
}
],
},
],
) || die "SQLFairy error : ".SQL::Translator->error;
my $sql = $sqlt->translate || die "SQLFairy error : ".$sqlt->error;
=cut
use strict;
use warnings;
our $VERSION = '1.65';
sub filter {
my $schema = shift;
my %args = @_;
my $global_table = $args{global_table} ||= '_GLOBAL_';
my (@global_fields, @global_indices, @global_constraints);
push @global_fields, @{ $args{fields} } if $args{fields};
push @global_indices, @{ $args{indices} } if $args{indices};
push @global_constraints, @{ $args{constraints} } if $args{constraints};
# Pull fields and indices off global table and then remove it.
if (my $gtbl = $schema->get_table($global_table)) {
foreach ($gtbl->get_fields) {
# We don't copy the order attrib so the added fields should get
# pushed on the end of each table.
push @global_fields,
{
name => $_->name,
comments => "" . $_->comments,
data_type => $_->data_type,
default_value => $_->default_value,
size => [ $_->size ],
extra => scalar($_->extra),
foreign_key_reference => $_->foreign_key_reference,
is_auto_increment => $_->is_auto_increment,
is_foreign_key => $_->is_foreign_key,
is_nullable => $_->is_nullable,
is_primary_key => $_->is_primary_key,
is_unique => $_->is_unique,
is_valid => $_->is_valid,
};
}
foreach ($gtbl->get_indices) {
push @global_indices,
{
name => $_->name,
type => $_->type,
fields => [ $_->fields ],
options => [ $_->options ],
extra => scalar($_->extra),
};
}
foreach ($gtbl->get_constraints) {
push @global_constraints,
{
name => $_->name,
fields => [ $_->fields ],
deferrable => $_->deferrable,
expression => $_->expression,
match_type => $_->match_type,
options => [ $_->options ],
on_delete => $_->on_delete,
on_update => $_->on_update,
reference_fields => [ $_->reference_fields ],
reference_table => $_->reference_table,
table => $_->table,
type => $_->type,
extra => scalar($_->extra),
};
}
$schema->drop_table($gtbl);
}
# Add globals to tables
foreach my $tbl ($schema->get_tables) {
foreach my $new_fld (@global_fields) {
# Don't add if field already there
next if $tbl->get_field($new_fld->{name});
$tbl->add_field(%$new_fld);
}
foreach my $new_index (@global_indices) {
$tbl->add_index(%$new_index);
}
foreach my $new_constraint (@global_constraints) {
$tbl->add_constraint(%$new_constraint);
}
}
}
1;
__END__
=head1 DESCRIPTION
Adds global fields, indices and constraints to all tables in the schema.
The globals to add can either be defined in the filter args or using a _GLOBAL_
table (see below).
If a table already contains a field with the same name as a global then it is
skipped for that table.
=head2 The _GLOBAL_ Table
An alternative to using the args is to add a table called C<_GLOBAL_> to the
schema and then just use the filter. Any fields and indices defined on this table
will be added to all the tables in the schema and the _GLOBAL_ table removed.
The name of the global table can be changed using a C<global_table> arg to
the filter.
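As a rough sketch of that flow (the C<person> table and C<modified> field are
purely illustrative), you can build the C<_GLOBAL_> table with the schema API
and let the filter copy its fields onto every other table. The filter is
called directly here only to show what it does; normally it runs via the
C<filters> arg as in the SYNOPSIS:
  use SQL::Translator::Schema;
  use SQL::Translator::Filter::Globals;
  my $schema = SQL::Translator::Schema->new;
  my $person = $schema->add_table(name => 'person');
  $person->add_field(name => 'person_id', data_type => 'int');
  my $glob = $schema->add_table(name => '_GLOBAL_');
  $glob->add_field(name => 'modified', data_type => 'timestamp');
  # Copies _GLOBAL_'s fields onto every table, then drops _GLOBAL_.
  SQL::Translator::Filter::Globals::filter($schema);
  print "$_\n" for $schema->get_table('person')->get_fields;
  # person_id
  # modified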
=head1 SEE ALSO
C, L
=head1 BUGS
Will generate duplicate indices if a table already has an index identical to
one added globally.
Will generate duplicate constraints if a table already has a constraint
identical to one added globally.
=head1 TODO
Some extra data values that can be used to control the global addition. e.g.
'skip_global'.
=head1 AUTHOR
Mark Addison
=cut
SQL-Translator-1.65/lib/SQL/Translator/Filter/DefaultExtra.pm 0000644 0000000 0000000 00000002750 14551163724 023756 0 ustar 00root root 0000000 0000000 package SQL::Translator::Filter::DefaultExtra;
=head1 NAME
SQL::Translator::Filter::DefaultExtra - Set default extra data values for schema
objects.
=head1 SYNOPSIS
use SQL::Translator;
my $sqlt = SQL::Translator->new(
from => 'MySQL',
to => 'MySQL',
filters => [
DefaultExtra => {
# XXX - These should really be ordered
# Default widget for fields to basic text edit.
'field.widget' => 'text',
# idea:
'field(data_type=BIT).widget' => 'yesno',
# Default label (human formatted name) for fields and tables
'field.label' => '=ucfirst($name)',
'table.label' => '=ucfirst($name)',
},
],
) || die "SQLFairy error : ".SQL::Translator->error;
my $sql = $sqlt->translate || die "SQLFairy error : ".$sqlt->error;
=cut
use strict;
use warnings;
our $VERSION = '1.65';
sub filter {
my $schema = shift;
my %args = %{ +shift };
# Tables
foreach ($schema->get_tables) {
my %extra = $_->extra;
$extra{label} ||= ucfirst($_->name);
$_->extra(%extra);
}
# Fields
foreach (map { $_->get_fields } $schema->get_tables) {
my %extra = $_->extra;
$extra{label} ||= ucfirst($_->name);
$_->extra(%extra);
}
}
1;
__END__
=head1 DESCRIPTION
Maybe I'm trying to do too much in one go. Args set a match and then an
update; if you want to set lots of things, use lots of filters!
=head1 SEE ALSO
C, L
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/ 0000755 0000000 0000000 00000000000 14551164244 020775 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Schema/Procedure.pm 0000644 0000000 0000000 00000010351 14551163724 023265 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Procedure;
=pod
=head1 NAME
SQL::Translator::Schema::Procedure - SQL::Translator procedure object
=head1 SYNOPSIS
use SQL::Translator::Schema::Procedure;
my $procedure = SQL::Translator::Schema::Procedure->new(
name => 'foo',
sql => 'CREATE PROC foo AS SELECT * FROM bar',
parameters => 'foo,bar',
owner => 'nomar',
comments => 'blah blah blah',
schema => $schema,
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::Procedure> is a class for dealing with
stored procedures (and possibly other pieces of nameable SQL code?).
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Utils qw(ex2err);
use SQL::Translator::Role::ListAttr;
use SQL::Translator::Types qw(schema_obj);
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
=head2 new
Object constructor.
my $schema = SQL::Translator::Schema::Procedure->new;
=cut
=head2 parameters
Gets and sets the parameters of the stored procedure.
$procedure->parameters('id');
$procedure->parameters('id', 'name');
$procedure->parameters( 'id, name' );
$procedure->parameters( [ 'id', 'name' ] );
$procedure->parameters( qw[ id name ] );
my @parameters = $procedure->parameters;
=cut
with ListAttr parameters => (uniq => 1);
=head2 name
Get or set the procedure's name.
$procedure->name('foo');
my $name = $procedure->name;
=cut
has name => (is => 'rw', default => quote_sub(q{ '' }));
=head2 sql
Get or set the procedure's SQL.
$procedure->sql('select * from foo');
my $sql = $procedure->sql;
=cut
has sql => (is => 'rw', default => quote_sub(q{ '' }));
=head2 order
Get or set the order of the procedure.
$procedure->order( 3 );
my $order = $procedure->order;
=cut
has order => (is => 'rw');
=head2 owner
Get or set the owner of the procedure.
$procedure->owner('nomar');
my $sql = $procedure->owner;
=cut
has owner => (is => 'rw', default => quote_sub(q{ '' }));
=head2 comments
Get or set the comments on a procedure.
$procedure->comments('foo');
$procedure->comments('bar');
print join( ', ', $procedure->comments ); # prints "foo, bar"
=cut
has comments => (
is => 'rw',
coerce => quote_sub(q{ ref($_[0]) eq 'ARRAY' ? $_[0] : [$_[0]] }),
default => quote_sub(q{ [] }),
);
around comments => sub {
my $orig = shift;
my $self = shift;
my @comments = ref $_[0] ? @{ $_[0] } : @_;
for my $arg (@comments) {
$arg = $arg->[0] if ref $arg;
push @{ $self->$orig }, $arg if defined $arg && $arg;
}
return wantarray ? @{ $self->$orig } : join("\n", @{ $self->$orig });
};
=head2 schema
Get or set the procedure's schema object.
$procedure->schema( $schema );
my $schema = $procedure->schema;
=cut
has schema => (is => 'rw', isa => schema_obj('Schema'), weak_ref => 1);
around schema => \&ex2err;
=head2 equals
Determines if this procedure is the same as another
my $isIdentical = $procedure1->equals( $procedure2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
my $ignore_sql = shift;
return 0 unless $self->$orig($other);
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
unless ($ignore_sql) {
my $selfSql = $self->sql;
my $otherSql = $other->sql;
# Remove comments
$selfSql =~ s/--.*$//mg;
$otherSql =~ s/--.*$//mg;
# Collapse whitespace to space to avoid whitespace comparison issues
$selfSql =~ s/\s+/ /sg;
$otherSql =~ s/\s+/ /sg;
return 0 unless $selfSql eq $otherSql;
}
return 0
unless $self->_compare_objects(scalar $self->parameters, scalar $other->parameters);
# return 0 unless $self->comments eq $other->comments;
# return 0 unless $case_insensitive ? uc($self->owner) eq uc($other->owner) : $self->owner eq $other->owner;
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHORS
Ken Youens-Clark E<lt>kclark@cshl.orgE<gt>,
Paul Harrington E<lt>Paul-Harrington@deshaw.comE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/Constraint.pm 0000644 0000000 0000000 00000027201 14551163724 023463 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Constraint;
=pod
=head1 NAME
SQL::Translator::Schema::Constraint - SQL::Translator constraint object
=head1 SYNOPSIS
use SQL::Translator::Schema::Constraint;
my $constraint = SQL::Translator::Schema::Constraint->new(
name => 'foo',
fields => [ 'id' ],
type => PRIMARY_KEY,
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::Constraint> is the constraint object.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Schema::Constants;
use SQL::Translator::Utils qw(ex2err throw);
use SQL::Translator::Role::ListAttr;
use SQL::Translator::Types qw(schema_obj enum);
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
# NB: plain commas rather than fat commas, so the constants are called
# instead of being autoquoted as bareword strings.
my %VALID_CONSTRAINT_TYPE = (PRIMARY_KEY, 1, UNIQUE, 1, CHECK_C, 1, FOREIGN_KEY, 1, NOT_NULL, 1, EXCLUDE, 1,);
=head2 new
Object constructor.
my $schema = SQL::Translator::Schema::Constraint->new(
table => $table, # table to which it belongs
type => 'foreign_key', # type of table constraint
name => 'fk_phone_id', # name of the constraint
fields => 'phone_id', # field in the referring table
reference_fields => 'phone_id', # referenced field
reference_table => 'phone', # referenced table
match_type => 'full', # how to match
on_delete => 'cascade', # what to do on deletes
on_update => '', # what to do on updates
);
=cut
# Override to remove empty arrays from args.
# t/14postgres-parser breaks without this.
around BUILDARGS => sub {
my $orig = shift;
my $self = shift;
my $args = $self->$orig(@_);
foreach my $arg (keys %{$args}) {
delete $args->{$arg}
if ref($args->{$arg}) eq "ARRAY" && !@{ $args->{$arg} };
}
if (exists $args->{fields}) {
$args->{field_names} = delete $args->{fields};
}
return $args;
};
=head2 deferrable
Get or set whether the constraint is deferrable. If not defined,
then returns "1". The argument is evaluated by Perl for true or
false, so the following are equivalent:
$deferrable = $field->deferrable(0);
$deferrable = $field->deferrable('');
$deferrable = $field->deferrable('0');
=cut
has deferrable => (
is => 'rw',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
default => quote_sub(q{ 1 }),
);
=head2 expression
Gets and sets the expression used in a CHECK constraint.
my $expression = $constraint->expression('...');
=cut
has expression => (is => 'rw', default => quote_sub(q{ '' }));
around expression => sub {
my ($orig, $self, $arg) = @_;
$self->$orig($arg || ());
};
sub is_valid {
=pod
=head2 is_valid
Determine whether the constraint is valid or not.
my $ok = $constraint->is_valid;
=cut
my $self = shift;
my $type = $self->type or return $self->error('No type');
my $table = $self->table or return $self->error('No table');
my @fields = $self->fields or return $self->error('No fields');
my $table_name = $table->name or return $self->error('No table name');
for my $f (@fields) {
next if $table->get_field($f);
return $self->error("Constraint references non-existent field '$f' ", "in table '$table_name'");
}
my $schema = $table->schema
or return $self->error('Table ', $table->name, ' has no schema object');
if ($type eq FOREIGN_KEY) {
return $self->error('Only one field allowed for foreign key')
if scalar @fields > 1;
my $ref_table_name = $self->reference_table
or return $self->error('No reference table');
my $ref_table = $schema->get_table($ref_table_name)
or return $self->error("No table named '$ref_table_name' in schema");
my @ref_fields = $self->reference_fields or return;
return $self->error('Only one field allowed for foreign key reference')
if scalar @ref_fields > 1;
for my $ref_field (@ref_fields) {
next if $ref_table->get_field($ref_field);
return $self->error("Constraint from field(s) "
. join(', ', map {qq['$table_name.$_']} @fields)
. " to non-existent field '$ref_table_name.$ref_field'");
}
} elsif ($type eq CHECK_C) {
return $self->error('No expression for CHECK')
unless $self->expression;
}
return 1;
}
=head2 fields
Gets and sets the fields the constraint is on. Accepts a string, list or
arrayref; returns an array or array reference. Will unique the field
names and keep them in order by the first occurrence of a field name.
The fields are returned as Field objects if they exist or as plain
names if not. (If you just want the names and want to avoid the Field's
overload magic, use L</field_names>.)
Returns undef or an empty list if the constraint has no fields set.
$constraint->fields('id');
$constraint->fields('id', 'name');
$constraint->fields( 'id, name' );
$constraint->fields( [ 'id', 'name' ] );
$constraint->fields( qw[ id name ] );
my @fields = $constraint->fields;
=cut
sub fields {
my $self = shift;
my $table = $self->table;
my @fields = map { $table->get_field($_) || $_ } @{ $self->field_names(@_) || [] };
return
wantarray ? @fields
: @fields ? \@fields
: undef;
}
=head2 field_names
Read-only method to return a list or array ref of the field names. Returns undef
or an empty list if the constraint has no fields set. Useful if you want to
avoid the overload magic of the Field objects returned by the fields method.
my @names = $constraint->field_names;
=cut
with ListAttr field_names => (uniq => 1, undef_if_empty => 1);
=head2 match_type
Get or set the constraint's match_type. The only valid values are "full",
"partial" and "simple".
my $match_type = $constraint->match_type('FULL');
=cut
has match_type => (
is => 'rw',
default => quote_sub(q{ '' }),
coerce => quote_sub(q{ lc $_[0] }),
isa => enum(
[qw(full partial simple)],
{
msg => "Invalid match type: %s",
allow_false => 1,
}
),
);
around match_type => \&ex2err;
=head2 name
Get or set the constraint's name.
my $name = $constraint->name('foo');
=cut
has name => (is => 'rw', default => quote_sub(q{ '' }));
around name => sub {
my ($orig, $self, $arg) = @_;
$self->$orig($arg || ());
};
=head2 options
Gets or adds to the constraint's options (e.g., "INITIALLY IMMEDIATE").
Returns an array or array reference.
$constraint->options('NORELY');
my @options = $constraint->options;
=cut
with ListAttr options => ();
=head2 on_delete
Get or set the constraint's "on delete" action.
my $action = $constraint->on_delete('cascade');
=cut
has on_delete => (is => 'rw', default => quote_sub(q{ '' }));
around on_delete => sub {
my ($orig, $self, $arg) = @_;
$self->$orig($arg || ());
};
=head2 on_update
Get or set the constraint's "on update" action.
my $action = $constraint->on_update('no action');
=cut
has on_update => (is => 'rw', default => quote_sub(q{ '' }));
around on_update => sub {
my ($orig, $self, $arg) = @_;
$self->$orig($arg || ());
};
=head2 reference_fields
Gets and sets the fields in the referred table. Accepts a string, list or
arrayref; returns an array or array reference.
$constraint->reference_fields('id');
$constraint->reference_fields('id', 'name');
$constraint->reference_fields( 'id, name' );
$constraint->reference_fields( [ 'id', 'name' ] );
$constraint->reference_fields( qw[ id name ] );
my @reference_fields = $constraint->reference_fields;
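If no reference fields are ever supplied, the lazy builder below falls back
to the primary key of the referenced table. A minimal sketch of that
behaviour (the C<person> and C<phone> tables are illustrative only):
  use SQL::Translator::Schema;
  use SQL::Translator::Schema::Constants;
  my $schema = SQL::Translator::Schema->new;
  my $phone  = $schema->add_table(name => 'phone');
  $phone->add_field(name => 'phone_id', data_type => 'int');
  $phone->primary_key('phone_id');
  my $person = $schema->add_table(name => 'person');
  $person->add_field(name => 'phone_id', data_type => 'int');
  my $fk = $person->add_constraint(
    type            => FOREIGN_KEY,
    fields          => 'phone_id',
    reference_table => 'phone',
  );
  # Falls back to phone's primary key:
  print join(', ', $fk->reference_fields), "\n";    # phone_id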
=cut
with ListAttr reference_fields => (
may_throw => 1,
builder => 1,
lazy => 1,
);
sub _build_reference_fields {
my ($self) = @_;
my $table = $self->table or throw('No table');
my $schema = $table->schema or throw('No schema');
if (my $ref_table_name = $self->reference_table) {
my $ref_table = $schema->get_table($ref_table_name)
or throw("Can't find table '$ref_table_name'");
if (my $constraint = $ref_table->primary_key) {
return [ $constraint->fields ];
} else {
throw('No reference fields defined and cannot find primary key in ', "reference table '$ref_table_name'");
}
}
}
=head2 reference_table
Get or set the table referred to by the constraint.
my $reference_table = $constraint->reference_table('foo');
=cut
has reference_table => (is => 'rw', default => quote_sub(q{ '' }));
=head2 table
Get or set the constraint's table object.
my $table = $constraint->table;
=cut
has table => (is => 'rw', isa => schema_obj('Table'), weak_ref => 1);
around table => \&ex2err;
=head2 type
Get or set the constraint's type.
my $type = $constraint->type( PRIMARY_KEY );
=cut
has type => (
is => 'rw',
default => quote_sub(q{ '' }),
coerce => quote_sub(q{ (my $t = $_[0]) =~ s/_/ /g; uc $t }),
isa => enum(
[ keys %VALID_CONSTRAINT_TYPE ],
{
msg => "Invalid constraint type: %s",
allow_false => 1,
}
),
);
around type => \&ex2err;
=head2 equals
Determines if this constraint is the same as another
my $isIdentical = $constraint1->equals( $constraint2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
my $ignore_constraint_names = shift;
return 0 unless $self->$orig($other);
return 0 unless $self->type eq $other->type;
unless ($ignore_constraint_names) {
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
}
return 0 unless $self->deferrable eq $other->deferrable;
#return 0 unless $self->is_valid eq $other->is_valid;
return 0
unless $case_insensitive
? uc($self->table->name) eq uc($other->table->name)
: $self->table->name eq $other->table->name;
return 0 unless $self->expression eq $other->expression;
# Check fields, regardless of order
my %otherFields = (); # create a hash of the other fields
foreach my $otherField ($other->fields) {
$otherField = uc($otherField) if $case_insensitive;
$otherFields{$otherField} = 1;
}
foreach my $selfField ($self->fields) { # check for self fields in hash
$selfField = uc($selfField) if $case_insensitive;
return 0 unless $otherFields{$selfField};
delete $otherFields{$selfField};
}
# Check all other fields were accounted for
return 0 unless keys %otherFields == 0;
# Check reference fields, regardless of order
my %otherRefFields = (); # create a hash of the other reference fields
foreach my $otherRefField ($other->reference_fields) {
$otherRefField = uc($otherRefField) if $case_insensitive;
$otherRefFields{$otherRefField} = 1;
}
foreach my $selfRefField ($self->reference_fields) { # check for self reference fields in hash
$selfRefField = uc($selfRefField) if $case_insensitive;
return 0 unless $otherRefFields{$selfRefField};
delete $otherRefFields{$selfRefField};
}
# Check all other reference fields were accounted for
return 0 unless keys %otherRefFields == 0;
return 0
unless $case_insensitive
? uc($self->reference_table) eq uc($other->reference_table)
: $self->reference_table eq $other->reference_table;
return 0 unless $self->match_type eq $other->match_type;
return 0 unless $self->on_delete eq $other->on_delete;
return 0 unless $self->on_update eq $other->on_update;
return 0
unless $self->_compare_objects(scalar $self->options, scalar $other->options);
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/Role/ 0000755 0000000 0000000 00000000000 14551164244 021676 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Schema/Role/Extra.pm 0000644 0000000 0000000 00000003471 14541265172 023325 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Role::Extra;
=head1 NAME
SQL::Translator::Schema::Role::Extra - "extra" attribute for schema classes
=head1 SYNOPSIS
package Foo;
use Moo;
with qw(SQL::Translator::Schema::Role::Extra);
=head1 DESCRIPTION
This role provides methods to set and get a hashref of extra attributes
for schema objects.
=cut
use Moo::Role;
use Sub::Quote qw(quote_sub);
=head1 METHODS
=head2 extra
Get or set the object's "extra" attributes (e.g., "ZEROFILL" for MySQL fields).
Call with no args to get all the extra data.
Call with a single name arg to get the value of the named extra attribute,
returned as a scalar. Call with a hash or hashref to set extra attributes.
Returns a hash or a hashref.
$field->extra( qualifier => 'ZEROFILL' );
$qualifier = $field->extra('qualifier');
%extra = $field->extra;
$extra = $field->extra;
=cut
has extra => (is => 'rwp', default => quote_sub(q{ +{} }));
around extra => sub {
my ($orig, $self) = (shift, shift);
@_ = %{ $_[0] } if ref $_[0] eq "HASH";
my $extra = $self->$orig;
if (@_ == 1) {
return $extra->{ $_[0] };
} elsif (@_) {
my %args = @_;
while (my ($key, $value) = each %args) {
$extra->{$key} = $value;
}
}
return wantarray ? %$extra : $extra;
};
=head2 remove_extra
L</extra> can only be used to get or set "extra" attributes, but not to
remove them. Call with no args to remove all extra attributes that
have been set before. Call with a list of key names to remove
certain extra attributes only.
# remove all extra attributes
$field->remove_extra();
# remove timezone and locale attributes only
$field->remove_extra(qw/timezone locale/);
=cut
sub remove_extra {
my ($self, @keys) = @_;
unless (@keys) {
$self->_set_extra({});
} else {
delete @{ $self->extra }{@keys};
}
}
1;
SQL-Translator-1.65/lib/SQL/Translator/Schema/Role/Compare.pm 0000644 0000000 0000000 00000002366 14541265172 023632 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Role::Compare;
=head1 NAME
SQL::Translator::Schema::Role::Compare - compare objects
=head1 SYNOPSIS
package Foo;
use Moo;
with qw(SQL::Translator::Schema::Role::Compare);
$obj->equals($other);
=head1 DESCRIPTION
This role provides a method to compare whether two objects are the
same.
=cut
use Moo::Role;
use Data::Dumper ();    # needed by _compare_objects below
=head1 METHODS
=head2 equals
Determines if this object is the same as another.
my $isIdentical = $object1->equals( $object2 );
=cut
sub equals {
my $self = shift;
my $other = shift;
return 0 unless $other;
return 1 if overload::StrVal($self) eq overload::StrVal($other);
return 0 unless $other->isa(ref($self));
return 1;
}
sub _compare_objects {
# my ($self, $obj1, $obj2) = @_;
my $result = (Data::Dumper->new([ $_[1] ])->Terse(1)->Indent(0)->Deparse(1)
->Sortkeys(1)->Maxdepth(0)->Dump eq Data::Dumper->new([ $_[2] ])->Terse(1)->Indent(0)->Deparse(1)
->Sortkeys(1)->Maxdepth(0)->Dump);
# if ( !$result ) {
# use Carp qw(cluck);
# cluck("How did I get here?");
# use Data::Dumper;
# $Data::Dumper::Maxdepth = 1;
# print "obj1: ", Dumper($obj1), "\n";
# print "obj2: ", Dumper($obj2), "\n";
# }
return $result;
}
1;
SQL-Translator-1.65/lib/SQL/Translator/Schema/Constants.pm 0000644 0000000 0000000 00000002267 14551163724 023320 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Constants;
=head1 NAME
SQL::Translator::Schema::Constants - constants module
=head1 SYNOPSIS
use SQL::Translator::Schema::Constants;
$table->add_constraint(
name => 'foo',
type => PRIMARY_KEY,
);
=head1 DESCRIPTION
This module exports the following constants for Schema features:
=over 4
=item CHECK_C
=item FOREIGN_KEY
=item FULL_TEXT
=item SPATIAL
=item NOT_NULL
=item NORMAL
=item NULL
=item PRIMARY_KEY
=item UNIQUE
=item EXCLUDE
=back
=cut
use strict;
use warnings;
use base qw( Exporter );
require Exporter;
our $VERSION = '1.65';
our @EXPORT = qw[
CHECK_C
FOREIGN_KEY
FULL_TEXT
SPATIAL
NOT_NULL
NORMAL
NULL
PRIMARY_KEY
UNIQUE
EXCLUDE
];
#
# Because "CHECK" is a Perl keyword
#
use constant CHECK_C => 'CHECK';
use constant FOREIGN_KEY => 'FOREIGN KEY';
use constant FULL_TEXT => 'FULLTEXT';
use constant SPATIAL => 'SPATIAL';
use constant NOT_NULL => 'NOT NULL';
use constant NORMAL => 'NORMAL';
use constant NULL => 'NULL';
use constant PRIMARY_KEY => 'PRIMARY KEY';
use constant UNIQUE => 'UNIQUE';
use constant EXCLUDE => 'EXCLUDE';
1;
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/IndexField.pm 0000644 0000000 0000000 00000003321 14546342442 023347 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::IndexField;
=pod
=head1 NAME
SQL::Translator::Schema::IndexField - SQL::Translator index field object
=head1 DESCRIPTION
C<SQL::Translator::Schema::IndexField> is the index field object.
Different databases allow for different options on index fields. Those options are supported here through the extra hash.
=head1 METHODS
=cut
use Moo;
extends 'SQL::Translator::Schema::Object';
use overload '""' => sub { shift->name };
=head2 new
Object constructor.
my $schema = SQL::Translator::Schema::IndexField->new;
=head2 name
The name of the index field. The object stringifies to this. In addition, you can simply pass
a string to the constructor to set only this attribute.
=head2 extra
All options for the field are stored under the extra hash. The constructor will collect
them for you if they are passed in directly. In addition, an accessor is provided for all supported options.
Currently supported options:
=over 4
=item prefix_length
Supported by MySQL. Indicates that only the first N characters of the column are indexed (see the sketch below).
=back
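A small sketch of how such an option travels through the constructor (the
column name is made up); anything other than C<name> and C<extra> ends up in
the C<extra> hash:
  use SQL::Translator::Schema::IndexField;
  my $if = SQL::Translator::Schema::IndexField->new({
    name          => 'long_text_col',
    prefix_length => 10,
  });
  print $if->name, "\n";                      # long_text_col
  print $if->extra->{prefix_length}, "\n";    # 10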
=cut
around BUILDARGS => sub {
my ($orig, $self, @args) = @_;
if (@args == 1 && !ref $args[0]) {
@args = (name => $args[0]);
}
# there are some weird pathological cases where we get an object passed in rather than a
# hashref. We'll just clone it
if (ref $args[0] eq $self) {
return { %{ $args[0] } };
}
my $args = $self->$orig(@args);
my $extra = delete $args->{extra} || {};
my $name = delete $args->{name};
return {
name => $name,
extra => { %$extra, %$args }
};
};
has name => (
is => 'rw',
required => 1,
);
has extra => (
is => 'rw',
default => sub { {} },
);
=pod
=head1 AUTHOR
Veesh Goldman E<lt>veesh@cpan.orgE<gt>.
=cut
1;
SQL-Translator-1.65/lib/SQL/Translator/Schema/Trigger.pm 0000644 0000000 0000000 00000016633 14551163724 022751 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Trigger;
=pod
=head1 NAME
SQL::Translator::Schema::Trigger - SQL::Translator trigger object
=head1 SYNOPSIS
use SQL::Translator::Schema::Trigger;
my $trigger = SQL::Translator::Schema::Trigger->new(
name => 'foo',
perform_action_when => 'before', # or after
database_events => [qw/update insert/], # also update, update_on, delete
fields => [], # if event is "update"
on_table => 'foo', # table name
action => '...', # text of trigger
schema => $schema, # Schema object
scope => 'row', # or statement
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::Trigger> is the trigger object.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Utils qw(parse_list_arg ex2err throw uniq);
use SQL::Translator::Types qw(schema_obj enum);
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
=head2 new
Object constructor.
my $trigger = SQL::Translator::Schema::Trigger->new;
=cut
around BUILDARGS => sub {
my ($orig, $self, @args) = @_;
my $args = $self->$orig(@args);
if (exists $args->{on_table}) {
my $arg = delete $args->{on_table};
my $table = $args->{schema}->get_table($arg)
or die "Table named $arg doesn't exist";
$args->{table} = $table;
}
if (exists $args->{database_event}) {
$args->{database_events} = delete $args->{database_event};
}
return $args;
};
=head2 perform_action_when
Gets or sets whether the event happens "before" or "after" the
C<database_event>.
$trigger->perform_action_when('after');
=cut
has perform_action_when => (
is => 'rw',
coerce => quote_sub(q{ defined $_[0] ? lc $_[0] : $_[0] }),
isa => enum(
[qw(before after)],
{
msg => "Invalid argument '%s' to perform_action_when",
allow_undef => 1,
}
),
);
around perform_action_when => \&ex2err;
sub database_event {
=pod
=head2 database_event
Obsolete, please use database_events!
=cut
my $self = shift;
return $self->database_events(@_);
}
=head2 database_events
Gets or sets the events that trigger the trigger.
my $ok = $trigger->database_events('insert');
=cut
has database_events => (
is => 'rw',
coerce => quote_sub(q{ [ map { lc } ref $_[0] eq 'ARRAY' ? @{$_[0]} : ($_[0]) ] }),
isa => sub {
my @args = @{ $_[0] };
my %valid = map { $_, 1 } qw[ insert update update_on delete ];
my @invalid = grep { !defined $valid{$_} } @args;
if (@invalid) {
throw(sprintf("Invalid events '%s' in database_events", join(', ', @invalid)));
}
},
);
around database_events => sub {
my ($orig, $self) = (shift, shift);
if (@_) {
ex2err($orig, $self, ref $_[0] eq 'ARRAY' ? $_[0] : \@_)
or return;
}
return wantarray
? @{ $self->$orig || [] }
: $self->$orig;
};
=head2 fields
Gets and sets which fields to monitor for C<database_events>.
$trigger->fields('id');
$trigger->fields('id', 'name');
$trigger->fields( 'id, name' );
$trigger->fields( [ 'id', 'name' ] );
$trigger->fields( qw[ id name ] );
my @fields = $trigger->fields;
=cut
has fields => (
is => 'rw',
coerce => sub {
my @fields = uniq @{ parse_list_arg($_[0]) };
@fields ? \@fields : undef;
},
);
around fields => sub {
my $orig = shift;
my $self = shift;
my $fields = parse_list_arg(@_);
$self->$orig($fields) if @$fields;
return wantarray ? @{ $self->$orig || [] } : $self->$orig;
};
=head2 table
Gets or sets the table on which the trigger works, as a L<SQL::Translator::Schema::Table> object.
$trigger->table($triggered_table);
=cut
has table => (is => 'rw', isa => schema_obj('Table'), weak_ref => 1);
around table => \&ex2err;
sub on_table {
=pod
=head2 on_table
Gets or sets the table name on which the trigger works, as a string.
$trigger->on_table('foo');
=cut
my ($self, $arg) = @_;
if (@_ == 2) {
my $table = $self->schema->get_table($arg);
die "Table named $arg doesn't exist"
if !$table;
$self->table($table);
}
return $self->table->name;
}
=head2 action
Gets or set the action of the trigger.
$trigger->action(
q[
BEGIN
select ...;
update ...;
END
]
);
=cut
has action => (is => 'rw', default => quote_sub(q{ '' }));
sub is_valid {
=pod
=head2 is_valid
Determine whether the trigger is valid or not.
my $ok = $trigger->is_valid;
=cut
my $self = shift;
for my $attr (qw[ name perform_action_when database_events on_table action ]) {
return $self->error("Invalid: missing '$attr'") unless $self->$attr();
}
return $self->error("Missing fields for UPDATE ON")
if $self->database_event eq 'update_on' && !$self->fields;
return 1;
}
=head2 name
Get or set the trigger's name.
my $name = $trigger->name('foo');
=cut
has name => (is => 'rw', default => quote_sub(q{ '' }));
=head2 order
Get or set the trigger's order.
my $order = $trigger->order(3);
=cut
has order => (is => 'rw', default => quote_sub(q{ 0 }));
around order => sub {
my ($orig, $self, $arg) = @_;
if (defined $arg && $arg =~ /^\d+$/) {
return $self->$orig($arg);
}
return $self->$orig;
};
=head2 scope
Get or set the trigger's scope (row or statement).
my $scope = $trigger->scope('statement');
=cut
has scope => (
is => 'rw',
isa => enum(
[qw(row statement)],
{
msg => "Invalid scope '%s'",
icase => 1,
allow_undef => 1,
}
),
);
around scope => \&ex2err;
=head2 schema
Get or set the trigger's schema object.
$trigger->schema( $schema );
my $schema = $trigger->schema;
=cut
has schema => (is => 'rw', isa => schema_obj('Schema'), weak_ref => 1);
around schema => \&ex2err;
sub compare_arrays {
=pod
=head2 compare_arrays
Compare two arrays.
=cut
my ($first, $second) = @_;
no warnings; # silence spurious -w undef complaints
return 0 unless (ref $first eq 'ARRAY' and ref $second eq 'ARRAY');
return 0 unless @$first == @$second;
my @first = sort @$first;
my @second = sort @$second;
for (my $i = 0; $i < scalar @first; $i++) {
return 0 if $first[$i] ne $second[$i];
}
return 1;
}
=head2 equals
Determines if this trigger is the same as another
my $is_identical = $trigger1->equals( $trigger2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
return 0 unless $self->$orig($other);
my %names;
for my $name ($self->name, $other->name) {
$name = lc $name if $case_insensitive;
$names{$name}++;
}
if (keys %names > 1) {
return $self->error('Names not equal');
}
if (($self->perform_action_when || '') ne ($other->perform_action_when || '')) {
return $self->error('perform_action_when differs');
}
if (!compare_arrays([ $self->database_events ], [ $other->database_events ])) {
return $self->error('database_events differ');
}
if ($self->on_table ne $other->on_table) {
return $self->error('on_table differs');
}
if ($self->action ne $other->action) {
return $self->error('action differs');
}
if (!$self->_compare_objects(scalar $self->extra, scalar $other->extra)) {
return $self->error('extras differ');
}
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHORS
Anonymous,
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/Index.pm 0000644 0000000 0000000 00000012173 14551163724 022410 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Index;
=pod
=head1 NAME
SQL::Translator::Schema::Index - SQL::Translator index object
=head1 SYNOPSIS
use SQL::Translator::Schema::Index;
my $index = SQL::Translator::Schema::Index->new(
name => 'foo',
fields => [ 'id' ],
type => 'unique',
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::Index> is the index object.
Primary and unique keys are table constraints, not indices.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Schema::Constants;
use SQL::Translator::Schema::IndexField;
use SQL::Translator::Utils qw(ex2err throw parse_list_arg);
use SQL::Translator::Role::ListAttr;
use SQL::Translator::Types qw(schema_obj enum);
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
my %VALID_INDEX_TYPE = (
UNIQUE => 1,
NORMAL => 1,
FULLTEXT => 1, # MySQL only (?)
FULL_TEXT => 1, # MySQL only (?)
SPATIAL => 1, # MySQL only (?)
);
=head2 new
Object constructor.
my $schema = SQL::Translator::Schema::Index->new;
=head2 fields
Gets and sets the fields the index is on. Accepts a string, list or
arrayref; returns an array or array reference. Will unique the field
names and keep them in order by the first occurrence of a field name.
$index->fields('id');
$index->fields('id', 'name');
$index->fields( 'id, name' );
$index->fields( [ 'id', 'name' ] );
$index->fields( qw[ id name ] );
$index->fields(id => { name => 'name', order_by => 'ASC NULLS LAST' });
my @fields = $index->fields;
=cut
with ListAttr fields => (
coerce => sub {
my %seen;
return [
grep !$seen{ $_->name }++,
map SQL::Translator::Schema::IndexField->new($_),
@{ parse_list_arg($_[0]) }
];
}
);
sub is_valid {
=pod
=head2 is_valid
Determine whether the index is valid or not.
my $ok = $index->is_valid;
=cut
my $self = shift;
my $table = $self->table or return $self->error('No table');
my @fields = $self->fields or return $self->error('No fields');
for my $field (@fields) {
return $self->error("Field '$field' does not exist in table '", $table->name, "'")
unless $table->get_field($field);
}
return 1;
}
=head2 name
Get or set the index's name.
my $name = $index->name('foo');
=cut
has name => (
is => 'rw',
coerce => quote_sub(q{ defined $_[0] ? $_[0] : '' }),
default => quote_sub(q{ '' }),
);
=head2 options
Get or set the index's options (e.g., "using" or "where" for PG). Returns
an array or array reference.
my @options = $index->options;
=cut
with ListAttr options => ();
=head2 table
Get or set the index's table object.
my $table = $index->table;
=cut
has table => (is => 'rw', isa => schema_obj('Table'), weak_ref => 1);
around table => \&ex2err;
=head2 type
Get or set the index's type.
my $type = $index->type('unique');
Currently the acceptable types are UNIQUE, NORMAL, FULLTEXT, FULL_TEXT
and SPATIAL; the latter three might be MySQL-specific. While both lowercase
and uppercase types are acceptable input, this method returns the type in
uppercase.
=cut
has type => (
is => 'rw',
coerce => quote_sub(q{ uc $_[0] }),
default => quote_sub(q{ 'NORMAL' }),
isa => enum(
[ keys %VALID_INDEX_TYPE ],
{
msg => "Invalid index type: %s",
allow_false => 1,
}
),
);
around type => \&ex2err;
=head2 equals
Determines if this index is the same as another
my $isIdentical = $index1->equals( $index2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
my $ignore_index_names = shift;
return 0 unless $self->$orig($other);
unless ($ignore_index_names) {
unless ((!$self->name && ($other->name eq $other->fields->[0]->name))
|| (!$other->name && ($self->name eq $self->fields->[0]->name))) {
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
}
}
#return 0 unless $self->is_valid eq $other->is_valid;
return 0 unless $self->type eq $other->type;
# Check fields, regardless of order
my $get_name = sub { return $case_insensitive ? uc(shift->name) : shift->name; };
my @otherFields = sort { $a->{key} cmp $b->{key} }
map +{ item => $_, key => $get_name->($_) }, $other->fields;
my @selfFields = sort { $a->{key} cmp $b->{key} }
map +{ item => $_, key => $get_name->($_) }, $self->fields;
return 0 unless @otherFields == @selfFields;
for my $idx (0 .. $#selfFields) {
return 0 unless $selfFields[$idx]{key} eq $otherFields[$idx]{key};
return 0
unless $self->_compare_objects(scalar $selfFields[$idx]{item}->extra, scalar $otherFields[$idx]{item}->extra);
}
return 0
unless $self->_compare_objects(scalar $self->options, scalar $other->options);
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/View.pm 0000644 0000000 0000000 00000010373 14551163724 022253 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::View;
=pod
=head1 NAME
SQL::Translator::Schema::View - SQL::Translator view object
=head1 SYNOPSIS
use SQL::Translator::Schema::View;
my $view = SQL::Translator::Schema::View->new(
name => 'foo', # name, required
sql => 'select id, name from foo', # SQL for view
fields => 'id, name', # field names in view
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::View> is the view object.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Utils qw(ex2err);
use SQL::Translator::Types qw(schema_obj);
use SQL::Translator::Role::ListAttr;
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
=head2 new
Object constructor.
my $view = SQL::Translator::Schema::View->new;
=head2 fields
Gets and sets the fields in the view. Accepts a string, list or
arrayref; returns an array or array reference. Will unique the field
names and keep them in order by the first occurrence of a field name.
$view->fields('id');
$view->fields('id', 'name');
$view->fields( 'id, name' );
$view->fields( [ 'id', 'name' ] );
$view->fields( qw[ id name ] );
my @fields = $view->fields;
=cut
with ListAttr fields => (uniq => 1);
=head2 tables
Gets and sets the tables the SELECT mentions. Accepts a string, list or
arrayref; returns an array or array reference. Will unique the table
names and keep them in order by the first occurrence of a table name.
$view->tables('foo');
$view->tables('foo', 'bar');
$view->tables( 'foo, bar' );
$view->tables( [ 'foo', 'bar' ] );
$view->tables( qw[ foo bar ] );
my @tables = $view->tables;
=cut
with ListAttr tables => (uniq => 1);
=head2 options
Gets or appends a list of options on the view.
$view->options('ALGORITHM=UNDEFINED');
my @options = $view->options;
=cut
with ListAttr options => (uniq => 1, append => 1);
sub is_valid {
=pod
=head2 is_valid
Determine whether the view is valid or not.
my $ok = $view->is_valid;
=cut
my $self = shift;
return $self->error('No name') unless $self->name;
return $self->error('No sql') unless $self->sql;
return 1;
}
=head2 name
Get or set the view's name.
my $name = $view->name('foo');
=cut
has name => (is => 'rw', default => quote_sub(q{ '' }));
=head2 order
Get or set the view's order.
my $order = $view->order(3);
=cut
has order => (is => 'rw', default => quote_sub(q{ 0 }));
around order => sub {
my ($orig, $self, $arg) = @_;
if (defined $arg && $arg =~ /^\d+$/) {
return $self->$orig($arg);
}
return $self->$orig;
};
=head2 sql
Get or set the view's SQL.
my $sql = $view->sql('select * from foo');
=cut
has sql => (is => 'rw', default => quote_sub(q{ '' }));
=head2 schema
Get or set the view's schema object.
$view->schema( $schema );
my $schema = $view->schema;
=cut
has schema => (is => 'rw', isa => schema_obj('Schema'), weak_ref => 1);
around schema => \&ex2err;
=head2 equals
Determines if this view is the same as another
my $isIdentical = $view1->equals( $view2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
my $ignore_sql = shift;
return 0 unless $self->$orig($other);
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
#return 0 unless $self->is_valid eq $other->is_valid;
unless ($ignore_sql) {
my $selfSql = $self->sql;
my $otherSql = $other->sql;
# Remove comments
$selfSql =~ s/--.*$//mg;
$otherSql =~ s/--.*$//mg;
# Collapse whitespace to space to avoid whitespace comparison issues
$selfSql =~ s/\s+/ /sg;
$otherSql =~ s/\s+/ /sg;
return 0 unless $selfSql eq $otherSql;
}
my $selfFields = join(":", $self->fields);
my $otherFields = join(":", $other->fields);
return 0
unless $case_insensitive
? uc($selfFields) eq uc($otherFields)
: $selfFields eq $otherFields;
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/Field.pm 0000644 0000000 0000000 00000030700 14551163724 022360 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Field;
=pod
=head1 NAME
SQL::Translator::Schema::Field - SQL::Translator field object
=head1 SYNOPSIS
use SQL::Translator::Schema::Field;
my $field = SQL::Translator::Schema::Field->new(
name => 'foo',
table => $table,
);
=head1 DESCRIPTION
C<SQL::Translator::Schema::Field> is the field object.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Schema::Constants;
use SQL::Translator::Types qw(schema_obj);
use SQL::Translator::Utils qw(parse_list_arg ex2err throw carp_ro);
use Sub::Quote qw(quote_sub);
use Scalar::Util ();
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
# Stringify to our name, being careful not to pass any args through so we don't
# accidentally set it to undef. We also have to tweak bool so the object is
# still true when it doesn't have a name (which shouldn't happen!).
use overload
'""' => sub { shift->name },
'bool' => sub { $_[0]->name || $_[0] },
fallback => 1,
;
use DBI qw(:sql_types);
# Mapping from string to sql constant
our %type_mapping = (
integer => SQL_INTEGER,
int => SQL_INTEGER,
tinyint => SQL_TINYINT,
smallint => SQL_SMALLINT,
bigint => SQL_BIGINT,
double => SQL_DOUBLE,
'double precision' => SQL_DOUBLE,
decimal => SQL_DECIMAL,
dec => SQL_DECIMAL,
numeric => SQL_NUMERIC,
real => SQL_REAL,
float => SQL_FLOAT,
bit => SQL_BIT,
date => SQL_DATE,
datetime => SQL_DATETIME,
timestamp => SQL_TIMESTAMP,
time => SQL_TIME,
char => SQL_CHAR,
varchar => SQL_VARCHAR,
binary => SQL_BINARY,
varbinary => SQL_VARBINARY,
tinyblob => SQL_BLOB,
blob => SQL_BLOB,
text => SQL_LONGVARCHAR
);
has _numeric_sql_data_types => (is => 'lazy');
sub _build__numeric_sql_data_types {
return {
map { $_ => 1 } (
SQL_INTEGER, SQL_TINYINT, SQL_SMALLINT, SQL_BIGINT, SQL_DOUBLE, SQL_NUMERIC, SQL_DECIMAL, SQL_FLOAT, SQL_REAL
)
};
}
=head2 new
Object constructor.
my $field = SQL::Translator::Schema::Field->new(
name => 'foo',
table => $table,
);
=head2 comments
Get or set the comments on a field. May be called several times to
set and it will accumulate the comments. Called in an array context,
returns each comment individually; called in a scalar context, returns
all the comments joined on newlines.
$field->comments('foo');
$field->comments('bar');
print join( ', ', $field->comments ); # prints "foo, bar"
=cut
has comments => (
is => 'rw',
coerce => quote_sub(q{ ref($_[0]) eq 'ARRAY' ? $_[0] : [$_[0]] }),
default => quote_sub(q{ [] }),
);
around comments => sub {
my $orig = shift;
my $self = shift;
for my $arg (@_) {
$arg = $arg->[0] if ref $arg;
push @{ $self->$orig }, $arg if $arg;
}
return wantarray
? @{ $self->$orig }
: join("\n", @{ $self->$orig });
};
=head2 data_type
Get or set the field's data type.
my $data_type = $field->data_type('integer');
=cut
has data_type => (is => 'rw', default => quote_sub(q{ '' }));
=head2 sql_data_type
Constant from the DBI package representing this data type. See L<DBI>
for more details.
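For example (a sketch; the field is built standalone just to show the lookup
in the C<%type_mapping> table above):
  use DBI qw(:sql_types);
  use SQL::Translator::Schema::Field;
  my $field = SQL::Translator::Schema::Field->new(name => 'title');
  $field->data_type('varchar');
  print "varchar maps to SQL_VARCHAR\n"
    if $field->sql_data_type == SQL_VARCHAR;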
=cut
has sql_data_type => (is => 'rw', lazy => 1, builder => 1);
sub _build_sql_data_type {
$type_mapping{ lc $_[0]->data_type } || SQL_UNKNOWN_TYPE;
}
=head2 default_value
Get or set the field's default value. Will return undef if not defined
and could return the empty string (it's a valid default value), so don't
assume an error like other methods.
my $default = $field->default_value('foo');
=cut
has default_value => (is => 'rw');
=head2 foreign_key_reference
Get or set the field's foreign key reference;
my $constraint = $field->foreign_key_reference( $constraint );
=cut
has foreign_key_reference => (
is => 'rw',
predicate => '_has_foreign_key_reference',
isa => schema_obj('Constraint'),
weak_ref => 1,
);
around foreign_key_reference => sub {
my $orig = shift;
my $self = shift;
if (my $arg = shift) {
return $self->error('Foreign key reference for ', $self->name, ' already defined')
if $self->_has_foreign_key_reference;
return ex2err($orig, $self, $arg);
}
$self->$orig;
};
=head2 is_auto_increment
Get or set the field's C<is_auto_increment> attribute.
my $is_auto = $field->is_auto_increment(1);
=cut
has is_auto_increment => (
is => 'rw',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
builder => 1,
lazy => 1,
);
sub _build_is_auto_increment {
my ($self) = @_;
if (my $table = $self->table) {
if (my $schema = $table->schema) {
if ( $schema->database eq 'PostgreSQL'
&& $self->data_type eq 'serial') {
return 1;
}
}
}
return 0;
}
=head2 is_foreign_key
Returns whether or not the field is a foreign key.
my $is_fk = $field->is_foreign_key;
=cut
has is_foreign_key => (
is => 'rw',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
builder => 1,
lazy => 1,
);
sub _build_is_foreign_key {
my ($self) = @_;
if (my $table = $self->table) {
for my $c ($table->get_constraints) {
if ($c->type eq FOREIGN_KEY) {
my %fields = map { $_, 1 } $c->fields;
if ($fields{ $self->name }) {
$self->foreign_key_reference($c);
return 1;
}
}
}
}
return 0;
}
=head2 is_nullable
Get or set whether the field can be null. If not defined, then
returns "1" (assumes the field can be null). The argument is evaluated
by Perl for True or False, so the following are equivalent:
$is_nullable = $field->is_nullable(0);
$is_nullable = $field->is_nullable('');
$is_nullable = $field->is_nullable('0');
While this is technically a field constraint, it's probably easier to
represent this as an attribute of the field. In order to keep things
consistent, any other constraints on the field (unique, primary, and
foreign keys; checks) are represented as table constraints.
=cut
has is_nullable => (
is => 'rw',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
default => quote_sub(q{ 1 }),
);
# A primary-key field can never be nullable, whatever the caller passes in.
around is_nullable => sub {
my ($orig, $self, $arg) = @_;
$self->$orig($self->is_primary_key ? 0 : defined $arg ? $arg : ());
};
=head2 is_primary_key
Get or set the field's C<is_primary_key> attribute. Does not create
a table constraint (should it?).
my $is_pk = $field->is_primary_key(1);
=cut
has is_primary_key => (
is => 'rw',
coerce => quote_sub(q{ $_[0] ? 1 : 0 }),
lazy => 1,
builder => 1,
);
sub _build_is_primary_key {
my ($self) = @_;
if (my $table = $self->table) {
if (my $pk = $table->primary_key) {
my %fields = map { $_, 1 } $pk->fields;
return $fields{ $self->name } || 0;
}
}
return 0;
}
=head2 is_unique
Determine whether the field has a UNIQUE constraint or not.
my $is_unique = $field->is_unique;
=cut
has is_unique => (is => 'lazy', init_arg => undef);
around is_unique => carp_ro('is_unique');
sub _build_is_unique {
my ($self) = @_;
if (my $table = $self->table) {
for my $c ($table->get_constraints) {
if ($c->type eq UNIQUE) {
my %fields = map { $_, 1 } $c->fields;
if ($fields{ $self->name }) {
return 1;
}
}
}
}
return 0;
}
sub is_valid {
=pod
=head2 is_valid
Determine whether the field is valid or not.
my $ok = $field->is_valid;
=cut
my $self = shift;
return $self->error('No name') unless $self->name;
return $self->error('No data type') unless $self->data_type;
return $self->error('No table object') unless $self->table;
return 1;
}
=head2 name
Get or set the field's name.
my $name = $field->name('foo');
The field object will also stringify to its name.
my $setter_name = "set_$field";
Errors ("No field name") if you try to set a blank name.
=cut
has name => (is => 'rw', isa => sub { throw("No field name") unless $_[0] });
around name => sub {
my $orig = shift;
my $self = shift;
if (my ($arg) = @_) {
if (my $schema = $self->table) {
return $self->error(qq[Can't use field name "$arg": field exists])
if $schema->get_field($arg);
}
}
return ex2err($orig, $self, @_);
};
sub full_name {
=head2 full_name
Read-only method to return the field's name with its table name prepended,
e.g. "person.foo".
=cut
my $self = shift;
return $self->table . "." . $self->name;
}
=head2 order
Get or set the field's order.
my $order = $field->order(3);
=cut
has order => (is => 'rw', default => quote_sub(q{ 0 }));
around order => sub {
my ($orig, $self, $arg) = @_;
if (defined $arg && $arg =~ /^\d+$/) {
return $self->$orig($arg);
}
return $self->$orig;
};
sub schema {
=head2 schema
Shortcut to get the field's schema ($field->table->schema) or undef if it
doesn't have one.
my $schema = $field->schema;
=cut
my $self = shift;
if (my $table = $self->table) { return $table->schema || undef; }
return undef;
}
=head2 size
Get or set the field's size. Accepts a string, array or arrayref of
numbers and returns a string.
$field->size( 30 );
$field->size( [ 255 ] );
$size = $field->size( 10, 2 );
print $size; # prints "10,2"
$size = $field->size( '10, 2' );
print $size; # prints "10,2"
=cut
has size => (
is => 'rw',
default => quote_sub(q{ [0] }),
coerce => sub {
my @sizes = grep { defined && m/^\d+(?:\.\d+)?$/ } @{ parse_list_arg($_[0]) };
@sizes ? \@sizes : [0];
},
);
around size => sub {
my $orig = shift;
my $self = shift;
my $numbers = parse_list_arg(@_);
if (@$numbers) {
my @new;
for my $num (@$numbers) {
if (defined $num && $num =~ m/^\d+(?:\.\d+)?$/) {
push @new, $num;
}
}
$self->$orig(\@new) if @new; # only set if all OK
}
return wantarray
? @{ $self->$orig || [0] }
: join(',', @{ $self->$orig || [0] });
};
=head2 table
Get or set the field's table object. As the table object stringifies, this can
also be used to get the table name.
my $table = $field->table;
print "Table name: $table";
=cut
has table => (is => 'rw', isa => schema_obj('Table'), weak_ref => 1);
around table => \&ex2err;
=head2 parsed_field
Returns the field exactly as the parser found it.
=cut
has parsed_field => (is => 'rw');
around parsed_field => sub {
my $orig = shift;
my $self = shift;
return $self->$orig(@_) || $self;
};
=head2 equals
Determines if this field is the same as another
my $isIdentical = $field1->equals( $field2 );
=cut
around equals => sub {
my $orig = shift;
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
return 0 unless $self->$orig($other);
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
# Comparing types: use sql_data_type if both are not 0. Else use string data_type
if ($self->sql_data_type && $other->sql_data_type) {
return 0 unless $self->sql_data_type == $other->sql_data_type;
} else {
return 0 unless lc($self->data_type) eq lc($other->data_type);
}
return 0 unless $self->size eq $other->size;
{
my $lhs = $self->default_value;
$lhs = \'NULL' unless defined $lhs;
my $lhs_is_ref = !!ref $lhs;
my $rhs = $other->default_value;
$rhs = \'NULL' unless defined $rhs;
my $rhs_is_ref = !!ref $rhs;
# If only one is a ref, fail. -- rjbs, 2008-12-02
return 0 if $lhs_is_ref xor $rhs_is_ref;
my $effective_lhs = $lhs_is_ref ? $$lhs : $lhs;
my $effective_rhs = $rhs_is_ref ? $$rhs : $rhs;
if ( $self->_is_numeric_data_type
&& Scalar::Util::looks_like_number($effective_lhs)
&& Scalar::Util::looks_like_number($effective_rhs)) {
return 0 if ($effective_lhs + 0) != ($effective_rhs + 0);
} else {
return 0 if $effective_lhs ne $effective_rhs;
}
}
return 0 unless $self->is_nullable eq $other->is_nullable;
# return 0 unless $self->is_unique eq $other->is_unique;
return 0 unless $self->is_primary_key eq $other->is_primary_key;
# return 0 unless $self->is_foreign_key eq $other->is_foreign_key;
return 0 unless $self->is_auto_increment eq $other->is_auto_increment;
# return 0 unless $self->comments eq $other->comments;
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
return 1;
};
# Must come after all 'has' declarations
around new => \&ex2err;
sub _is_numeric_data_type {
my $self = shift;
return $self->_numeric_sql_data_types->{ $self->sql_data_type };
}
1;
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Schema/Object.pm 0000644 0000000 0000000 00000002026 14551163724 022543 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Object;
=head1 NAME
SQL::Translator::Schema::Object - Base class for SQL::Translator schema objects
=head1 SYNOPSIS
package SQL::Translator::Schema::Foo;
use Moo;
extends 'SQL::Translator::Schema::Object';
=head1 DESCRIPTION
Base class for Schema objects. A Moo class consuming the following
roles.
=over
=item L<SQL::Translator::Role::Error>
Provides C<< $obj->error >>, similar to L<Class::Base>.
=item L<SQL::Translator::Role::BuildArgs>
Removes undefined constructor arguments, for backwards compatibility.
=item L<SQL::Translator::Schema::Role::Extra>
Provides an C<extra> attribute storing a hashref of arbitrary data.
=item L<SQL::Translator::Schema::Role::Compare>
Provides an C<< $obj->equals($other) >> method for testing object
equality.
=back
=cut
use Moo 1.000003;
# screw you PAUSE
our $VERSION = '1.65';
with qw(
SQL::Translator::Role::Error
SQL::Translator::Role::BuildArgs
SQL::Translator::Schema::Role::Extra
SQL::Translator::Schema::Role::Compare
);
1;
SQL-Translator-1.65/lib/SQL/Translator/Schema/Table.pm 0000644 0000000 0000000 00000060110 14551163724 022362 0 ustar 00root root 0000000 0000000 package SQL::Translator::Schema::Table;
=pod
=head1 NAME
SQL::Translator::Schema::Table - SQL::Translator table object
=head1 SYNOPSIS
use SQL::Translator::Schema::Table;
my $table = SQL::Translator::Schema::Table->new( name => 'foo' );
=head1 DESCRIPTION
C<SQL::Translator::Schema::Table> is the table object.
=head1 METHODS
=cut
use Moo;
use SQL::Translator::Utils qw(parse_list_arg ex2err throw carp_ro);
use SQL::Translator::Types qw(schema_obj);
use SQL::Translator::Role::ListAttr;
use SQL::Translator::Schema::Constants;
use SQL::Translator::Schema::Constraint;
use SQL::Translator::Schema::Field;
use SQL::Translator::Schema::Index;
use Carp::Clan '^SQL::Translator';
use List::Util 'max';
use Sub::Quote qw(quote_sub);
extends 'SQL::Translator::Schema::Object';
our $VERSION = '1.65';
# Stringify to our name, being careful not to pass any args through so we don't
# accidentally set it to undef. We also have to tweak bool so the object is
# still true when it doesn't have a name (which shouldn't happen!).
use overload
'""' => sub { shift->name },
'bool' => sub { $_[0]->name || $_[0] },
fallback => 1,
;
=pod
=head2 new
Object constructor.
my $table = SQL::Translator::Schema::Table->new(
schema => $schema,
name => 'foo',
);
=head2 add_constraint
Add a constraint to the table. Returns the newly created
C<SQL::Translator::Schema::Constraint> object.
my $c1 = $table->add_constraint(
name => 'pk',
type => PRIMARY_KEY,
fields => [ 'foo_id' ],
);
my $c2 = SQL::Translator::Schema::Constraint->new( name => 'uniq' );
$c2 = $table->add_constraint( $c2 );
=cut
has _constraints => (
is => 'ro',
init_arg => undef,
default => quote_sub(q{ +[] }),
predicate => 1,
lazy => 1,
);
sub add_constraint {
my $self = shift;
my $constraint_class = 'SQL::Translator::Schema::Constraint';
my $constraint;
if (UNIVERSAL::isa($_[0], $constraint_class)) {
$constraint = shift;
$constraint->table($self);
} else {
my %args = @_;
$args{'table'} = $self;
$constraint = $constraint_class->new(\%args)
or return $self->error($constraint_class->error);
}
#
# If we're trying to add a PK when one is already defined,
# then just add the fields to the existing definition.
#
my $ok = 1;
my $pk = $self->primary_key;
if ($pk && $constraint->type eq PRIMARY_KEY) {
$self->primary_key($constraint->fields);
$pk->name($constraint->name) if $constraint->name;
my %extra = $constraint->extra;
$pk->extra(%extra) if keys %extra;
$constraint = $pk;
$ok = 0;
} elsif ($constraint->type eq PRIMARY_KEY) {
for my $fname ($constraint->fields) {
if (my $f = $self->get_field($fname)) {
$f->is_primary_key(1);
}
}
}
#
# See if another constraint of the same type
# covers the same fields. -- This doesn't work! ky
#
# elsif ( $constraint->type ne CHECK_C ) {
# my @field_names = $constraint->fields;
# for my $c (
# grep { $_->type eq $constraint->type }
# $self->get_constraints
# ) {
# my %fields = map { $_, 1 } $c->fields;
# for my $field_name ( @field_names ) {
# if ( $fields{ $field_name } ) {
# $constraint = $c;
# $ok = 0;
# last;
# }
# }
# last unless $ok;
# }
# }
if ($ok) {
push @{ $self->_constraints }, $constraint;
}
return $constraint;
}
=head2 drop_constraint
Remove a constraint from the table. Returns the constraint object if the
constraint was found and removed, an error otherwise. The single parameter can
be either a constraint name or a C<SQL::Translator::Schema::Constraint> object.
$table->drop_constraint('myconstraint');
=cut
sub drop_constraint {
my $self = shift;
my $constraint_class = 'SQL::Translator::Schema::Constraint';
my $constraint_name;
if (UNIVERSAL::isa($_[0], $constraint_class)) {
$constraint_name = shift->name;
} else {
$constraint_name = shift;
}
if (!($self->_has_constraints && grep { $_->name eq $constraint_name } @{ $self->_constraints })) {
return $self->error(qq[Can't drop constraint: "$constraint_name" doesn't exist]);
}
my @cs = @{ $self->_constraints };
my ($constraint_id)
= grep { $cs[$_]->name eq $constraint_name } (0 .. $#cs);
my $constraint = splice(@{ $self->_constraints }, $constraint_id, 1);
return $constraint;
}
=head2 add_index
Add an index to the table. Returns the newly created
C<SQL::Translator::Schema::Index> object.
my $i1 = $table->add_index(
name => 'name',
fields => [ 'name' ],
type => 'normal',
);
my $i2 = SQL::Translator::Schema::Index->new( name => 'id' );
$i2 = $table->add_index( $i2 );
=cut
has _indices => (
is => 'ro',
init_arg => undef,
default => quote_sub(q{ [] }),
predicate => 1,
lazy => 1,
);
sub add_index {
my $self = shift;
my $index_class = 'SQL::Translator::Schema::Index';
my $index;
if (UNIVERSAL::isa($_[0], $index_class)) {
$index = shift;
$index->table($self);
} else {
my %args = @_;
$args{'table'} = $self;
$index = $index_class->new(\%args)
or return $self->error($index_class->error);
}
foreach my $ex_index ($self->get_indices) {
return if ($ex_index->equals($index));
}
push @{ $self->_indices }, $index;
return $index;
}
=head2 drop_index
Remove an index from the table. Returns the index object if the index was
found and removed, an error otherwise. The single parameter can be either
an index name or a C<SQL::Translator::Schema::Index> object.
$table->drop_index('myindex');
=cut
sub drop_index {
my $self = shift;
my $index_class = 'SQL::Translator::Schema::Index';
my $index_name;
if (UNIVERSAL::isa($_[0], $index_class)) {
$index_name = shift->name;
} else {
$index_name = shift;
}
if (!($self->_has_indices && grep { $_->name eq $index_name } @{ $self->_indices })) {
return $self->error(qq[Can't drop index: "$index_name" doesn't exist]);
}
my @is = @{ $self->_indices };
my ($index_id) = grep { $is[$_]->name eq $index_name } (0 .. $#is);
my $index = splice(@{ $self->_indices }, $index_id, 1);
return $index;
}
=head2 add_field
Add a field to the table. Returns the newly created
C<SQL::Translator::Schema::Field> object. The "name" parameter is
required. If you try to create a field with the same name as an
existing field, you will get an error and the field will not be created.
my $f1 = $table->add_field(
name => 'foo_id',
data_type => 'integer',
size => 11,
);
my $f2 = SQL::Translator::Schema::Field->new(
name => 'name',
table => $table,
);
$f2 = $table->add_field( $f2 ) or die $table->error;
=cut
has _fields => (
is => 'ro',
init_arg => undef,
default => quote_sub(q{ +{} }),
predicate => 1,
lazy => 1
);
sub add_field {
my $self = shift;
my $field_class = 'SQL::Translator::Schema::Field';
my $field;
if (UNIVERSAL::isa($_[0], $field_class)) {
$field = shift;
$field->table($self);
} else {
my %args = @_;
$args{'table'} = $self;
$field = $field_class->new(\%args)
or return $self->error($field_class->error);
}
my $existing_order = { map { $_->order => $_->name } $self->get_fields };
# supplied order, possible unordered assembly
if ($field->order) {
if ($existing_order->{ $field->order }) {
croak sprintf
"Requested order '%d' for column '%s' conflicts with already existing column '%s'",
$field->order,
$field->name,
$existing_order->{ $field->order },
;
}
} else {
my $last_field_no = max(keys %$existing_order) || 0;
if ($last_field_no != scalar keys %$existing_order) {
croak sprintf
"Table '%s' field order incomplete - unable to auto-determine order for newly added field",
$self->name,
;
}
$field->order($last_field_no + 1);
}
# We know we have a name as the Field->new above errors if none given.
my $field_name = $field->name;
if ($self->get_field($field_name)) {
return $self->error(qq[Can't use field name "$field_name": field exists]);
} else {
$self->_fields->{$field_name} = $field;
}
return $field;
}
=head2 drop_field
Remove a field from the table. Returns the field object if the field was
found and removed, an error otherwise. The single parameter can be either
a field name or a C<SQL::Translator::Schema::Field> object.
$table->drop_field('myfield');
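The implementation below also accepts a C<cascade> option; when true, the
dropped field is additionally removed from any indices and constraints that
reference it:
$table->drop_field('myfield', cascade => 1);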
=cut
sub drop_field {
my $self = shift;
my $field_class = 'SQL::Translator::Schema::Field';
my $field_name;
if (UNIVERSAL::isa($_[0], $field_class)) {
$field_name = shift->name;
} else {
$field_name = shift;
}
my %args = @_;
my $cascade = $args{'cascade'};
if (!($self->_has_fields && exists $self->_fields->{$field_name})) {
return $self->error(qq[Can't drop field: "$field_name" doesn't exist]);
}
my $field = delete $self->_fields->{$field_name};
if ($cascade) {
# Remove this field from all indices using it
foreach my $i ($self->get_indices()) {
my @fs = $i->fields();
@fs = grep { $_ ne $field->name } @fs;
$i->fields(@fs);
}
# Remove this field from all constraints using it
foreach my $c ($self->get_constraints()) {
my @cs = $c->fields();
@cs = grep { $_ ne $field->name } @cs;
$c->fields(@cs);
}
}
return $field;
}
=head2 comments
Get or set the comments on a table. May be called several times and the
comments will accumulate. Called in an array context,
returns each comment individually; called in a scalar context, returns
all the comments joined on newlines.
$table->comments('foo');
$table->comments('bar');
print join( ', ', $table->comments ); # prints "foo, bar"
=cut
has comments => (
is => 'rw',
coerce => quote_sub(q{ ref($_[0]) eq 'ARRAY' ? $_[0] : [$_[0]] }),
default => quote_sub(q{ [] }),
);
around comments => sub {
my $orig = shift;
my $self = shift;
my @comments = ref $_[0] ? @{ $_[0] } : @_;
for my $arg (@comments) {
$arg = $arg->[0] if ref $arg;
push @{ $self->$orig }, $arg if defined $arg && $arg;
}
@comments = @{ $self->$orig };
return
wantarray ? @comments
: @comments ? join("\n", @comments)
: undef;
};
=head2 get_constraints
Returns all the constraint objects as an array or array reference.
my @constraints = $table->get_constraints;
=cut
sub get_constraints {
my $self = shift;
if ($self->_has_constraints) {
return wantarray ? @{ $self->_constraints } : $self->_constraints;
} else {
$self->error('No constraints');
return;
}
}
=head2 get_indices
Returns all the index objects as an array or array reference.
my @indices = $table->get_indices;
=cut
sub get_indices {
my $self = shift;
if ($self->_has_indices) {
return wantarray
? @{ $self->_indices }
: $self->_indices;
} else {
$self->error('No indices');
return;
}
}
=head2 get_field
Returns a field by the name provided.
my $field = $table->get_field('foo');
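A true second argument requests a case-insensitive lookup (see the
implementation below):
my $field = $table->get_field('FOO', 1);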
=cut
sub get_field {
my $self = shift;
my $field_name = shift or return $self->error('No field name');
my $case_insensitive = shift;
return $self->error(qq[Field "$field_name" does not exist])
unless $self->_has_fields;
if ($case_insensitive) {
$field_name = uc($field_name);
foreach my $field (keys %{ $self->_fields }) {
return $self->_fields->{$field} if $field_name eq uc($field);
}
return $self->error(qq[Field "$field_name" does not exist]);
}
return $self->error(qq[Field "$field_name" does not exist])
unless exists $self->_fields->{$field_name};
return $self->_fields->{$field_name};
}
=head2 get_fields
Returns all the field objects as an array or array reference.
my @fields = $table->get_fields;
=cut
sub get_fields {
my $self = shift;
my @fields = map { $_->[1] }
sort { $a->[0] <=> $b->[0] }
map { [ $_->order, $_ ] } values %{ $self->_has_fields ? $self->_fields : {} };
if (@fields) {
return wantarray ? @fields : \@fields;
} else {
$self->error('No fields');
return;
}
}
=head2 is_valid
Determine whether the table is valid or not.
my $ok = $table->is_valid;
=cut
sub is_valid {
my $self = shift;
return $self->error('No name') unless $self->name;
return $self->error('No fields') unless $self->get_fields;
for my $object ($self->get_fields, $self->get_indices, $self->get_constraints) {
return $object->error unless $object->is_valid;
}
return 1;
}
=head2 is_trivial_link
True if the table has no data (non-key) fields and only uses single-key joins.
=cut
has is_trivial_link => (is => 'lazy', init_arg => undef);
around is_trivial_link => carp_ro('is_trivial_link');
sub _build_is_trivial_link {
my $self = shift;
return 0 if $self->is_data;
my %fk = ();
foreach my $field ($self->get_fields) {
next unless $field->is_foreign_key;
$fk{ $field->foreign_key_reference->reference_table }++;
}
foreach my $referenced (keys %fk) {
if ($fk{$referenced} > 1) {
return 0;
}
}
return 1;
}
=head2 is_data
Returns true if the table has some non-key fields.
=cut
has is_data => (is => 'lazy', init_arg => undef);
around is_data => carp_ro('is_data');
sub _build_is_data {
my $self = shift;
foreach my $field ($self->get_fields) {
if (!$field->is_primary_key and !$field->is_foreign_key) {
return 1;
}
}
return 0;
}
=head2 can_link
Determine whether the table can link the two argument tables via a many-to-many relationship.
my $ok = $table->can_link($table1,$table2);
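Judging from the implementation below, the return value is an array reference:
C<[0]> when no link is possible, otherwise the link type ('one2one',
'one2many', 'many2one' or 'many2many') followed by the two lists of foreign
key references. A sketch of unpacking it:
my ($type, $fks1, $fks2) = @{ $table->can_link($table1, $table2) };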
=cut
has _can_link => (is => 'ro', init_arg => undef, default => quote_sub(q{ +{} }));
sub can_link {
my ($self, $table1, $table2) = @_;
return $self->_can_link->{ $table1->name }{ $table2->name }
if defined $self->_can_link->{ $table1->name }{ $table2->name };
if ($self->is_data == 1) {
$self->_can_link->{ $table1->name }{ $table2->name } = [0];
$self->_can_link->{ $table2->name }{ $table1->name } = [0];
return $self->_can_link->{ $table1->name }{ $table2->name };
}
my %fk = ();
foreach my $field ($self->get_fields) {
if ($field->is_foreign_key) {
push @{ $fk{ $field->foreign_key_reference->reference_table } }, $field->foreign_key_reference;
}
}
if (!defined($fk{ $table1->name }) or !defined($fk{ $table2->name })) {
$self->_can_link->{ $table1->name }{ $table2->name } = [0];
$self->_can_link->{ $table2->name }{ $table1->name } = [0];
return $self->_can_link->{ $table1->name }{ $table2->name };
}
# trivial traversal, only one way to link the two tables
if ( scalar(@{ $fk{ $table1->name } }) == 1
and scalar(@{ $fk{ $table2->name } }) == 1) {
$self->_can_link->{ $table1->name }{ $table2->name } = [ 'one2one', $fk{ $table1->name }, $fk{ $table2->name } ];
$self->_can_link->{ $table2->name }{ $table1->name } = [ 'one2one', $fk{ $table2->name }, $fk{ $table1->name } ];
# non-trivial traversal. one way to link table2,
# many ways to link table1
} elsif (scalar(@{ $fk{ $table1->name } }) > 1
and scalar(@{ $fk{ $table2->name } }) == 1) {
$self->_can_link->{ $table1->name }{ $table2->name } = [ 'many2one', $fk{ $table1->name }, $fk{ $table2->name } ];
$self->_can_link->{ $table2->name }{ $table1->name } = [ 'one2many', $fk{ $table2->name }, $fk{ $table1->name } ];
# non-trivial traversal. one way to link table1,
# many ways to link table2
} elsif (scalar(@{ $fk{ $table1->name } }) == 1
and scalar(@{ $fk{ $table2->name } }) > 1) {
$self->_can_link->{ $table1->name }{ $table2->name } = [ 'one2many', $fk{ $table1->name }, $fk{ $table2->name } ];
$self->_can_link->{ $table2->name }{ $table1->name } = [ 'many2one', $fk{ $table2->name }, $fk{ $table1->name } ];
# non-trivial traversal. many ways to link table1 and table2
} elsif (scalar(@{ $fk{ $table1->name } }) > 1
and scalar(@{ $fk{ $table2->name } }) > 1) {
$self->_can_link->{ $table1->name }{ $table2->name } = [ 'many2many', $fk{ $table1->name }, $fk{ $table2->name } ];
$self->_can_link->{ $table2->name }{ $table1->name } = [ 'many2many', $fk{ $table2->name }, $fk{ $table1->name } ];
# one of the tables didn't export a key
# to this table, no linking possible
} else {
$self->_can_link->{ $table1->name }{ $table2->name } = [0];
$self->_can_link->{ $table2->name }{ $table1->name } = [0];
}
return $self->_can_link->{ $table1->name }{ $table2->name };
}
=head2 name
Get or set the table's name.
Errors ("No table name") if you try to set a blank name.
If provided an argument, checks the schema object for a table of
that name and disallows the change if one exists (setting the error to
"Can't use table name "%s": table exists").
my $table_name = $table->name('foo');
=cut
has name => (
is => 'rw',
isa => sub { throw("No table name") unless $_[0] },
);
around name => sub {
my $orig = shift;
my $self = shift;
if (my ($arg) = @_) {
if (my $schema = $self->schema) {
return $self->error(qq[Can't use table name "$arg": table exists])
if $schema->get_table($arg);
}
}
return ex2err($orig, $self, @_);
};
=head2 schema
Get or set the table's schema object.
my $schema = $table->schema;
=cut
has schema => (is => 'rw', isa => schema_obj('Schema'), weak_ref => 1);
around schema => \&ex2err;
sub primary_key {
=pod
=head2 primary_key
Gets or sets the table's primary key(s). Takes one or more field
names (as a string, list or array[ref]) as an argument. If the field
names are present, it will create a new PK if none exists, or it will
add to the fields of an existing PK (de-duplicating the field names).
Returns the C<SQL::Translator::Schema::Constraint> object representing
the primary key.
These are equivalent:
$table->primary_key('id');
$table->primary_key(['name']);
$table->primary_key('id','name');
$table->primary_key(['id','name']);
$table->primary_key('id,name');
$table->primary_key(qw[ id name ]);
my $pk = $table->primary_key;
=cut
my $self = shift;
my $fields = parse_list_arg(@_);
my $constraint;
if (@$fields) {
for my $f (@$fields) {
return $self->error(qq[Invalid field "$f"])
unless $self->get_field($f);
}
my $has_pk;
for my $c ($self->get_constraints) {
if ($c->type eq PRIMARY_KEY) {
$has_pk = 1;
$c->fields(@{ $c->fields }, @$fields);
$constraint = $c;
}
}
unless ($has_pk) {
$constraint = $self->add_constraint(
type => PRIMARY_KEY,
fields => $fields,
) or return;
}
}
if ($constraint) {
return $constraint;
} else {
for my $c ($self->get_constraints) {
return $c if $c->type eq PRIMARY_KEY;
}
}
return;
}
=head2 options
Get or append to the table's options (e.g., table types for MySQL).
Returns an array or array reference.
my @options = $table->options;
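Appending goes through the same method. The exact shape of each option (plain
string or hashref) depends on the parser or producer in use, so the value
below is purely illustrative:
$table->options({ ENGINE => 'InnoDB' });
my $opts = $table->options; # array reference in scalar context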
=cut
with ListAttr options => (append => 1);
=head2 order
Get or set the table's order.
my $order = $table->order(3);
=cut
has order => (is => 'rw', default => quote_sub(q{ 0 }));
around order => sub {
my ($orig, $self, $arg) = @_;
if (defined $arg && $arg =~ /^\d+$/) {
return $self->$orig($arg);
}
return $self->$orig;
};
=head2 field_names
Read-only method to return a list or array ref of the field names. Returns undef
or an empty list if the table has no fields set. Useful if you want to
avoid the overload magic of the Field objects returned by the get_fields method.
my @names = $table->field_names;
=cut
sub field_names {
my $self = shift;
my @fields = map { $_->name } $self->get_fields;
if (@fields) {
return wantarray ? @fields : \@fields;
} else {
$self->error('No fields');
return;
}
}
sub equals {
=pod
=head2 equals
Determines if this table is the same as another
my $isIdentical = $table1->equals( $table2 );
=cut
my $self = shift;
my $other = shift;
my $case_insensitive = shift;
return 0 unless $self->SUPER::equals($other);
return 0
unless $case_insensitive
? uc($self->name) eq uc($other->name)
: $self->name eq $other->name;
return 0
unless $self->_compare_objects(scalar $self->options, scalar $other->options);
return 0
unless $self->_compare_objects(scalar $self->extra, scalar $other->extra);
# Fields
# Go through our fields
my %checkedFields;
foreach my $field ($self->get_fields) {
my $otherField = $other->get_field($field->name, $case_insensitive);
return 0 unless $field->equals($otherField, $case_insensitive);
$checkedFields{ $field->name } = 1;
}
# Go through the other table's fields
foreach my $otherField ($other->get_fields) {
next if $checkedFields{ $otherField->name };
return 0;
}
# Constraints
# Go through our constraints
my %checkedConstraints;
CONSTRAINT:
foreach my $constraint ($self->get_constraints) {
foreach my $otherConstraint ($other->get_constraints) {
if ($constraint->equals($otherConstraint, $case_insensitive)) {
$checkedConstraints{$otherConstraint} = 1;
next CONSTRAINT;
}
}
return 0;
}
# Go through the other table's constraints
CONSTRAINT2:
foreach my $otherConstraint ($other->get_constraints) {
next if $checkedFields{$otherConstraint};
foreach my $constraint ($self->get_constraints) {
if ($otherConstraint->equals($constraint, $case_insensitive)) {
next CONSTRAINT2;
}
}
return 0;
}
# Indices
# Go through our indices
my %checkedIndices;
INDEX:
foreach my $index ($self->get_indices) {
foreach my $otherIndex ($other->get_indices) {
if ($index->equals($otherIndex, $case_insensitive)) {
$checkedIndices{$otherIndex} = 1;
next INDEX;
}
}
return 0;
}
# Go through the other table's indices
INDEX2:
foreach my $otherIndex ($other->get_indices) {
next if $checkedIndices{$otherIndex};
foreach my $index ($self->get_indices) {
if ($otherIndex->equals($index, $case_insensitive)) {
next INDEX2;
}
}
return 0;
}
return 1;
}
=head1 LOOKUP METHODS
The following are a set of shortcut methods for getting commonly used lists of
fields and constraints. They all return lists or array refs of Field or
Constraint objects.
=over 4
=item pkey_fields
The primary key fields.
=item fkey_fields
All foreign key fields.
=item nonpkey_fields
All the fields except the primary key.
=item data_fields
All non key fields.
=item unique_fields
All fields with unique constraints.
=item unique_constraints
All of this table's unique constraints.
=item fkey_constraints
All of this table's foreign key constraints. (See the primary_key method to get
the primary key constraint.)
=back
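A short usage sketch, assuming a populated C<$table>:
my @pk = $table->pkey_fields; # fields in the primary key
my @fk = $table->fkey_fields; # fields used by foreign keys
my @data = $table->data_fields; # non-key fields
my @uniq = $table->unique_constraints; # UNIQUE constraints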
=cut
sub pkey_fields {
my $me = shift;
my @fields = grep { $_->is_primary_key } $me->get_fields;
return wantarray ? @fields : \@fields;
}
sub fkey_fields {
my $me = shift;
my @fields;
push @fields, $_->fields foreach $me->fkey_constraints;
return wantarray ? @fields : \@fields;
}
sub nonpkey_fields {
my $me = shift;
my @fields = grep { !$_->is_primary_key } $me->get_fields;
return wantarray ? @fields : \@fields;
}
sub data_fields {
my $me = shift;
my @fields = grep { !$_->is_foreign_key and !$_->is_primary_key } $me->get_fields;
return wantarray ? @fields : \@fields;
}
sub unique_fields {
my $me = shift;
my @fields;
push @fields, $_->fields foreach $me->unique_constraints;
return wantarray ? @fields : \@fields;
}
sub unique_constraints {
my $me = shift;
my @cons = grep { $_->type eq UNIQUE } $me->get_constraints;
return wantarray ? @cons : \@cons;
}
sub fkey_constraints {
my $me = shift;
my @cons = grep { $_->type eq FOREIGN_KEY } $me->get_constraints;
return wantarray ? @cons : \@cons;
}
# Must come after all 'has' declarations
around new => \&ex2err;
1;
=pod
=head1 AUTHORS
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>,
Allen Day E<lt>allenday@ucla.eduE<gt>.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Diff.pm 0000644 0000000 0000000 00000042540 14541265164 021012 0 ustar 00root root 0000000 0000000 package SQL::Translator::Diff;
## SQLT schema diffing code
use strict;
use warnings;
use Data::Dumper;
use Carp::Clan qw/^SQL::Translator/;
use SQL::Translator::Schema::Constants;
use Sub::Quote qw(quote_sub);
use Moo;
has ignore_index_names => (is => 'rw',);
has ignore_constraint_names => (is => 'rw',);
has ignore_view_sql => (is => 'rw',);
has ignore_proc_sql => (is => 'rw',);
has output_db => (is => 'rw',);
has source_schema => (is => 'rw',);
has target_schema => (is => 'rw',);
has case_insensitive => (is => 'rw',);
has no_batch_alters => (is => 'rw',);
has ignore_missing_methods => (is => 'rw',);
has sqlt_args => (
is => 'rw',
lazy => 1,
default => quote_sub '{}',
);
has tables_to_drop => (
is => 'rw',
lazy => 1,
default => quote_sub '[]',
);
has tables_to_create => (
is => 'rw',
lazy => 1,
default => quote_sub '[]',
);
has table_diff_hash => (
is => 'rw',
lazy => 1,
default => quote_sub '{}',
);
my @diff_arrays = qw/
tables_to_drop
tables_to_create
/;
my @diff_hash_keys = qw/
constraints_to_create
constraints_to_drop
indexes_to_create
indexes_to_drop
fields_to_create
fields_to_alter
fields_to_rename
fields_to_drop
table_options
table_renamed_from
/;
sub schema_diff {
# use Data::Dumper;
## we are getting instructions on how to turn the source into the target
## source == original, target == new (hmm, if I need to comment this, should I rename the vars again ??)
## _schema isa SQL::Translator::Schema
## _db is the name of the producer/db it came out of/into
## results are formatted to the source preferences
my ($source_schema, $source_db, $target_schema, $output_db, $options) = @_;
$options ||= {};
my $obj = SQL::Translator::Diff->new({
%$options,
source_schema => $source_schema,
target_schema => $target_schema,
output_db => $output_db
});
$obj->compute_differences->produce_diff_sql;
}
my $warned_dep;
sub BUILD {
my ($self, $args) = @_;
for my $deprecated (qw/producer_options producer_args/) {
if ($args->{$deprecated}) {
carp
"$deprecated is deprecated -- it does not go straight to the producer, it goes to the internal sqlt object. Please use sqlt_args, which reflects how it's used"
unless $warned_dep++;
$self->sqlt_args({ %{ $args->{$deprecated} }, %{ $self->sqlt_args } });
}
}
if (!$self->output_db) {
$self->output_db($args->{source_db});
}
}
sub compute_differences {
my ($self) = @_;
my $target_schema = $self->target_schema;
my $source_schema = $self->source_schema;
my $producer_class = "SQL::Translator::Producer::@{[$self->output_db]}";
eval "require $producer_class";
die $@ if $@;
if (my $preprocess = $producer_class->can('preprocess_schema')) {
$preprocess->($source_schema);
$preprocess->($target_schema);
}
my %src_tables_checked = ();
my @tar_tables = sort { $a->name cmp $b->name } $target_schema->get_tables;
## do original/source tables exist in target?
for my $tar_table (@tar_tables) {
my $tar_table_name = $tar_table->name;
my $src_table;
$self->table_diff_hash->{$tar_table_name} = { map { $_ => [] } @diff_hash_keys };
if (my $old_name = $tar_table->extra('renamed_from')) {
$src_table = $source_schema->get_table($old_name, $self->case_insensitive);
if ($src_table) {
$self->table_diff_hash->{$tar_table_name}{table_renamed_from} = [ [ $src_table, $tar_table ] ];
} else {
delete $tar_table->extra->{renamed_from};
carp qq#Renamed table can't find old table "$old_name" for renamed table\n#;
}
} else {
$src_table = $source_schema->get_table($tar_table_name, $self->case_insensitive);
}
unless ($src_table) {
## table is new
## add table(s) later.
push @{ $self->tables_to_create }, $tar_table;
next;
}
my $src_table_name = $src_table->name;
$src_table_name = lc $src_table_name if $self->case_insensitive;
$src_tables_checked{$src_table_name} = 1;
$self->diff_table_options($src_table, $tar_table);
## Compare fields, their types, defaults, sizes etc etc
$self->diff_table_fields($src_table, $tar_table);
$self->diff_table_indexes($src_table, $tar_table);
$self->diff_table_constraints($src_table, $tar_table);
} # end of target_schema->get_tables loop
for my $src_table ($source_schema->get_tables) {
my $src_table_name = $src_table->name;
$src_table_name = lc $src_table_name if $self->case_insensitive;
push @{ $self->tables_to_drop }, $src_table
unless $src_tables_checked{$src_table_name};
}
return $self;
}
sub produce_diff_sql {
my ($self) = @_;
my $target_schema = $self->target_schema;
my $source_schema = $self->source_schema;
my $tar_name = $target_schema->name;
my $src_name = $source_schema->name;
my $producer_class = "SQL::Translator::Producer::@{[$self->output_db]}";
eval "require $producer_class";
die $@ if $@;
# Map of name we store under => producer method name
my %func_map = (
constraints_to_create => 'alter_create_constraint',
constraints_to_drop => 'alter_drop_constraint',
indexes_to_create => 'alter_create_index',
indexes_to_drop => 'alter_drop_index',
fields_to_create => 'add_field',
fields_to_alter => 'alter_field',
fields_to_rename => 'rename_field',
fields_to_drop => 'drop_field',
table_options => 'alter_table',
table_renamed_from => 'rename_table',
);
my @diffs;
if (!$self->no_batch_alters
&& (my $batch_alter = $producer_class->can('batch_alter_table'))) {
# Good - Producer supports batch altering of tables.
foreach my $table (sort keys %{ $self->table_diff_hash }) {
my $tar_table = $target_schema->get_table($table)
|| $source_schema->get_table($table);
push @diffs,
$batch_alter->(
$tar_table,
{
map { $func_map{$_} => $self->table_diff_hash->{$table}{$_} } keys %func_map
},
$self->sqlt_args
);
}
} else {
# If we have any table renames we need to do those first;
my %flattened_diffs;
foreach my $table (sort keys %{ $self->table_diff_hash }) {
my $table_diff = $self->table_diff_hash->{$table};
for (@diff_hash_keys) {
push(@{ $flattened_diffs{ $func_map{$_} } ||= [] }, @{ $table_diff->{$_} });
}
}
push @diffs, map({
if (@{ $flattened_diffs{$_} || [] }) {
my $meth = $producer_class->can($_);
$meth
? map {
map { $_ ? "$_" : () } $meth->((ref $_ eq 'ARRAY' ? @$_ : $_), $self->sqlt_args);
} @{ $flattened_diffs{$_} }
: $self->ignore_missing_methods ? "-- $producer_class cant $_"
: die "$producer_class cant $_";
} else {
()
}
} qw/rename_table
alter_drop_constraint
alter_drop_index
drop_field
add_field
alter_field
rename_field
alter_create_index
alter_create_constraint
alter_table/),
;
}
if (my @tables = @{ $self->tables_to_create }) {
my $translator = SQL::Translator->new(
producer_type => $self->output_db,
add_drop_table => 0,
no_comments => 1,
# TODO: sort out options
%{ $self->sqlt_args }
);
$translator->producer_args->{no_transaction} = 1;
my $schema = $translator->schema;
$schema->add_table($_) for @tables;
unshift @diffs,
# Remove begin/commit here, since we wrap everything in one.
grep { $_ !~ /^(?:COMMIT|START(?: TRANSACTION)?|BEGIN(?: TRANSACTION)?)/ }
$producer_class->can('produce')->($translator);
}
if (my @tables_to_drop = @{ $self->{tables_to_drop} || [] }) {
my $meth = $producer_class->can('drop_table');
push @diffs,
$meth ? (map { $meth->($_, $self->sqlt_args) } @tables_to_drop)
: $self->ignore_missing_methods ? "-- $producer_class cant drop_table"
: die "$producer_class cant drop_table";
}
if (@diffs) {
unshift @diffs, "BEGIN";
push @diffs, "\nCOMMIT";
} else {
@diffs = ("-- No differences found");
}
if (@diffs) {
if ($self->output_db !~ /^(?:MySQL|SQLite|PostgreSQL)$/) {
unshift(@diffs, "-- Output database @{[$self->output_db]} is untested/unsupported!!!");
}
my @return = map { $_ ? ($_ =~ /;\s*\z/xms ? $_ : "$_;\n\n") : "\n" }
("-- Convert schema '$src_name' to '$tar_name':", @diffs);
return wantarray ? @return : join('', @return);
}
return undef;
}
sub diff_table_indexes {
my ($self, $src_table, $tar_table) = @_;
my (%checked_indices);
INDEX_CREATE:
for my $i_tar ($tar_table->get_indices) {
for my $i_src ($src_table->get_indices) {
if ($i_tar->equals($i_src, $self->case_insensitive, $self->ignore_index_names)) {
$checked_indices{$i_src} = 1;
next INDEX_CREATE;
}
}
push @{ $self->table_diff_hash->{$tar_table}{indexes_to_create} }, $i_tar;
}
INDEX_DROP:
for my $i_src ($src_table->get_indices) {
next if !$self->ignore_index_names && $checked_indices{$i_src};
for my $i_tar ($tar_table->get_indices) {
next INDEX_DROP
if $i_src->equals($i_tar, $self->case_insensitive, $self->ignore_index_names);
}
push @{ $self->table_diff_hash->{$tar_table}{indexes_to_drop} }, $i_src;
}
}
sub diff_table_constraints {
my ($self, $src_table, $tar_table) = @_;
my (%checked_constraints);
CONSTRAINT_CREATE:
for my $c_tar ($tar_table->get_constraints) {
for my $c_src ($src_table->get_constraints) {
# This is a bit of a hack - needed for renaming tables to work
local $c_src->{table} = $tar_table;
if ($c_tar->equals($c_src, $self->case_insensitive, $self->ignore_constraint_names)) {
$checked_constraints{$c_src} = 1;
next CONSTRAINT_CREATE;
}
}
push @{ $self->table_diff_hash->{$tar_table}{constraints_to_create} }, $c_tar;
}
CONSTRAINT_DROP:
for my $c_src ($src_table->get_constraints) {
# This is a bit of a hack - needed for renaming tables to work
local $c_src->{table} = $tar_table;
next if !$self->ignore_constraint_names && $checked_constraints{$c_src};
for my $c_tar ($tar_table->get_constraints) {
next CONSTRAINT_DROP
if $c_src->equals($c_tar, $self->case_insensitive, $self->ignore_constraint_names);
}
push @{ $self->table_diff_hash->{$tar_table}{constraints_to_drop} }, $c_src;
}
}
sub diff_table_fields {
my ($self, $src_table, $tar_table) = @_;
# List of ones we've renamed from so we don't drop them
my %renamed_source_fields;
for my $tar_table_field ($tar_table->get_fields) {
my $f_tar_name = $tar_table_field->name;
if (my $old_name = $tar_table_field->extra->{renamed_from}) {
my $src_table_field = $src_table->get_field($old_name, $self->case_insensitive);
unless ($src_table_field) {
carp qq#Renamed column can't find old column "@{[$src_table->name]}.$old_name" for renamed column\n#;
delete $tar_table_field->extra->{renamed_from};
} else {
push @{ $self->table_diff_hash->{$tar_table}{fields_to_rename} }, [ $src_table_field, $tar_table_field ];
$renamed_source_fields{$old_name} = 1;
next;
}
}
my $src_table_field = $src_table->get_field($f_tar_name, $self->case_insensitive);
unless ($src_table_field) {
push @{ $self->table_diff_hash->{$tar_table}{fields_to_create} }, $tar_table_field;
next;
}
# field exists, something changed. This is a bit complex. Parsers can
# normalize types, but only some of them do, so compare the normalized and
# parsed types for each field to each other
if ( !$tar_table_field->equals($src_table_field, $self->case_insensitive)
&& !$tar_table_field->equals($src_table_field->parsed_field, $self->case_insensitive)
&& !$tar_table_field->parsed_field->equals($src_table_field, $self->case_insensitive)
&& !$tar_table_field->parsed_field->equals($src_table_field->parsed_field, $self->case_insensitive)) {
# Some producers might need src field to diff against
push @{ $self->table_diff_hash->{$tar_table}{fields_to_alter} }, [ $src_table_field, $tar_table_field ];
next;
}
}
# Now check to see if any fields from src_table need to be dropped
for my $src_table_field ($src_table->get_fields) {
my $f_src_name = $src_table_field->name;
next if $renamed_source_fields{$f_src_name};
my $tar_table_field = $tar_table->get_field($f_src_name, $self->case_insensitive);
unless ($tar_table_field) {
push @{ $self->table_diff_hash->{$tar_table}{fields_to_drop} }, $src_table_field;
next;
}
}
}
sub diff_table_options {
my ($self, $src_table, $tar_table) = @_;
my $cmp = sub {
my ($a_name, undef, $b_name, undef) = (%$a, %$b);
$a_name cmp $b_name;
};
# Need to sort the options so we don't get spurious diffs.
my (@src_opts, @tar_opts);
@src_opts = sort $cmp $src_table->options;
@tar_opts = sort $cmp $tar_table->options;
# If there's a difference, just re-set all the options
push @{ $self->table_diff_hash->{$tar_table}{table_options} }, $tar_table
unless $src_table->_compare_objects(\@src_opts, \@tar_opts);
}
# support producer_options as an alias for sqlt_args for legacy code.
sub producer_options {
my $self = shift;
return $self->sqlt_args(@_);
}
# support producer_args as an alias for sqlt_args for legacy code.
sub producer_args {
my $self = shift;
return $self->sqlt_args(@_);
}
1;
__END__
=head1 NAME
SQL::Translator::Diff - determine differences between two schemas
=head1 DESCRIPTION
Takes two input SQL::Translator::Schemas (or SQL files) and produces ALTER
statements to make them the same
=head1 SYNOPSIS
Simplest usage:
use SQL::Translator::Diff;
my $sql = SQL::Translator::Diff::schema_diff($source_schema, 'MySQL', $target_schema, 'MySQL', $options_hash);
OO usage:
use SQL::Translator::Diff;
my $diff = SQL::Translator::Diff->new({
output_db => 'MySQL',
source_schema => $source_schema,
target_schema => $target_schema,
%$options_hash,
})->compute_differences->produce_diff_sql;
=head1 OPTIONS
=over
=item B<ignore_index_names>
Match indexes based on types and fields, ignoring name.
=item B<ignore_constraint_names>
Match constraints based on types, fields and tables, ignoring name.
=item B<output_db>
Which producer to use to produce the output.
=item B<case_insensitive>
Ignore case of table, field, index and constraint names when comparing.
=item B<no_batch_alters>
Produce each alter as a distinct C<ALTER TABLE> statement even if the producer
supports the ability to do all alters for a table as one statement.
=item B<ignore_missing_methods>
If the diff would need a method that is missing from the producer, just emit a
comment showing the method is missing, rather than dying with an error.
=item B<sqlt_args>
Hash of extra arguments passed to L<SQL::Translator> and the below
L</PRODUCER FUNCTIONS>.
=back
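For instance, a diff that matches indexes and constraints regardless of their
names and ignores case might look like the following sketch (the schemas and
producer name are placeholders):
my $sql = SQL::Translator::Diff->new({
output_db => 'PostgreSQL',
source_schema => $source_schema,
target_schema => $target_schema,
ignore_index_names => 1,
ignore_constraint_names => 1,
case_insensitive => 1,
})->compute_differences->produce_diff_sql;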
=head1 PRODUCER FUNCTIONS
The following producer functions should be implemented for completeness. If
any of them are needed for a given diff, but not found, an error will be
thrown.
=over
=item * C<alter_create_constraint>
=item * C<alter_drop_constraint>
=item * C<alter_create_index>
=item * C<alter_drop_index>
=item * C<add_field>
=item * C<alter_field>
=item * C<rename_field>
=item * C<drop_field>
=item * C<alter_table>
=item * C<drop_table>
=item * C<rename_table> (optional)
=item * C<batch_alter_table> (optional)
If the producer supports C<batch_alter_table>, it will be called with the
table to alter and a hash, the keys of which will be the method names listed
above; values will be arrays of fields or constraints to operate on. In the
case of the field functions that take two arguments this will appear as an
array reference.
I.e. the hash might look something like the following:
{
alter_create_constraint => [ $constraint1, $constraint2 ],
add_field => [ $field ],
alter_field => [ [$old_field, $new_field] ]
}
=item * C<preprocess_schema> (optional)
C<preprocess_schema> is called by the Diff code to allow the producer to
normalize any data it needs to first. For example, the MySQL producer uses
this method to ensure that FK constraint names are unique.
Basically any changes that need to be made to produce the SQL file for the
schema should be done here, so that a diff between a parsed SQL file and (say)
a parsed DBIx::Class::Schema object will be sane. (A minimal sketch of such a
hook follows this list.)
(As an aside, DBIx::Class, for instance, uses the presence of a
C<preprocess_schema> function on the producer to know that it can diff between
the previous SQL file and its own internal representation. Without this method
on the producer it will diff the two SQL files, which is slower, but known to
work better on old-style producers.)
=back
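A minimal sketch of such a producer-side hook. The renaming rule is invented
for the example, and it assumes L<SQL::Translator::Schema::Constants> is
loaded so that C<FOREIGN_KEY> is available:
sub preprocess_schema {
my ($schema) = @_;
for my $table ($schema->get_tables) {
for my $constraint ($table->get_constraints) {
next unless $constraint->type eq FOREIGN_KEY;
# prefix FK constraint names with the table name so they are unique
$constraint->name($table->name . '_' . ($constraint->name || 'fkey'));
}
}
return;
}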
=head1 AUTHOR
Original Author(s) unknown.
Refactor/re-write and more comprehensive tests by Ash Berlin C<< ash@cpan.org >>.
Redevelopment sponsored by Takkle Inc.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/ 0000755 0000000 0000000 00000000000 14551164244 021031 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Parser/Oracle.pm 0000644 0000000 0000000 00000054132 14551163724 022603 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::Oracle;
=head1 NAME
SQL::Translator::Parser::Oracle - parser for Oracle
=head1 SYNOPSIS
use SQL::Translator;
use SQL::Translator::Parser::Oracle;
my $translator = SQL::Translator->new;
$translator->parser("SQL::Translator::Parser::Oracle");
=head1 DESCRIPTION
From http://www.ss64.com/ora/table_c.html:
CREATE [GLOBAL TEMPORARY] TABLE [schema.]table (tbl_defs,...)
[ON COMMIT {DELETE|PRESERVE} ROWS]
[storage_options | CLUSTER cluster_name (col1, col2,... )
| ORGANIZATION {HEAP [storage_options]
| INDEX idx_organized_tbl_clause}]
[LOB_storage_clause][varray_clause][nested_storage_clause]
partitioning_options
[[NO]CACHE] [[NO]MONITORING] [PARALLEL parallel_clause]
[ENABLE enable_clause | DISABLE disable_clause]
[AS subquery]
tbl_defs:
column datatype [DEFAULT expr] [column_constraint(s)]
table_ref_constraint
storage_options:
PCTFREE int
PCTUSED int
INITTRANS int
MAXTRANS int
STORAGE storage_clause
TABLESPACE tablespace
[LOGGING|NOLOGGING]
idx_organized_tbl_clause:
storage_option(s) [PCTTHRESHOLD int]
[COMPRESS int|NOCOMPRESS]
[ [INCLUDING column_name] OVERFLOW [storage_option(s)] ]
nested_storage_clause:
NESTED TABLE nested_item STORE AS storage_table
[RETURN AS {LOCATOR|VALUE} ]
partitioning_options:
Partition_clause {ENABLE|DISABLE} ROW MOVEMENT
Column Constraints
(http://www.ss64.com/ora/clause_constraint_col.html)
CONSTRAINT constrnt_name {UNIQUE|PRIMARY KEY} constrnt_state
CONSTRAINT constrnt_name CHECK(condition) constrnt_state
CONSTRAINT constrnt_name [NOT] NULL constrnt_state
CONSTRAINT constrnt_name REFERENCES [schema.]table[(column)]
[ON DELETE {CASCADE|SET NULL}] constrnt_state
constrnt_state
[[NOT] DEFERRABLE] [INITIALLY {IMMEDIATE|DEFERRED}]
[RELY | NORELY] [USING INDEX using_index_clause]
[ENABLE|DISABLE] [VALIDATE|NOVALIDATE]
[EXCEPTIONS INTO [schema.]table]
Note that probably not all of the above syntax is supported, but the grammar
was altered to better handle the syntax created by DDL::Oracle.
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use SQL::Translator::Utils qw/ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our $GRAMMAR = <<'END_OF_GRAMMAR';
{ my ( %tables, %indices, %constraints, $table_order, @table_comments, %views, $view_order, %procedures, $proc_order, %triggers, $trigger_order ) }
#
# The "eofile" rule makes the parser fail if any "statement" rule
# fails. Otherwise, the first successful match by a "statement"
# won't cause the failure needed to know that the parse, as a whole,
# failed. -ky
#
startrule : statement(s) eofile
{
$return = {
tables => \%tables,
indices => \%indices,
constraints => \%constraints,
views => \%views,
procedures => \%procedures,
triggers => \%triggers,
};
}
eofile : /^\Z/
statement : remark
| run
| prompt
| create
| table_comment
| comment_on_table
| comment_on_column
| alter
| drop
|
alter: /alter/i TABLE table_name /add/i table_constraint ';'
{
my $constraint = $item{table_constraint};
$constraint->{type} = $constraint->{constraint_type};
push @{$tables{$item{table_name}}{constraints}}, $constraint;
}
alter : /alter/i WORD /[^;]+/ ';'
{ @table_comments = () }
drop : /drop/i WORD(s) NAME WORD(s?) ';'
{ @table_comments = () }
create : create_table table_name '(' create_definition(s /,/) ')' table_option(s?) ';'
{
my $table_name = $item{'table_name'};
$tables{ $table_name }{'order'} = ++$table_order;
$tables{ $table_name }{'table_name'} = $table_name;
if ( @table_comments ) {
$tables{ $table_name }{'comments'} = [ @table_comments ];
@table_comments = ();
}
my $i = 1;
my @constraints;
for my $definition ( @{ $item[4] } ) {
if ( $definition->{'type'} eq 'field' ) {
my $field_name = $definition->{'name'};
$tables{ $table_name }{'fields'}{ $field_name } =
{ %$definition, order => $i };
$i++;
for my $constraint ( @{ $definition->{'constraints'} || [] } ) {
$constraint->{'fields'} = [ $field_name ];
push @{ $tables{ $table_name }{'constraints'} },
$constraint;
}
}
elsif ( $definition->{'type'} eq 'constraint' ) {
$definition->{'type'} = $definition->{'constraint_type'};
push @{ $tables{ $table_name }{'constraints'} }, $definition;
}
else {
push @{ $tables{ $table_name }{'indices'} }, $definition;
}
}
for my $option ( @{ $item[6] } ) {
push @{ $tables{ $table_name }{'table_options'} }, $option;
}
1;
}
create : create_index index_name /on/i table_name index_expr table_option(?) ';'
{
my $table_name = $item[4];
if ( $item[1] ) {
push @{ $constraints{ $table_name } }, {
name => $item[2],
type => 'unique',
fields => $item[5],
};
}
else {
push @{ $indices{ $table_name } }, {
name => $item[2],
type => 'normal',
fields => $item[5],
};
}
}
index_expr: parens_name_list
{ $item[1] }
| '(' WORD parens_name_list ')'
{
my $arg_list = join(",", @{$item[3]});
$return = "$item[2]($arg_list)";
}
create : /create/i /or replace/i /trigger/i table_name not_end m#^/$#im
{
@table_comments = ();
my $trigger_name = $item[4];
# Hack to strip owner from trigger name
$trigger_name =~ s#.*\.##;
my $owner = '';
my $action = "$item[1] $item[2] $item[3] $item[4] $item[5]";
$triggers{ $trigger_name }{'order'} = ++$trigger_order;
$triggers{ $trigger_name }{'name'} = $trigger_name;
$triggers{ $trigger_name }{'owner'} = $owner;
$triggers{ $trigger_name }{'action'} = $action;
}
create : /create/i /or replace/i /procedure/i table_name not_end m#^/$#im
{
@table_comments = ();
my $proc_name = $item[4];
# Hack to strip owner from procedure name
$proc_name =~ s#.*\.##;
my $owner = '';
my $sql = "$item[1] $item[2] $item[3] $item[4] $item[5]";
$procedures{ $proc_name }{'order'} = ++$proc_order;
$procedures{ $proc_name }{'name'} = $proc_name;
$procedures{ $proc_name }{'owner'} = $owner;
$procedures{ $proc_name }{'sql'} = $sql;
}
not_end: m#.*?(?=^/$)#ism
create : /create/i /or replace/i /force/i /view/i table_name not_delimiter ';'
{
@table_comments = ();
my $view_name = $item[5];
# Hack to strip owner from view name
$view_name =~ s#.*\.##;
my $sql = "$item[1] $item[2] $item[3] $item[4] $item[5] $item[6] $item[7]";
$views{ $view_name }{'order'} = ++$view_order;
$views{ $view_name }{'name'} = $view_name;
$views{ $view_name }{'sql'} = $sql;
}
not_delimiter: /.*?(?=;)/is
# Create anything else (e.g., domain, function, etc.)
create : ...!create_table ...!create_index /create/i WORD /[^;]+/ ';'
{ @table_comments = () }
create_index : /create/i UNIQUE(?) /index/i
{ $return = @{$item[2]} }
index_name : NAME '.' NAME
{ $item[3] }
| NAME
{ $item[1] }
global_temporary: /global/i /temporary/i
table_name : NAME '.' NAME
{ $item[3] }
| NAME
{ $item[1] }
create_definition : table_constraint
| field
|
table_comment : comment
{
my $comment = $item[1];
$return = $comment;
push @table_comments, $comment;
}
comment : /^\s*(?:#|-{2}).*\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|-{2})\s*//;
$comment =~ s/\s*$//;
$return = $comment;
}
comment : /\/\*/ /[^\*]+/ /\*\//
{
my $comment = $item[2];
$comment =~ s/^\s*|\s*$//g;
$return = $comment;
}
remark : /^REM\s+.*\n/
run : /^(RUN|\/)\s+.*\n/
prompt : /prompt/i /(table|index|sequence|trigger)/i ';'
prompt : /prompt\s+create\s+.*\n/i
comment_on_table : /comment/i /on/i /table/i table_name /is/i comment_phrase ';'
{
push @{ $tables{ $item{'table_name'} }{'comments'} }, $item{'comment_phrase'};
}
comment_on_column : /comment/i /on/i /column/i column_name /is/i comment_phrase ';'
{
my $table_name = $item[4]->{'table'};
my $field_name = $item[4]->{'field'};
push @{ $tables{ $table_name }{'fields'}{ $field_name }{'comments'} },
$item{'comment_phrase'};
}
column_name : NAME '.' NAME
{ $return = { table => $item[1], field => $item[3] } }
comment_phrase : /'.*?'/
{
my $val = $item[1];
$val =~ s/^'|'$//g;
$return = $val;
}
field : comment(s?) field_name data_type field_meta(s?) comment(s?)
{
my ( $is_pk, $default, @constraints );
my $null = 1;
for my $meta ( @{ $item[4] } ) {
if ( $meta->{'type'} eq 'default' ) {
$default = $meta;
next;
}
elsif ( $meta->{'type'} eq 'not_null' ) {
$null = 0;
next;
}
elsif ( $meta->{'type'} eq 'primary_key' ) {
$is_pk = 1;
}
push @constraints, $meta if $meta->{'supertype'} eq 'constraint';
}
my @comments = ( @{ $item[1] }, @{ $item[5] } );
$return = {
type => 'field',
name => $item{'field_name'},
data_type => $item{'data_type'}{'type'},
size => $item{'data_type'}{'size'},
null => $null,
default => $default->{'value'},
is_primary_key => $is_pk,
constraints => [ @constraints ],
comments => [ @comments ],
}
}
|
field_name : NAME
data_type : ora_data_type data_size(?)
{
$return = {
type => $item[1],
size => $item[2][0] || '',
}
}
data_size : '(' VALUE(s /,/) data_size_modifier(?) ')'
{ $item[2] }
data_size_modifier: /byte/i
| /char/i
column_constraint : constraint_name(?) column_constraint_type constraint_state(s?)
{
my $desc = $item{'column_constraint_type'};
my $type = $desc->{'type'};
my $fields = $desc->{'fields'} || [];
my $expression = $desc->{'expression'} || '';
$return = {
supertype => 'constraint',
name => $item{'constraint_name(?)'}[0] || '',
type => $type,
expression => $type eq 'check' ? $expression : '',
deferrable => $desc->{'deferrable'},
deferred => $desc->{'deferred'},
reference_table => $desc->{'reference_table'},
reference_fields => $desc->{'reference_fields'},
# match_type => $desc->{'match_type'},
# on_update => $desc->{'on_update'},
}
}
constraint_name : /constraint/i NAME { $item[2] }
column_constraint_type : /not\s+null/i { $return = { type => 'not_null' } }
| /unique/i
{ $return = { type => 'unique' } }
| /primary\s+key/i
{ $return = { type => 'primary_key' } }
| /check/i check_expression
{
$return = {
type => 'check',
expression => $item[2],
};
}
| /references/i table_name parens_name_list(?) on_delete(?)
{
$return = {
type => 'foreign_key',
reference_table => $item[2],
reference_fields => $item[3][0],
# match_type => $item[4][0],
on_delete => $item[5][0],
}
}
LPAREN : '('
RPAREN : ')'
check_condition_text : /.+\s+in\s+\([^)]+\)/i
| /[^)]+/
check_expression : LPAREN check_condition_text RPAREN
{ $return = join( ' ', map { $_ || () }
$item[1], $item[2], $item[3], $item[4][0] )
}
constraint_state : deferrable { $return = { type => $item[1] } }
| deferred { $return = { type => $item[1] } }
| /(no)?rely/i { $return = { type => $item[1] } }
# | /using/i /index/i using_index_clause
# { $return = { type => 'using_index', index => $item[3] } }
| /(dis|en)able/i { $return = { type => $item[1] } }
| /(no)?validate/i { $return = { type => $item[1] } }
| /exceptions/i /into/i table_name
{ $return = { type => 'exceptions_into', table => $item[3] } }
deferrable : /not/i /deferrable/i
{ $return = 'not_deferrable' }
| /deferrable/i
{ $return = 'deferrable' }
deferred : /initially/i /(deferred|immediate)/i { $item[2] }
ora_data_type :
/(n?varchar2|varchar)/i { $return = 'varchar2' }
|
/n?char/i { $return = 'character' }
|
/n?dec/i { $return = 'decimal' }
|
/number/i { $return = 'number' }
|
/integer/i { $return = 'integer' }
|
/(pls_integer|binary_integer)/i { $return = 'integer' }
|
/interval\s+day/i { $return = 'interval day' }
|
/interval\s+year/i { $return = 'interval year' }
|
/long\s+raw/i { $return = 'long raw' }
|
/(long|date|timestamp|raw|rowid|urowid|mlslabel|clob|nclob|blob|bfile|float|double)/i { $item[1] }
parens_value_list : '(' VALUE(s /,/) ')'
{ $item[2] }
parens_word_list : '(' WORD(s /,/) ')'
{ $item[2] }
parens_name_list : '(' NAME(s /,/) ')'
{ $item[2] }
field_meta : default_val
| column_constraint
default_val :
/default/i CURRENT_TIMESTAMP
{
my $val = $item[2];
$return = {
supertype => 'constraint',
type => 'default',
value => $val,
}
}
| /default/i VALUE
{
my $val = $item[2];
$return = {
supertype => 'constraint',
type => 'default',
value => $val,
}
}
| /null/i
{
$return = {
supertype => 'constraint',
type => 'default',
value => 'NULL',
}
}
create_table : /create/i global_temporary(?) /table/i
table_option : /organization/i WORD
{
$return = { 'ORGANIZATION' => $item[2] }
}
table_option : /nomonitoring/i
{
$return = { 'NOMONITORING' => undef }
}
table_option : /parallel/i '(' key_value(s) ')'
{
$return = { 'PARALLEL' => $item[3] }
}
key_value : WORD VALUE
{
$return = { $item[1], $item[2] }
}
table_option : /[^;]+/
table_constraint : comment(s?) constraint_name(?) table_constraint_type deferrable(?) deferred(?) constraint_state(s?) comment(s?)
{
my $desc = $item{'table_constraint_type'};
my $type = $desc->{'type'};
my $fields = $desc->{'fields'};
my $expression = $desc->{'expression'};
my @comments = ( @{ $item[1] }, @{ $item[-1] } );
$return = {
name => $item{'constraint_name(?)'}[0] || '',
type => 'constraint',
constraint_type => $type,
fields => $type ne 'check' ? $fields : [],
expression => $type eq 'check' ? $expression : '',
deferrable => $item{'deferrable(?)'},
deferred => $item{'deferred(?)'},
reference_table => $desc->{'reference_table'},
reference_fields => $desc->{'reference_fields'},
# match_type => $desc->{'match_type'}[0],
on_delete => $desc->{'on_delete'} || $desc->{'on_delete_do'},
on_update => $desc->{'on_update'} || $desc->{'on_update_do'},
comments => [ @comments ],
}
}
table_constraint_type : /primary key/i '(' NAME(s /,/) ')'
{
$return = {
type => 'primary_key',
fields => $item[3],
}
}
|
/unique/i '(' NAME(s /,/) ')'
{
$return = {
type => 'unique',
fields => $item[3],
}
}
|
/check/i check_expression /^(en|dis)able/i
{
$return = {
type => 'check',
expression => join(' ', $item[2], $item[3]),
}
}
|
/foreign key/i '(' NAME(s /,/) ')' /references/i table_name parens_name_list(?) on_delete(?)
{
$return = {
type => 'foreign_key',
fields => $item[3],
reference_table => $item[6],
reference_fields => $item[7][0],
# match_type => $item[8][0],
on_delete => $item[8][0],
# on_update => $item[9][0],
}
}
on_delete : /on delete/i WORD(s)
{ join(' ', @{$item[2]}) }
UNIQUE : /unique/i { $return = 1 }
WORD : /\w+/
NAME : /\w+/ { $item[1] }
| DQSTRING
TABLE : /table/i
DQSTRING : '"' /((?:[^"]|"")+)/ '"'
{ ($return = $item[3]) =~ s/""/"/g; }
SQSTRING : "'" /((?:[^']|'')*)/ "'"
{ ($return = $item[3]) =~ s/''/'/g }
VALUE : /[-+]?\d*\.?\d+(?:[eE]\d+)?/
| SQSTRING
| /null/i
{ 'NULL' }
# always a scalar-ref, so that it is treated as a function and not quoted by consumers
CURRENT_TIMESTAMP :
/current_timestamp(\(\))?/i { \'CURRENT_TIMESTAMP' }
| /now\(\)/i { \'CURRENT_TIMESTAMP' }
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
local $::RD_ERRORS = 1
unless defined $::RD_ERRORS; # Make sure the parser dies when it encounters an error
local $::RD_WARN = 1
unless defined $::RD_WARN; # Enable warnings. This will warn on unused rules &c.
local $::RD_HINT = 1
unless defined $::RD_HINT; # Give out hints to help fix problems.
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('Oracle');
my $result = $parser->startrule($data);
die "Parse failed.\n" unless defined $result;
if ($DEBUG) {
warn "Parser results =\n", Dumper($result), "\n";
}
my $schema = $translator->schema;
my $indices = $result->{'indices'};
my $constraints = $result->{'constraints'};
my @tables
= sort { $result->{'tables'}{$a}{'order'} <=> $result->{'tables'}{$b}{'order'} } keys %{ $result->{'tables'} };
for my $table_name (@tables) {
my $tdata = $result->{'tables'}{$table_name};
next unless $tdata->{'table_name'};
my $table = $schema->add_table(
name => $tdata->{'table_name'},
comments => $tdata->{'comments'},
) or die $schema->error;
$table->options($tdata->{'table_options'});
my @fields = sort { $tdata->{'fields'}->{$a}->{'order'} <=> $tdata->{'fields'}->{$b}->{'order'} }
keys %{ $tdata->{'fields'} };
for my $fname (@fields) {
my $fdata = $tdata->{'fields'}{$fname};
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_inc'},
is_nullable => $fdata->{'null'},
comments => $fdata->{'comments'},
) or die $table->error;
}
push @{ $tdata->{'indices'} }, @{ $indices->{$table_name} || [] };
push @{ $tdata->{'constraints'} }, @{ $constraints->{$table_name} || [] };
for my $idata (@{ $tdata->{'indices'} || [] }) {
my $index = $table->add_index(
name => $idata->{'name'},
type => uc $idata->{'type'},
fields => $idata->{'fields'},
) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
my $constraint = $table->add_constraint(
name => $cdata->{'name'},
type => $cdata->{'type'},
fields => $cdata->{'fields'},
expression => $cdata->{'expression'},
reference_table => $cdata->{'reference_table'},
reference_fields => $cdata->{'reference_fields'},
match_type => $cdata->{'match_type'} || '',
on_delete => $cdata->{'on_delete'} || $cdata->{'on_delete_do'},
on_update => $cdata->{'on_update'} || $cdata->{'on_update_do'},
) or die $table->error;
}
}
my @procedures = sort { $result->{procedures}->{$a}->{'order'} <=> $result->{procedures}->{$b}->{'order'} }
keys %{ $result->{procedures} };
foreach my $proc_name (@procedures) {
$schema->add_procedure(
name => $proc_name,
owner => $result->{procedures}->{$proc_name}->{owner},
sql => $result->{procedures}->{$proc_name}->{sql},
);
}
my @views
= sort { $result->{views}->{$a}->{'order'} <=> $result->{views}->{$b}->{'order'} } keys %{ $result->{views} };
foreach my $view_name (@views) {
$schema->add_view(
name => $view_name,
sql => $result->{views}->{$view_name}->{sql},
);
}
my @triggers = sort { $result->{triggers}->{$a}->{'order'} <=> $result->{triggers}->{$b}->{'order'} }
keys %{ $result->{triggers} };
foreach my $trigger_name (@triggers) {
$schema->add_trigger(
name => $trigger_name,
action => $result->{triggers}->{$trigger_name}->{action},
);
}
return 1;
}
1;
# -------------------------------------------------------------------
# Something there is that doesn't love a wall.
# Robert Frost
# -------------------------------------------------------------------
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
SQL::Translator, Parse::RecDescent, DDL::Oracle.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/PostgreSQL.pm 0000644 0000000 0000000 00000102316 14551163724 023377 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::PostgreSQL;
=head1 NAME
SQL::Translator::Parser::PostgreSQL - parser for PostgreSQL
=head1 SYNOPSIS
use SQL::Translator;
use SQL::Translator::Parser::PostgreSQL;
my $translator = SQL::Translator->new;
$translator->parser("SQL::Translator::Parser::PostgreSQL");
=head1 DESCRIPTION
The grammar was started from the MySQL parsers. Here is the description
from PostgreSQL, truncated to what's currently supported (patches welcome, of course); a short usage sketch follows the listing:
Table:
(http://www.postgresql.org/docs/current/sql-createtable.html)
CREATE [ [ LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name (
{ column_name data_type [ DEFAULT default_expr ]
[ column_constraint [, ... ] ]
| table_constraint } [, ... ]
)
[ INHERITS ( parent_table [, ... ] ) ]
[ WITH OIDS | WITHOUT OIDS ]
where column_constraint is:
[ CONSTRAINT constraint_name ]
{ NOT NULL | NULL | UNIQUE | PRIMARY KEY |
CHECK (expression) |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
and table_constraint is:
[ CONSTRAINT constraint_name ]
{ UNIQUE ( column_name [, ... ] ) |
PRIMARY KEY ( column_name [, ... ] ) |
CHECK ( expression ) |
EXCLUDE [USING acc_method] (expression) [INCLUDE (column [, ...])] [WHERE (predicate)]
FOREIGN KEY ( column_name [, ... ] )
REFERENCES reftable [ ( refcolumn [, ... ] ) ]
[ MATCH FULL | MATCH PARTIAL ] [ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
Index :
(http://www.postgresql.org/docs/current/sql-createindex.html)
CREATE [ UNIQUE ] INDEX index_name ON table
[ USING acc_method ] ( column [ ops_name ] [, ...] )
[ INCLUDE ( column [, ...] ) ]
[ WHERE predicate ]
CREATE [ UNIQUE ] INDEX index_name ON table
[ USING acc_method ] ( func_name( column [, ... ]) [ ops_name ] )
[ WHERE predicate ]
Alter table:
ALTER TABLE [ ONLY ] table [ * ]
ADD [ COLUMN ] column type [ column_constraint [ ... ] ]
ALTER TABLE [ ONLY ] table [ * ]
ALTER [ COLUMN ] column { SET DEFAULT value | DROP DEFAULT }
ALTER TABLE [ ONLY ] table [ * ]
ALTER [ COLUMN ] column SET STATISTICS integer
ALTER TABLE [ ONLY ] table [ * ]
RENAME [ COLUMN ] column TO newcolumn
ALTER TABLE table
RENAME TO new_table
ALTER TABLE table
ADD table_constraint_definition
ALTER TABLE [ ONLY ] table
DROP CONSTRAINT constraint { RESTRICT | CASCADE }
ALTER TABLE table
OWNER TO new_owner
View :
CREATE [ OR REPLACE ] VIEW view [ ( column name list ) ] AS SELECT query
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use SQL::Translator::Utils qw/ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our $GRAMMAR = <<'END_OF_GRAMMAR';
{ my ( %tables, @views, @triggers, $table_order, $field_order, @table_comments) }
#
# The "eofile" rule makes the parser fail if any "statement" rule
# fails. Otherwise, the first successful match by a "statement"
# won't cause the failure needed to know that the parse, as a whole,
# failed. -ky
#
startrule : statement(s) eofile {
{
tables => \%tables,
views => \@views,
triggers => \@triggers,
}
}
eofile : /^\Z/
statement : create
| comment_on_table
| comment_on_column
| comment_on_other
| comment
| alter
| grant
| revoke
| drop
| insert
| connect
| update
| set
| select
| copy
| readin_symbol
| commit
|
commit : /commit/i ';'
connect : /^\s*\\connect.*\n/
set : /set/i /[^;]*/ ';'
revoke : /revoke/i WORD(s /,/) /on/i TABLE(?) table_id /from/i NAME(s /,/) ';'
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
push @{ $tables{ $table_name }{'permissions'} }, {
type => 'revoke',
actions => $item[2],
users => $item[7],
}
}
revoke : /revoke/i WORD(s /,/) /on/i SCHEMA(?) schema_name /from/i NAME(s /,/) ';'
{ 1 }
grant : /grant/i WORD(s /,/) /on/i TABLE(?) table_id /to/i NAME(s /,/) ';'
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
push @{ $tables{ $table_name }{'permissions'} }, {
type => 'grant',
actions => $item[2],
users => $item[7],
}
}
grant : /grant/i WORD(s /,/) /on/i SCHEMA(?) schema_name /to/i NAME(s /,/) ';'
{ 1 }
drop : /drop/i /[^;]*/ ';'
string :
/'(\.|''|[^\\'])*'/
nonstring : /[^;\'"]+/
statement_body : string | nonstring
insert : /insert/i statement_body(s?) ';'
update : /update/i statement_body(s?) ';'
#
# Create table.
#
create : CREATE temporary(?) TABLE table_id '(' create_definition(s? /,/) ')' table_option(s?) ';'
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
$tables{ $table_name }{'order'} = ++$table_order;
$tables{ $table_name }{'schema_name'} = $schema_name;
$tables{ $table_name }{'table_name'} = $table_name;
$tables{ $table_name }{'temporary'} = $item[2][0];
if ( @table_comments ) {
$tables{ $table_name }{'comments'} = [ @table_comments ];
@table_comments = ();
}
my @constraints;
for my $definition ( @{ $item[6] } ) {
if ( $definition->{'supertype'} eq 'field' ) {
my $field_name = $definition->{'name'};
$tables{ $table_name }{'fields'}{ $field_name } =
{ %$definition, order => $field_order++ };
for my $constraint ( @{ $definition->{'constraints'} || [] } ) {
$constraint->{'fields'} = [ $field_name ];
push @{ $tables{ $table_name }{'constraints'} },
$constraint;
}
}
elsif ( $definition->{'supertype'} eq 'constraint' ) {
push @{ $tables{ $table_name }{'constraints'} }, $definition;
}
elsif ( $definition->{'supertype'} eq 'index' ) {
push @{ $tables{ $table_name }{'indices'} }, $definition;
}
}
for my $option ( @{ $item[8] } ) {
$tables{ $table_name }{'table_options(s?)'}{ $option->{'type'} } =
$option;
}
1;
}
create : CREATE unique(?) /(index|key)/i index_name /on/i table_id using_method(?) '(' field_name(s /,/) ')' include_covering(?) where_predicate(?) ';'
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
push @{ $tables{ $table_name }{'indices'} },
{
name => $item{'index_name'},
supertype => $item{'unique'}[0] ? 'constraint' : 'index',
type => $item{'unique'}[0] ? 'unique' : 'normal',
fields => $item[9],
method => $item{'using_method(?)'}[0],
where => $item{'where_predicate(?)'}[0],
include => $item{'include_covering(?)'}[0]
}
;
}
create : CREATE or_replace(?) temporary(?) VIEW view_id view_fields(?) /AS/i view_target ';'
{
push @views, {
schema_name => $item{view_id}{schema_name},
view_name => $item{view_id}{view_name},
sql => $item{view_target},
fields => $item[6],
is_temporary => $item[3][0],
}
}
create: CREATE /MATERIALIZED VIEW/i if_not_exists(?) view_id view_fields(?) /AS/i view_target ';'
{
push @views, {
schema_name => $item{view_id}{schema_name},
view_name => $item{view_id}{view_name},
sql => $item{view_target},
fields => $item[5],
extra => { materialized => 1 }
}
}
if_not_exists : /IF NOT EXISTS/i
trigger_name : NAME
trigger_scope : /FOR/i /EACH/i /(ROW|STATEMENT)/i { $return = lc $1 }
before_or_after : /(before|after)/i { $return = lc $1 }
trigger_action : /.+/
database_event : /insert|update|delete/i
database_events : database_event(s /OR/)
create : CREATE /TRIGGER/i trigger_name before_or_after database_events /ON/i table_id trigger_scope(?) trigger_action
{
# Hack to pass roundtrip tests which have trigger statements terminated by double semicolon
# and expect the returned data to have the same
my $action = $item{trigger_action};
$action =~ s/;$//;
push @triggers, {
name => $item{trigger_name},
perform_action_when => $item{before_or_after},
database_events => $item{database_events},
on_table => $item{table_id}{table_name},
scope => $item{'trigger_scope(?)'}[0],
action => $action,
}
}
#
# Create anything else (e.g., domain, etc.)
#
create : CREATE WORD /[^;]+/ ';'
{ @table_comments = (); }
using_method : /using/i WORD { $item[2] }
where_predicate : /where/i /[^;]+/
where_paren_predicate : /where/i '(' /[^;]+/ ')'
include_covering : /include/i '(' covering_field_name(s /,/) ')'
{ $item{'covering_field_name(s)'} }
create_definition : field
| table_constraint
|
comment : /^\s*(?:#|-{2})(.*)\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|-*)\s*//;
$comment =~ s/\s*$//;
$return = $comment;
push @table_comments, $comment;
}
comment_on_table : /comment/i /on/i /table/i table_id /is/i comment_phrase ';'
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
push @{ $tables{ $table_name }{'comments'} }, $item{'comment_phrase'};
}
comment_on_column : /comment/i /on/i /column/i column_name /is/i comment_phrase ';'
{
my $table_name = $item[4]->{'table'};
my $field_name = $item[4]->{'field'};
if ($tables{ $table_name }{'fields'}{ $field_name } ) {
push @{ $tables{ $table_name }{'fields'}{ $field_name }{'comments'} },
$item{'comment_phrase'};
}
else {
die "No such column as $table_name.$field_name";
}
}
comment_on_other : /comment/i /on/i /\w+/ /\w+/ /is/i comment_phrase ';'
{
push(@table_comments, $item{'comment_phrase'});
}
# [added by cjm 20041019]
# [TODO: other comment-on types]
# for now we just have a general mechanism for handling other
# kinds of comments than table/column; I'm not sure of the best
# way to incorporate these into the datamodel
#
# this is the exhaustive list of types of comment:
#COMMENT ON DATABASE my_database IS 'Development Database';
#COMMENT ON INDEX my_index IS 'Enforces uniqueness on employee id';
#COMMENT ON RULE my_rule IS 'Logs UPDATES of employee records';
#COMMENT ON SEQUENCE my_sequence IS 'Used to generate primary keys';
#COMMENT ON TABLE my_table IS 'Employee Information';
#COMMENT ON TYPE my_type IS 'Complex Number support';
#COMMENT ON VIEW my_view IS 'View of departmental costs';
#COMMENT ON COLUMN my_table.my_field IS 'Employee ID number';
#COMMENT ON TRIGGER my_trigger ON my_table IS 'Used for R.I.';
#
# this is tested by test 08
column_name : NAME '.' NAME
{ $return = { table => $item[1], field => $item[3] } }
comment_phrase : /null/i
{ $return = 'NULL' }
| SQSTRING
| DOLLARSTRING
field : field_comment(s?) field_name data_type field_meta(s?) field_comment(s?)
{
my ( $default, @constraints, $is_pk );
my $is_nullable = 1;
for my $meta ( @{ $item[4] } ) {
if ( $meta->{'type'} eq 'default' ) {
$default = $meta;
next;
}
elsif ( $meta->{'type'} eq 'not_null' ) {
$is_nullable = 0;
}
elsif ( $meta->{'type'} eq 'primary_key' ) {
$is_pk = 1;
}
push @constraints, $meta if $meta->{'supertype'} eq 'constraint';
}
my @comments = ( @{ $item[1] }, @{ $item[5] } );
$return = {
supertype => 'field',
name => $item{'field_name'},
data_type => $item{'data_type'}{'type'},
size => $item{'data_type'}{'size'},
is_nullable => $is_nullable,
default => $default->{'value'},
constraints => [ @constraints ],
comments => [ @comments ],
is_primary_key => $is_pk || 0,
is_auto_increment => $item{'data_type'}{'is_auto_increment'},
}
}
|
field_comment : /^\s*(?:#|-{2})(.*)\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|-*)\s*//;
$comment =~ s/\s*$//;
$return = $comment;
}
field_meta : default_val
| column_constraint
view_fields : '(' field_name(s /,/) ')'
{ $return = join (',', @{$item[2]} ) }
column_constraint : constraint_name(?) column_constraint_type deferrable(?) deferred(?)
{
my $desc = $item{'column_constraint_type'};
my $type = $desc->{'type'};
my $fields = $desc->{'fields'} || [];
my $expression = $desc->{'expression'} || '';
$return = {
supertype => 'constraint',
name => $item{'constraint_name'}[0] || '',
type => $type,
expression => $type eq 'check' ? $expression : '',
deferrable => $item{'deferrable'},
deferred => $item{'deferred'},
reference_table => $desc->{'reference_table'},
reference_fields => $desc->{'reference_fields'},
match_type => $desc->{'match_type'},
on_delete => $desc->{'on_delete'} || $desc->{'on_delete_do'},
on_update => $desc->{'on_update'} || $desc->{'on_update_do'},
}
}
constraint_name : /constraint/i NAME { $item[2] }
column_constraint_type : /not null/i { $return = { type => 'not_null' } }
|
/null/i
{ $return = { type => 'null' } }
|
/unique/i
{ $return = { type => 'unique' } }
|
/primary key/i
{ $return = { type => 'primary_key' } }
|
/check/i '(' /[^)]+/ ')'
{ $return = { type => 'check', expression => $item[3] } }
|
/references/i table_id parens_word_list(?) match_type(?) key_action(s?)
{
my $table_info = $item{'table_id'};
my $schema_name = $table_info->{'schema_name'};
my $table_name = $table_info->{'table_name'};
my ( $on_delete, $on_update );
for my $action ( @{ $item[5] || [] } ) {
$on_delete = $action->{'action'} if $action->{'type'} eq 'delete';
$on_update = $action->{'action'} if $action->{'type'} eq 'update';
}
$return = {
type => 'foreign_key',
reference_table => $table_name,
reference_fields => $item[3][0],
match_type => $item[4][0],
on_delete => $on_delete,
on_update => $on_update,
}
}
table_id : schema_qualification(?) NAME {
$return = { schema_name => $item[1][0], table_name => $item[2] }
}
view_id : schema_qualification(?) NAME {
$return = { schema_name => $item[1][0], view_name => $item[2] }
}
view_target : /select|with/i /[^;]+/ {
$return = "$item[1] $item[2]";
}
# SELECT views _may_ support outer parens, and we used to produce
such sql, although non-standard. Use ugly lookahead to parse
view_target : '(' /select/i / [^;]+ (?= \) ) /x ')' {
$return = "$item[2] $item[3]"
}
view_target_spec :
schema_qualification : NAME '.'
schema_name : NAME
field_name : NAME
covering_field_name : NAME
double_quote: /"/
index_name : NAME
array_indicator : '[' ']'
{ $return = $item[1].$item[2] }
data_type : pg_data_type parens_value_list(?) array_indicator(?)
{
my $data_type = $item[1];
$data_type->{type} .= $item[3][0] if $item[3][0];
#
# We can deduce some sizes from the data type's name.
#
if ( my @size = @{$item[2]} ) {
$data_type->{'size'} = (@size == 1 ? $size[0] : \@size);
}
$return = $data_type;
}
pg_data_type :
/(bigint|int8)/i
{
$return = {
type => 'integer',
size => 20,
};
}
|
/(smallint|int2)/i
{
$return = {
type => 'integer',
size => 5,
};
}
|
/interval/i
{
$return = { type => 'interval' };
}
|
/(integer|int4?)/i # interval must come before this
{
$return = {
type => 'integer',
size => 10,
};
}
|
/(real|float4)/i
{
$return = {
type => 'real',
size => 10,
};
}
|
/(double precision|float8?)/i
{
$return = {
type => 'float',
size => 20,
};
}
|
/(bigserial|serial8)/i
{
$return = {
type => 'integer',
size => 20,
is_auto_increment => 1,
};
}
|
/serial4?/i
{
$return = {
type => 'integer',
size => 11,
is_auto_increment => 1,
};
}
|
/(bit varying|varbit)/i
{
$return = { type => 'varbit' };
}
|
/character varying/i
{
$return = { type => 'varchar' };
}
|
/char(acter)?/i
{
$return = { type => 'char' };
}
|
/bool(ean)?/i
{
$return = { type => 'boolean' };
}
|
/bytea/i
{
$return = { type => 'bytea' };
}
|
/ ( timestamp (?:tz)? ) (?: \( \d \) )? ( \s with (?:out)? \s time \s zone )? /ix
{
$return = { type => 'timestamp' . ($2||'') };
}
|
/ ( time (?:tz)? ) (?: \( \d \) )? ( \s with (?:out)? \s time \s zone )? /ix
{
$return = { type => 'time' . ($2||'') };
}
|
/text/i
{
$return = {
type => 'text',
size => 64_000,
};
}
|
/(bit|box|cidr|circle|date|inet|line|lseg|macaddr|money|numeric|decimal|path|point|polygon|varchar|json|hstore|uuid)/i
{
$return = { type => $item[1] };
}
parens_value_list : '(' VALUE(s /,/) ')'
{ $item[2] }
parens_word_list : '(' NAME(s /,/) ')'
{ $item[2] }
field_size : '(' num_range ')' { $item{'num_range'} }
num_range : DIGITS ',' DIGITS
{ $return = $item[1].','.$item[3] }
| DIGITS
{ $return = $item[1] }
table_constraint : comment(s?) constraint_name(?) table_constraint_type deferrable(?) deferred(?) comment(s?)
{
my $desc = $item{'table_constraint_type'};
my $type = $desc->{'type'};
my $fields = $desc->{'fields'};
my $expression = $desc->{'expression'};
my @comments = ( @{ $item[1] }, @{ $item[-1] } );
my $expr_constraint = $type eq 'check' || $type eq 'exclude';
$return = {
name => $item[2][0] || '',
supertype => 'constraint',
type => $type,
fields => $expr_constraint ? [] : $fields,
expression => $expr_constraint ? $expression : '',
deferrable => $item{'deferrable'},
deferred => $item{'deferred'},
on_delete => $desc->{'on_delete'} || $desc->{'on_delete_do'},
on_update => $desc->{'on_update'} || $desc->{'on_update_do'},
comments => [ @comments ],
%{$desc}{qw/include using where reference_table reference_fields match_type/}
}
}
table_constraint_type : /primary key/i '(' NAME(s /,/) ')' include_covering(?)
{
$return = {
type => 'primary_key',
fields => $item[3],
include => $item{'include_covering(?)'}[0],
}
}
|
/unique/i '(' NAME(s /,/) ')' include_covering(?)
{
$return = {
type => 'unique',
fields => $item[3],
include => $item{'include_covering(?)'}[0],
}
}
|
/check/i '(' /[^)]+/ ')'
{
$return = {
type => 'check',
expression => $item[3],
}
}
|
/exclude/i using_method(?) '(' /[^)]+/ ')' include_covering(?) where_paren_predicate(?) {
$return = {
type => 'exclude',
expression => $item{__PATTERN2__},
using => $item{'using_method(?)'}[0],
include => $item{'include_covering(?)'}[0],
where => $item{'where_paren_predicate(?)'}[0],
}
}
|
/foreign key/i '(' NAME(s /,/) ')' /references/i table_id parens_word_list(?) match_type(?) key_action(s?)
{
my ( $on_delete, $on_update );
for my $action ( @{ $item[9] || [] } ) {
$on_delete = $action->{'action'} if $action->{'type'} eq 'delete';
$on_update = $action->{'action'} if $action->{'type'} eq 'update';
}
$return = {
supertype => 'constraint',
type => 'foreign_key',
fields => $item[3],
reference_table => $item[6]->{'table_name'},
reference_fields => $item[7][0],
match_type => $item[8][0],
on_delete => $on_delete || '',
on_update => $on_update || '',
}
}
deferrable : not(?) /deferrable/i
{
$return = ( $item[1] =~ /not/i ) ? 0 : 1;
}
deferred : /initially/i /(deferred|immediate)/i { $item[2] }
match_type : /match/i /partial|full|simple/i { $item[2] }
key_action : key_delete
|
key_update
key_delete : /on delete/i key_mutation
{
$return = {
type => 'delete',
action => $item[2],
};
}
key_update : /on update/i key_mutation
{
$return = {
type => 'update',
action => $item[2],
};
}
key_mutation : /no action/i { $return = 'no_action' }
|
/restrict/i { $return = 'restrict' }
|
/cascade/i { $return = 'cascade' }
|
/set null/i { $return = 'set null' }
|
/set default/i { $return = 'set default' }
alter : alter_table table_id add_column field ';'
{
my $field_def = $item[4];
$tables{ $item[2]->{'table_name'} }{'fields'}{ $field_def->{'name'} } = {
%$field_def, order => $field_order++
};
1;
}
alter : alter_table table_id ADD table_constraint ';'
{
my $table_name = $item[2]->{'table_name'};
my $constraint = $item[4];
push @{ $tables{ $table_name }{'constraints'} }, $constraint;
1;
}
alter : alter_table table_id drop_column NAME restrict_or_cascade(?) ';'
{
$tables{ $item[2]->{'table_name'} }{'fields'}{ $item[4] }{'drop'} = 1;
1;
}
alter : alter_table table_id alter_column NAME alter_default_val ';'
{
$tables{ $item[2]->{'table_name'} }{'fields'}{ $item[4] }{'default'} =
$item[5]->{'value'};
1;
}
#
# These will just parse for now but won't affect the structure. - ky
#
alter : alter_table table_id /rename/i /to/i NAME ';'
{ 1 }
alter : alter_table table_id alter_column NAME SET /statistics/i INTEGER ';'
{ 1 }
alter : alter_table table_id alter_column NAME SET /storage/i storage_type ';'
{ 1 }
alter : alter_table table_id rename_column NAME /to/i NAME ';'
{ 1 }
alter : alter_table table_id DROP /constraint/i NAME restrict_or_cascade ';'
{ 1 }
alter : alter_table table_id /owner/i /to/i NAME ';'
{ 1 }
alter : alter_sequence NAME /owned/i /by/i column_name ';'
{ 1 }
storage_type : /(plain|external|extended|main)/i
temporary : /temp(orary)?\b/i
{
1;
}
or_replace : /or replace/i
alter_default_val : SET default_val
{
$return = { value => $item[2]->{'value'} }
}
| DROP DEFAULT
{
$return = { value => undef }
}
#
# This is a little tricky to get right, at least WRT to making the
# tests pass. The problem is that the constraints are stored just as
# a list (no name access), and the tests expect the constraints in a
# particular order. I'm going to leave the rule but disable the code
# for now. - ky
#
alter : alter_table table_id alter_column NAME alter_nullable ';'
{
# my $table_name = $item[2]->{'table_name'};
# my $field_name = $item[4];
# my $is_nullable = $item[5]->{'is_nullable'};
#
# $tables{ $table_name }{'fields'}{ $field_name }{'is_nullable'} =
# $is_nullable;
#
# if ( $is_nullable ) {
# 1;
# push @{ $tables{ $table_name }{'constraints'} }, {
# type => 'not_null',
# fields => [ $field_name ],
# };
# }
# else {
# for my $i (
# 0 .. $#{ $tables{ $table_name }{'constraints'} || [] }
# ) {
# my $c = $tables{ $table_name }{'constraints'}[ $i ] or next;
# my $fields = join( '', @{ $c->{'fields'} || [] } ) or next;
# if ( $c->{'type'} eq 'not_null' && $fields eq $field_name ) {
# delete $tables{ $table_name }{'constraints'}[ $i ];
# last;
# }
# }
# }
1;
}
alter_nullable : SET not_null
{
$return = { is_nullable => 0 }
}
| DROP not_null
{
$return = { is_nullable => 1 }
}
not_null : /not/i /null/i
not : /not/i
add_column : ADD COLUMN(?)
alter_table : ALTER TABLE ONLY(?)
alter_sequence : ALTER SEQUENCE
drop_column : DROP COLUMN(?)
alter_column : ALTER COLUMN(?)
rename_column : /rename/i COLUMN(?)
restrict_or_cascade : /restrict/i |
/cascade/i
# Handle functions that can be called
select : SELECT select_function ';'
{ 1 }
# Read the setval function but don't do anything with it because this parser
# isn't handling sequences
select_function : schema_qualification(?) /setval/i '(' VALUE /,/ VALUE /,/ /(true|false)/i ')'
{ 1 }
# Skipping all COPY commands
copy : COPY WORD /[^;]+/ ';' { 1 }
{ 1 }
# The "\." allows reading in from STDIN but this isn't needed for schema
# creation, so it is skipped.
readin_symbol : '\.'
{1}
#
# End basically useless stuff. - ky
#
create_table : CREATE TABLE
create_index : CREATE /index/i
default_val : DEFAULT DEFAULT_VALUE ( '::' data_type )(?)
{
my $val = $item[2];
$val =~ s/^\((\d+)\)\z/$1/; # for example (0)::smallint
$return = {
supertype => 'constraint',
type => 'default',
value => $val,
}
}
| /null/i
{
$return = {
supertype => 'constraint',
type => 'default',
value => 'NULL',
}
}
DEFAULT_VALUE : VALUE
| /\w+\(.*\)/
| /\w+/
| /\(\d+\)/
name_with_opt_paren : NAME parens_value_list(s?)
{ $item[2][0] ? "$item[1]($item[2][0][0])" : $item[1] }
unique : /unique/i { 1 }
key : /key/i | /index/i
table_option : /inherits/i '(' NAME(s /,/) ')'
{
$return = { type => 'inherits', table_name => $item[3] }
}
|
/with(out)? oids/i
{
$return = { type => $item[1] =~ /out/i ? 'without_oids' : 'with_oids' }
}
ADD : /add/i
ALTER : /alter/i
CREATE : /create/i
ONLY : /only/i
DEFAULT : /default/i
DROP : /drop/i
COLUMN : /column/i
TABLE : /table/i
VIEW : /view/i
SCHEMA : /schema/i
SEMICOLON : /\s*;\n?/
SEQUENCE : /sequence/i
SELECT : /select/i
COPY : /copy/i
INTEGER : /\d+/
WORD : /\w+/
DIGITS : /\d+/
COMMA : ','
SET : /set/i
NAME : DQSTRING
| /\w+/
DQSTRING : '"' <skip: ''> /((?:[^"]|"")+)/ '"'
{ ($return = $item[3]) =~ s/""/"/g; }
SQSTRING : "'" <skip: ''> /((?:[^']|'')*)/ "'"
{ ($return = $item[3]) =~ s/''/'/g }
DOLLARSTRING : /\$[^\$]*\$/ <skip: ''> /.*?(?=\Q$item[1]\E)/s "$item[1]"
{ $return = $item[3]; }
VALUE : /[-+]?\d*\.?\d+(?:[eE]\d+)?/
| SQSTRING
| DOLLARSTRING
| /null/i
{ 'NULL' }
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
local $::RD_ERRORS = 1
unless defined $::RD_ERRORS; # Make sure the parser dies when it encounters an error
local $::RD_WARN = 1
unless defined $::RD_WARN; # Enable warnings. This will warn on unused rules &c.
local $::RD_HINT = 1
unless defined $::RD_HINT; # Give out hints to help fix problems.
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('PostgreSQL');
my $result = $parser->startrule($data);
die "Parse failed.\n" unless defined $result;
warn Dumper($result) if $DEBUG;
my $schema = $translator->schema;
my @tables = sort { ($result->{tables}{$a}{'order'} || 0) <=> ($result->{tables}{$b}{'order'} || 0) }
keys %{ $result->{tables} };
for my $table_name (@tables) {
my $tdata = $result->{tables}{$table_name};
my $table = $schema->add_table(
#schema => $tdata->{'schema_name'},
name => $tdata->{'table_name'},
) or die "Couldn't create table '$table_name': " . $schema->error;
$table->extra(temporary => 1) if $tdata->{'temporary'};
$table->comments($tdata->{'comments'});
my @fields
= sort { $tdata->{'fields'}{$a}{'order'} <=> $tdata->{'fields'}{$b}{'order'} } keys %{ $tdata->{'fields'} };
for my $fname (@fields) {
my $fdata = $tdata->{'fields'}{$fname};
next if $fdata->{'drop'};
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_increment'},
is_nullable => $fdata->{'is_nullable'},
comments => $fdata->{'comments'},
) or die $table->error;
$table->primary_key($field->name) if $fdata->{'is_primary_key'};
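# Promote inline foreign key references to table-level constraints so
# they are created along with the explicit constraints below.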
for my $cdata (@{ $fdata->{'constraints'} }) {
next unless $cdata->{'type'} eq 'foreign_key';
$cdata->{'fields'} ||= [ $field->name ];
push @{ $tdata->{'constraints'} }, $cdata;
}
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
my @options = ();
push @options, { using => $idata->{'method'} } if $idata->{method};
push @options, { where => $idata->{'where'} } if $idata->{where};
push @options, { include => $idata->{'include'} }
if $idata->{include};
my $index = $table->add_index(
name => $idata->{'name'},
type => uc $idata->{'type'},
fields => $idata->{'fields'},
options => \@options
) or die $table->error . ' ' . $table->name;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
my $options = [
# load this up with the extras
map +{ %$cdata{$_} }, grep $cdata->{$_},
qw/include using where/
];
my $constraint = $table->add_constraint(
name => $cdata->{'name'},
type => $cdata->{'type'},
fields => $cdata->{'fields'},
reference_table => $cdata->{'reference_table'},
reference_fields => $cdata->{'reference_fields'},
match_type => $cdata->{'match_type'} || '',
on_delete => $cdata->{'on_delete'} || $cdata->{'on_delete_do'},
on_update => $cdata->{'on_update'} || $cdata->{'on_update_do'},
expression => $cdata->{'expression'},
options => $options
)
or die "Can't add constraint of type '"
. $cdata->{'type'}
. "' to table '"
. $table->name . "': "
. $table->error;
}
}
for my $vinfo (@{ $result->{views} }) {
my $sql = $vinfo->{sql};
$sql =~ s/\A\s+|\s+\z//g;
my $view = $schema->add_view(
name => $vinfo->{view_name},
sql => $sql,
fields => $vinfo->{fields},
);
$view->extra(temporary => 1) if $vinfo->{is_temporary};
}
for my $trigger (@{ $result->{triggers} }) {
$schema->add_trigger(%$trigger);
}
return 1;
}
1;
# -------------------------------------------------------------------
# Rescue the drowning and tie your shoestrings.
# Henry David Thoreau
# -------------------------------------------------------------------
=pod
=head1 AUTHORS
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>,
Allen Day E<lt>allenday@ucla.eduE<gt>.
=head1 SEE ALSO
perl(1), Parse::RecDescent.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/YAML.pm 0000644 0000000 0000000 00000006374 14551163724 022145 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::YAML;
use strict;
use warnings;
our $VERSION = '1.65';
use SQL::Translator::Schema;
use SQL::Translator::Utils qw(header_comment);
use Data::Dumper;
use YAML qw(Load);
sub parse {
my ($translator, $data) = @_;
$data = Load($data);
$data = $data->{'schema'};
warn "YAML data:", Dumper($data) if $translator->debug;
my $schema = $translator->schema;
#
# Tables
#
my @tables = map { $data->{'tables'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'tables'}{$_}{'order'} || 0, $_ ] }
keys %{ $data->{'tables'} };
for my $tdata (@tables) {
my $table = $schema->add_table(map { $tdata->{$_} ? ($_ => $tdata->{$_}) : () } (qw/name extra options/))
or die $schema->error;
my @fields = map { $tdata->{'fields'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $tdata->{'fields'}{$_}{'order'}, $_ ] }
keys %{ $tdata->{'fields'} };
for my $fdata (@fields) {
$table->add_field(%$fdata) or die $table->error;
$table->primary_key($fdata->{'name'})
if $fdata->{'is_primary_key'};
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
$table->add_index(%$idata) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
$table->add_constraint(%$cdata) or die $table->error;
}
$table->comments($tdata->{'comments'})
if exists $tdata->{'comments'};
}
#
# Views
#
my @views = map { $data->{'views'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'views'}{$_}{'order'}, $_ ] }
keys %{ $data->{'views'} };
for my $vdata (@views) {
$schema->add_view(%$vdata) or die $schema->error;
}
#
# Triggers
#
my @triggers = map { $data->{'triggers'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'triggers'}{$_}{'order'}, $_ ] }
keys %{ $data->{'triggers'} };
for my $tdata (@triggers) {
$schema->add_trigger(%$tdata) or die $schema->error;
}
#
# Procedures
#
my @procedures = map { $data->{'procedures'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'procedures'}{$_}{'order'}, $_ ] }
keys %{ $data->{'procedures'} };
for my $tdata (@procedures) {
$schema->add_procedure(%$tdata) or die $schema->error;
}
if (my $tr_data = $data->{'translator'}) {
$translator->add_drop_table($tr_data->{'add_drop_table'});
$translator->filename($tr_data->{'filename'});
$translator->no_comments($tr_data->{'no_comments'});
$translator->parser_args($tr_data->{'parser_args'});
$translator->producer_args($tr_data->{'producer_args'});
$translator->parser_type($tr_data->{'parser_type'});
$translator->producer_type($tr_data->{'producer_type'});
$translator->show_warnings($tr_data->{'show_warnings'});
$translator->trace($tr_data->{'trace'});
}
return 1;
}
1;
__END__
=head1 NAME
SQL::Translator::Parser::YAML - Parse a YAML representation of a schema
=head1 SYNOPSIS
use SQL::Translator;
my $translator = SQL::Translator->new(parser => "YAML");
=head1 DESCRIPTION
C<SQL::Translator::Parser::YAML> parses a schema serialized with YAML.
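A minimal round-trip sketch (the file name and the SQLite producer are illustrative placeholders): a schema previously written by the YAML producer can be read back in and turned into DDL:
  use SQL::Translator;
  my $translator = SQL::Translator->new(
      parser   => 'YAML',
      producer => 'SQLite',
  );
  my $ddl = $translator->translate(filename => 'schema.yaml')
      or die $translator->error;
  print $ddl;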
=head1 AUTHORS
Darren Chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
SQL-Translator-1.65/lib/SQL/Translator/Parser/XML/ 0000755 0000000 0000000 00000000000 14551164244 021471 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Parser/XML/SQLFairy.pm 0000644 0000000 0000000 00000021726 14551163724 023473 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::XML::SQLFairy;
=head1 NAME
SQL::Translator::Parser::XML::SQLFairy - parser for SQL::Translator's XML.
=head1 SYNOPSIS
use SQL::Translator;
my $translator = SQL::Translator->new( show_warnings => 1 );
my $out = $translator->translate(
from => 'XML-SQLFairy',
to => 'MySQL',
filename => 'schema.xml',
) or die $translator->error;
print $out;
=head1 DESCRIPTION
This parser handles the flavor of XML used natively by the SQLFairy
project (L). The XML must be in the XML namespace
C<http://sqlfairy.sourceforge.net/sqlfairy.xml>.
See L for details of this format.
You do not need to specify every attribute of the Schema objects, as anything
missing from the XML will be set to its default value. A field, for example,
can be written with only its name attribute instead of the full set of
attributes.
If you do not explicitly set the order of items using order attributes on the
tags then the order the tags appear in the XML will be used.
=head2 default_value
Leave the attribute out altogether to use the default in
L<SQL::Translator::Schema::Field>. Use empty quotes or 'EMPTY_STRING'
for a zero length string. 'NULL' for an explicit null (currently sets
default_value to undef in the field object).
=head2 ARGS
Doesn't take any extra parser args at the moment.
=head1 LEGACY FORMAT
The previous version of the SQLFairy XML allowed the attributes of the
schema objects to be written as either xml attributes or as data elements, in
any combination. While this allows for lots of flexibility in writing the XML
the result is a great many possible XML formats, not so good for DTD writing,
XPathing etc! So we have moved to a fixed version described in
L.
This version of the parser will still parse the old formats and emit warnings
when it sees them being used, but they should be considered B<deprecated>.
To convert your old format files simply pass them through the translator :)
$ sqlt -f XML-SQLFairy -t XML-SQLFairy schema-old.xml > schema-new.xml
=cut
use strict;
use warnings;
our ($DEBUG, @EXPORT_OK);
our $VERSION = '1.65';
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use Carp::Clan qw/^SQL::Translator/;
use Exporter;
use base qw(Exporter);
@EXPORT_OK = qw(parse);
use base qw/SQL::Translator::Parser/; # Doesnt do anything at the mo!
use SQL::Translator::Utils 'debug';
use XML::LibXML;
use XML::LibXML::XPathContext;
sub parse {
my ($translator, $data) = @_;
my $schema = $translator->schema;
local $DEBUG = $translator->debug;
my $doc = XML::LibXML->new->parse_string($data);
my $xp = XML::LibXML::XPathContext->new($doc);
$xp->registerNs("sqlf", "http://sqlfairy.sourceforge.net/sqlfairy.xml");
#
# Work our way through the tables
#
my @nodes = $xp->findnodes('/sqlf:schema/sqlf:table|/sqlf:schema/sqlf:tables/sqlf:table');
for my $tblnode (
sort {
("" . $xp->findvalue('sqlf:order|@order', $a) || 0) <=> ("" . $xp->findvalue('sqlf:order|@order', $b) || 0)
} @nodes
) {
debug "Adding table:" . $xp->findvalue('sqlf:name', $tblnode);
my $table = $schema->add_table(get_tagfields($xp, $tblnode, "sqlf:" => qw/name order extra/))
or die $schema->error;
#
# Fields
#
my @nodes = $xp->findnodes('sqlf:fields/sqlf:field', $tblnode);
foreach (sort { ("" . $xp->findvalue('sqlf:order', $a) || 0) <=> ("" . $xp->findvalue('sqlf:order', $b) || 0) }
@nodes) {
my %fdata = get_tagfields(
$xp, $_, "sqlf:",
qw/name data_type size default_value is_nullable extra
is_auto_increment is_primary_key is_foreign_key comments/
);
if (exists $fdata{'default_value'}
and defined $fdata{'default_value'}) {
if ($fdata{'default_value'} =~ /^\s*NULL\s*$/) {
$fdata{'default_value'} = undef;
} elsif ($fdata{'default_value'} =~ /^\s*EMPTY_STRING\s*$/) {
$fdata{'default_value'} = "";
}
}
my $field = $table->add_field(%fdata) or die $table->error;
$table->primary_key($field->name) if $fdata{'is_primary_key'};
#
# TODO:
# - We should be able to make the table obj spot this when
# we use add_field.
#
}
#
# Constraints
#
@nodes = $xp->findnodes('sqlf:constraints/sqlf:constraint', $tblnode);
foreach (@nodes) {
my %data = get_tagfields(
$xp, $_, "sqlf:",
qw/name type table fields reference_fields reference_table
match_type on_delete on_update extra/
);
$table->add_constraint(%data) or die $table->error;
}
#
# Indexes
#
@nodes = $xp->findnodes('sqlf:indices/sqlf:index', $tblnode);
foreach (@nodes) {
my %data = get_tagfields($xp, $_, "sqlf:", qw/name type fields options extra/);
$table->add_index(%data) or die $table->error;
}
#
# Comments
#
@nodes = $xp->findnodes('sqlf:comments/sqlf:comment', $tblnode);
foreach (@nodes) {
my $data = $_->string_value;
$table->comments($data);
}
} # tables loop
#
# Views
#
@nodes = $xp->findnodes('/sqlf:schema/sqlf:view|/sqlf:schema/sqlf:views/sqlf:view');
foreach (@nodes) {
my %data = get_tagfields($xp, $_, "sqlf:", qw/name sql fields order extra/);
$schema->add_view(%data) or die $schema->error;
}
#
# Triggers
#
@nodes = $xp->findnodes('/sqlf:schema/sqlf:trigger|/sqlf:schema/sqlf:triggers/sqlf:trigger');
foreach (@nodes) {
my %data = get_tagfields(
$xp, $_, "sqlf:", qw/
name perform_action_when database_event database_events fields
on_table action order extra scope
/
);
# back compat
if (my $evt = $data{database_event} and $translator->{show_warnings}) {
carp 'The database_event tag is deprecated - please use '
. 'database_events (which can take one or more comma separated '
. 'event names)';
$data{database_events} = join(', ', $data{database_events} || (), $evt,);
}
# split into arrayref
if (my $evts = $data{database_events}) {
$data{database_events} = [ split(/\s*,\s*/, $evts) ];
}
$schema->add_trigger(%data) or die $schema->error;
}
#
# Procedures
#
@nodes = $xp->findnodes('/sqlf:schema/sqlf:procedure|/sqlf:schema/sqlf:procedures/sqlf:procedure');
foreach (@nodes) {
my %data = get_tagfields($xp, $_, "sqlf:", qw/name sql parameters owner comments order extra/);
$schema->add_procedure(%data) or die $schema->error;
}
return 1;
}
sub get_tagfields {
#
# get_tagfields XP, NODE, NAMESPACE => qw/TAGNAMES/;
# get_tagfields $node, "sqlf:" => qw/name type fields reference/;
#
# Returns hash of data.
# TODO - Add handling of an explicit NULL value.
#
my ($xp, $node, @names) = @_;
my (%data, $ns);
foreach (@names) {
if (m/:$/) { $ns = $_; next; } # Set def namespace
my $thisns = (s/(^.*?:)// ? $1 : $ns);
my $is_attrib = m/^(sql|comments|action|extra)$/ ? 0 : 1;
my $attrib_path = "\@$_";
my $tag_path = "$thisns$_";
if (my $found = $xp->find($attrib_path, $node)) {
$data{$_} = "" . $found->to_literal;
warn "Use of '$_' as an attribute is deprecated."
. " Use a child tag instead."
. " To convert your file to the new version see the Docs.\n"
unless $is_attrib;
debug "Got $_=" . (defined $data{$_} ? $data{$_} : 'UNDEF');
} elsif ($found = $xp->find($tag_path, $node)) {
if ($_ eq "extra") {
my %extra;
foreach ($found->pop->getAttributes) {
$extra{ $_->getName } = $_->getData;
}
$data{$_} = \%extra;
} else {
$data{$_} = "" . $found->to_literal;
}
warn "Use of '$_' as a child tag is deprecated."
. " Use an attribute instead."
. " To convert your file to the new version see the Docs.\n"
if $is_attrib;
debug "Got $_=" . (defined $data{$_} ? $data{$_} : 'UNDEF');
}
}
return wantarray ? %data : \%data;
}
1;
=pod
=head1 BUGS
Ignores the order attribute for Constraints, Views, Indices, Views, Triggers
and Procedures, using the tag order instead. (This is the order output by the
SQLFairy XML producer).
=head1 SEE ALSO
L, L, L,
L.
=head1 TODO
=over 4
=item *
Support options attribute.
=item *
Test foreign keys are parsed ok.
=item *
Control over defaulting.
=back
=head1 AUTHOR
Mark D. Addison E<lt>mark.addison@itn.co.ukE<gt>,
Jonathan Yu E<lt>frequency@cpan.orgE<gt>
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/DBI.pm 0000644 0000000 0000000 00000007675 14551163724 022006 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::DBI;
=head1 NAME
SQL::Translator::Parser::DBI - "parser" for DBI handles
=head1 SYNOPSIS
use DBI;
use SQL::Translator;
my $dbh = DBI->connect('dsn', 'user', 'pass',
{
RaiseError => 1,
FetchHashKeyName => 'NAME_lc',
}
);
my $translator = SQL::Translator->new(
parser => 'DBI',
parser_args => {
dbh => $dbh,
},
);
Or:
use SQL::Translator;
my $translator = SQL::Translator->new(
parser => 'DBI',
parser_args => {
dsn => 'dbi:mysql:FOO',
db_user => 'guest',
db_password => 'password',
}
);
=head1 DESCRIPTION
This parser accepts an open database handle (or the arguments to create
one) and queries the database directly for the information.
The following are acceptable arguments:
=over 4
=item * dbh
An open DBI database handle. NB: Be sure to create the database with the
"FetchHashKeyName => 'NAME_lc'" option as all the DBI parsers expect
lowercased column names.
=item * dsn
The DSN to use for connecting to a database.
=item * db_user
The user name to use for connecting to a database.
=item * db_password
The password to use for connecting to a database.
=back
There is no need to specify which type of database you are querying as
this is determined automatically by inspecting $dbh->{'Driver'}{'Name'}.
If a parser exists for your database, it will be used automatically;
if not, the code will fail automatically (and you can write the parser
and contribute it to the project!).
Currently parsers exist for the following databases:
=over 4
=item * MySQL
=item * SQLite
=item * Sybase
=item * PostgreSQL (still experimental)
=back
Most of these parsers are able to query the database directly for the
structure rather than parsing a text file. For large schemas, this is
probably orders of magnitude faster than traditional parsing (which
uses Parse::RecDescent, an amazing module but really quite slow).
Though no Oracle parser currently exists, it would be fairly easy to
query an Oracle database directly by using DDL::Oracle to generate a
DDL for the schema and then using the normal Oracle parser on this.
Perhaps future versions of SQL::Translator will include the ability to
query Oracle directly and skip the parsing of a text file, too.
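A minimal sketch of a complete run (the DSN, credentials, producer and placeholder data string are illustrative; this parser itself ignores the usual input data and works entirely through the handle):
  use DBI;
  use SQL::Translator;
  my $dbh = DBI->connect(
      'dbi:SQLite:dbname=example.db', '', '',
      { RaiseError => 1, FetchHashKeyName => 'NAME_lc' },
  );
  my $translator = SQL::Translator->new(
      parser      => 'DBI',
      parser_args => { dbh => $dbh },
      producer    => 'PostgreSQL',
  );
  # the DBI parser does not read the data argument, so any
  # placeholder string will do here
  my $ddl = $translator->translate(data => 'placeholder')
      or die $translator->error;
  print $ddl;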
=cut
use strict;
use warnings;
use DBI;
our @EXPORT;
our $VERSION = '1.65';
use constant DRIVERS => {
mysql => 'MySQL',
odbc => 'SQLServer',
oracle => 'Oracle',
pg => 'PostgreSQL',
sqlite => 'SQLite',
sybase => 'Sybase',
db2 => 'DB2',
};
use Exporter;
use SQL::Translator::Utils qw(debug);
use base qw(Exporter);
@EXPORT = qw(parse);
#
# Passed a SQL::Translator instance and a string containing the data
#
sub parse {
my ($tr, $data) = @_;
my $args = $tr->parser_args;
my $dbh = $args->{'dbh'};
my $dsn = $args->{'dsn'};
my $db_user = $args->{'db_user'};
my $db_password = $args->{'db_password'};
my $dbh_is_local;
unless ($dbh) {
die 'No DSN' unless $dsn;
$dbh = DBI->connect(
$dsn, $db_user,
$db_password,
{
FetchHashKeyName => 'NAME_lc',
LongReadLen => 3000,
LongTruncOk => 1,
RaiseError => 1,
}
);
$dbh_is_local = 1;
}
die 'No database handle' unless defined $dbh;
my $db_type = $dbh->{'Driver'}{'Name'} or die 'Cannot determine DBI type';
my $driver = DRIVERS->{ lc $db_type } or die "$db_type not supported";
my $pkg = "SQL::Translator::Parser::DBI::$driver";
my $sub = $pkg . '::parse';
SQL::Translator::load($pkg);
my $s = eval {
no strict 'refs';
&{$sub}($tr, $dbh) or die "No result from $pkg";
};
my $err = $@;
eval { $dbh->disconnect } if (defined $dbh and $dbh_is_local);
die $err if $err;
return $s;
}
1;
=pod
=head1 AUTHOR
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
DBI, SQL::Translator.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/Sybase.pm 0000644 0000000 0000000 00000023725 14551163724 022630 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::Sybase;
=head1 NAME
SQL::Translator::Parser::Sybase - parser for Sybase
=head1 SYNOPSIS
use SQL::Translator::Parser::Sybase;
=head1 DESCRIPTION
Mostly parses the output of "dbschema.pl," a Perl script freely
available from http://www.midsomer.org. The parsing is not complete,
however, and you would probably have much better luck using the
DBI-Sybase parser included with SQL::Translator.
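A minimal sketch (the file name and producer are placeholders), assuming C<schema.sql> holds output captured from dbschema.pl:
  use SQL::Translator;
  my $translator = SQL::Translator->new(
      parser   => 'Sybase',
      producer => 'PostgreSQL',
  );
  my $ddl = $translator->translate(filename => 'schema.sql')
      or die $translator->error;
  print $ddl;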
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use SQL::Translator::Utils qw/ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our $GRAMMAR = <<'END_OF_GRAMMAR';
{
my ( %tables, @table_comments, $table_order );
}
startrule : statement(s) eofile { \%tables }
eofile : /^\Z/
statement : create_table
| create_procedure
| create_index
| create_constraint
| comment
| use
| setuser
| if
| print
| grant
| exec
|
use : /use/i WORD GO
{ @table_comments = () }
setuser : /setuser/i NAME GO
if : /if/i object_not_null begin if_command end GO
if_command : grant
| create_index
| create_constraint
object_not_null : /object_id/i '(' ident ')' /is not null/i
print : /\s*/ /print/i /.*/
else : /else/i /.*/
begin : /begin/i
end : /end/i
grant : /grant/i /[^\n]*/
exec : exec_statement(s) GO
exec_statement : /exec/i /[^\n]+/
comment : comment_start comment_middle comment_end
{
my $comment = $item[2];
$comment =~ s/^\s*|\s*$//mg;
$comment =~ s/^\**\s*//mg;
push @table_comments, $comment;
}
comment_start : /^\s*\/\*/
comment_end : /\s*\*\//
comment_middle : m{([^*]+|\*(?!/))*}
#
# Create table.
#
create_table : /create/i /table/i ident '(' create_def(s /,/) ')' lock(?) on_system(?) GO
{
my $table_owner = $item[3]{'owner'};
my $table_name = $item[3]{'name'};
if ( @table_comments ) {
$tables{ $table_name }{'comments'} = [ @table_comments ];
@table_comments = ();
}
$tables{ $table_name }{'order'} = ++$table_order;
$tables{ $table_name }{'name'} = $table_name;
$tables{ $table_name }{'owner'} = $table_owner;
$tables{ $table_name }{'system'} = $item[7];
my $i = 0;
for my $def ( @{ $item[5] } ) {
if ( $def->{'supertype'} eq 'field' ) {
my $field_name = $def->{'name'};
$tables{ $table_name }{'fields'}{ $field_name } =
{ %$def, order => $i };
$i++;
if ( $def->{'is_primary_key'} ) {
push @{ $tables{ $table_name }{'constraints'} }, {
type => 'primary_key',
fields => [ $field_name ],
};
}
}
elsif ( $def->{'supertype'} eq 'constraint' ) {
push @{ $tables{ $table_name }{'constraints'} }, $def;
}
else {
push @{ $tables{ $table_name }{'indices'} }, $def;
}
}
}
create_constraint : /create/i constraint
{
@table_comments = ();
push @{ $tables{ $item[2]{'table'} }{'constraints'} }, $item[2];
}
create_index : /create/i index
{
@table_comments = ();
push @{ $tables{ $item[2]{'table'} }{'indices'} }, $item[2];
}
create_procedure : /create/i /procedure/i procedure_body GO
{
@table_comments = ();
}
procedure_body : not_go(s)
not_go : /((?!go).)*/
create_def : field
| index
| constraint
blank : /\s*/
field : field_name data_type nullable(?)
{
$return = {
supertype => 'field',
name => $item{'field_name'},
data_type => $item{'data_type'}{'type'},
size => $item{'data_type'}{'size'},
nullable => $item[3][0],
# default => $item{'default_val'}[0],
# is_auto_inc => $item{'auto_inc'}[0],
# is_primary_key => $item{'primary_key'}[0],
}
}
constraint : primary_key_constraint
| unique_constraint
field_name : WORD
index_name : WORD
table_name : WORD
data_type : WORD field_size(?)
{
$return = {
type => $item[1],
size => $item[2][0]
}
}
lock : /lock/i /datarows/i
field_type : WORD
field_size : '(' num_range ')' { $item{'num_range'} }
num_range : DIGITS ',' DIGITS
{ $return = $item[1].','.$item[3] }
| DIGITS
{ $return = $item[1] }
nullable : /not/i /null/i
{ $return = 0 }
| /null/i
{ $return = 1 }
default_val : /default/i /(?:')?[\w\d.-]*(?:')?/
{ $item[2]=~s/'//g; $return=$item[2] }
auto_inc : /auto_increment/i { 1 }
primary_key_constraint : /primary/i /key/i index_name(?) parens_field_list
{
$return = {
supertype => 'constraint',
name => $item{'index_name'}[0],
type => 'primary_key',
fields => $item[4],
}
}
unique_constraint : /unique/i clustered(?) INDEX(?) index_name(?) on_table(?) parens_field_list
{
$return = {
supertype => 'constraint',
type => 'unique',
clustered => $item[2][0],
name => $item[4][0],
table => $item[5][0],
fields => $item[6],
}
}
clustered : /clustered/i
{ $return = 1 }
| /nonclustered/i
{ $return = 0 }
INDEX : /index/i
on_table : /on/i table_name
{ $return = $item[2] }
on_system : /on/i /system/i
{ $return = 1 }
index : clustered(?) INDEX index_name(?) on_table(?) parens_field_list
{
$return = {
supertype => 'index',
type => 'normal',
clustered => $item[1][0],
name => $item[3][0],
table => $item[4][0],
fields => $item[5],
}
}
parens_field_list : '(' field_name(s /,/) ')'
{ $item[2] }
ident : QUOTE(?) WORD '.' WORD QUOTE(?)
{ $return = { owner => $item[2], name => $item[4] } }
| WORD
{ $return = { name => $item[1] } }
GO : /^go/i
NAME : QUOTE(?) /\w+/ QUOTE(?)
{ $item[2] }
WORD : /[\w#]+/
DIGITS : /\d+/
COMMA : ','
QUOTE : /'/
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
local $::RD_ERRORS = 1
unless defined $::RD_ERRORS; # Make sure the parser dies when it encounters an error
local $::RD_WARN = 1
unless defined $::RD_WARN; # Enable warnings. This will warn on unused rules &c.
local $::RD_HINT = 1
unless defined $::RD_HINT; # Give out hints to help fix problems.
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('Sybase');
my $result = $parser->startrule($data);
return $translator->error("Parse failed.") unless defined $result;
warn Dumper($result) if $DEBUG;
my $schema = $translator->schema;
my @tables = sort { $result->{$a}->{'order'} <=> $result->{$b}->{'order'} }
keys %{$result};
for my $table_name (@tables) {
my $tdata = $result->{$table_name};
my $table = $schema->add_table(name => $tdata->{'name'})
or die "Can't create table '$table_name': ", $schema->error;
$table->comments($tdata->{'comments'});
my @fields = sort { $tdata->{'fields'}->{$a}->{'order'} <=> $tdata->{'fields'}->{$b}->{'order'} }
keys %{ $tdata->{'fields'} };
for my $fname (@fields) {
my $fdata = $tdata->{'fields'}{$fname};
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_inc'},
is_nullable => $fdata->{'nullable'},
comments => $fdata->{'comments'},
) or die $table->error;
$table->primary_key($field->name) if $fdata->{'is_primary_key'};
for my $qual (qw[ binary unsigned zerofill list ]) {
if (my $val = $fdata->{$qual} || $fdata->{ uc $qual }) {
next if ref $val eq 'ARRAY' && !@$val;
$field->extra($qual, $val);
}
}
if ($field->data_type =~ /(set|enum)/i && !$field->size) {
my %extra = $field->extra;
my $longest = 0;
for my $len (map {length} @{ $extra{'list'} || [] }) {
$longest = $len if $len > $longest;
}
$field->size($longest) if $longest;
}
for my $cdata (@{ $fdata->{'constraints'} }) {
next unless $cdata->{'type'} eq 'foreign_key';
$cdata->{'fields'} ||= [ $field->name ];
push @{ $tdata->{'constraints'} }, $cdata;
}
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
my $index = $table->add_index(
name => $idata->{'name'},
type => uc $idata->{'type'},
fields => $idata->{'fields'},
) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
my $constraint = $table->add_constraint(
name => $cdata->{'name'},
type => $cdata->{'type'},
fields => $cdata->{'fields'},
reference_table => $cdata->{'reference_table'},
reference_fields => $cdata->{'reference_fields'},
match_type => $cdata->{'match_type'} || '',
on_delete => $cdata->{'on_delete'} || $cdata->{'on_delete_do'},
on_update => $cdata->{'on_update'} || $cdata->{'on_update_do'},
) or die $table->error;
}
}
return 1;
}
1;
# -------------------------------------------------------------------
# Every hero becomes a bore at last.
# Ralph Waldo Emerson
# -------------------------------------------------------------------
=pod
=head1 AUTHOR
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
SQL::Translator, SQL::Translator::Parser::DBI, L.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/SQLite.pm 0000644 0000000 0000000 00000044721 14551163724 022542 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::SQLite;
=head1 NAME
SQL::Translator::Parser::SQLite - parser for SQLite
=head1 SYNOPSIS
use SQL::Translator;
use SQL::Translator::Parser::SQLite;
my $translator = SQL::Translator->new;
$translator->parser("SQL::Translator::Parser::SQLite");
=head1 DESCRIPTION
This is a grammar for parsing CREATE statements for SQLite as
described here:
http://www.sqlite.org/lang.html
CREATE INDEX
sql-statement ::=
CREATE [TEMP | TEMPORARY] [UNIQUE] INDEX index-name
ON [database-name .] table-name ( column-name [, column-name]* )
[ ON CONFLICT conflict-algorithm ]
column-name ::=
name [ ASC | DESC ]
CREATE TABLE
sql-command ::=
CREATE [TEMP | TEMPORARY] TABLE table-name (
column-def [, column-def]*
[, constraint]*
)
sql-command ::=
CREATE [TEMP | TEMPORARY] TABLE table-name AS select-statement
column-def ::=
name [type] [[CONSTRAINT name] column-constraint]*
type ::=
typename |
typename ( number ) |
typename ( number , number )
column-constraint ::=
NOT NULL [ conflict-clause ] |
PRIMARY KEY [sort-order] [ conflict-clause ] |
UNIQUE [ conflict-clause ] |
CHECK ( expr ) [ conflict-clause ] |
DEFAULT value
constraint ::=
PRIMARY KEY ( name [, name]* ) [ conflict-clause ]|
UNIQUE ( name [, name]* ) [ conflict-clause ] |
CHECK ( expr ) [ conflict-clause ]
conflict-clause ::=
ON CONFLICT conflict-algorithm
CREATE TRIGGER
sql-statement ::=
CREATE [TEMP | TEMPORARY] TRIGGER trigger-name [ BEFORE | AFTER ]
database-event ON [database-name .] table-name
trigger-action
sql-statement ::=
CREATE [TEMP | TEMPORARY] TRIGGER trigger-name INSTEAD OF
database-event ON [database-name .] view-name
trigger-action
database-event ::=
DELETE |
INSERT |
UPDATE |
UPDATE OF column-list
trigger-action ::=
[ FOR EACH ROW | FOR EACH STATEMENT ] [ WHEN expression ]
BEGIN
trigger-step ; [ trigger-step ; ]*
END
trigger-step ::=
update-statement | insert-statement |
delete-statement | select-statement
CREATE VIEW
sql-command ::=
CREATE [TEMP | TEMPORARY] VIEW view-name AS select-statement
ON CONFLICT clause
conflict-clause ::=
ON CONFLICT conflict-algorithm
conflict-algorithm ::=
ROLLBACK | ABORT | FAIL | IGNORE | REPLACE
expression
expr ::=
expr binary-op expr |
expr like-op expr |
unary-op expr |
( expr ) |
column-name |
table-name . column-name |
database-name . table-name . column-name |
literal-value |
function-name ( expr-list | * ) |
expr (+) |
expr ISNULL |
expr NOTNULL |
expr [NOT] BETWEEN expr AND expr |
expr [NOT] IN ( value-list ) |
expr [NOT] IN ( select-statement ) |
( select-statement ) |
CASE [expr] ( WHEN expr THEN expr )+ [ELSE expr] END
like-op::=
LIKE | GLOB | NOT LIKE | NOT GLOB
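As a minimal usage sketch (the table and the MySQL producer are illustrative placeholders), a string of SQLite DDL can be converted directly:
  use SQL::Translator;
  my $ddl = q{
      CREATE TABLE person (
          person_id INTEGER PRIMARY KEY AUTOINCREMENT,
          name      varchar(64) NOT NULL
      );
  };
  my $translator = SQL::Translator->new(
      parser   => 'SQLite',
      producer => 'MySQL',
  );
  my $mysql = $translator->translate(data => $ddl)
      or die $translator->error;
  print $mysql;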
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use SQL::Translator::Utils qw/ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our $GRAMMAR = <<'END_OF_GRAMMAR';
{
my ( %tables, $table_order, @views, @triggers );
sub _err {
my $max_lines = 5;
my @up_to_N_lines = split (/\n/, $_[1], $max_lines + 1);
die sprintf ("Unable to parse line %d:\n%s\n",
$_[0],
join "\n", (map { "'$_'" } @up_to_N_lines[0..$max_lines - 1 ]), @up_to_N_lines > $max_lines ? '...' : ()
);
}
}
#
# The "eofile" rule makes the parser fail if any "statement" rule
# fails. Otherwise, the first successful match by a "statement"
# won't cause the failure needed to know that the parse, as a whole,
# failed. -ky
#
startrule : statement(s) eofile {
$return = {
tables => \%tables,
views => \@views,
triggers => \@triggers,
}
}
eofile : /^\Z/
statement : begin_transaction
| commit
| drop
| create
| comment
| /^\Z/ | { _err ($thisline, $text) }
begin_transaction : /begin/i TRANSACTION(?) SEMICOLON
commit : /commit/i SEMICOLON
drop : /drop/i (tbl_drop | view_drop | trg_drop) SEMICOLON
tbl_drop: TABLE table_name
view_drop: VIEW if_exists(?) view_name
trg_drop: TRIGGER if_exists(?) trigger_name
comment : /^\s*(?:#|-{2}).*\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|-{2})\s*//;
$comment =~ s/\s*$//;
$return = $comment;
}
comment : /\/\*/ /[^\*]+/ /\*\//
{
my $comment = $item[2];
$comment =~ s/^\s*|\s*$//g;
$return = $comment;
}
#
# Create Index
#
create : CREATE TEMPORARY(?) UNIQUE(?) INDEX NAME ON table_name parens_field_list conflict_clause(?) SEMICOLON
{
my $db_name = $item[7]->{'db_name'} || '';
my $table_name = $item[7]->{'name'};
my $index = {
name => $item[5],
fields => $item[8],
on_conflict => $item[9][0],
is_temporary => $item[2][0] ? 1 : 0,
};
my $is_unique = $item[3][0];
if ( $is_unique ) {
$index->{'type'} = 'unique';
push @{ $tables{ $table_name }{'constraints'} }, $index;
}
else {
push @{ $tables{ $table_name }{'indices'} }, $index;
}
}
#
# Create Table
#
create : comment(s?) CREATE TEMPORARY(?) TABLE table_name '(' definition(s /,/) ')' SEMICOLON
{
my $db_name = $item[5]->{'db_name'} || '';
my $table_name = $item[5]->{'name'};
$tables{ $table_name }{'name'} = $table_name;
$tables{ $table_name }{'is_temporary'} = $item[3][0] ? 1 : 0;
$tables{ $table_name }{'comments'} = $item[1];
$tables{ $table_name }{'order'} = ++$table_order;
for my $def ( @{ $item[7] } ) {
if ( $def->{'supertype'} eq 'column' ) {
push @{ $tables{ $table_name }{'fields'} }, $def;
}
elsif ( $def->{'supertype'} eq 'constraint' ) {
push @{ $tables{ $table_name }{'constraints'} }, $def;
}
}
}
definition : constraint_def | column_def
column_def: comment(s?) NAME type(?) column_constraint_def(s?)
{
my $column = {
supertype => 'column',
name => $item[2],
data_type => $item[3][0]->{'type'},
size => $item[3][0]->{'size'},
is_nullable => 1,
is_primary_key => 0,
is_unique => 0,
check => '',
default => undef,
constraints => $item[4],
comments => $item[1],
};
for my $c ( @{ $item[4] } ) {
if ( $c->{'type'} eq 'not_null' ) {
$column->{'is_nullable'} = 0;
}
elsif ( $c->{'type'} eq 'primary_key' ) {
$column->{'is_primary_key'} = 1;
}
elsif ( $c->{'type'} eq 'unique' ) {
$column->{'is_unique'} = 1;
}
elsif ( $c->{'type'} eq 'check' ) {
$column->{'check'} = $c->{'expression'};
}
elsif ( $c->{'type'} eq 'default' ) {
$column->{'default'} = $c->{'value'};
}
elsif ( $c->{'type'} eq 'autoincrement' ) {
$column->{'is_auto_inc'} = 1;
}
}
$column;
}
type : WORD parens_value_list(?)
{
$return = {
type => $item[1],
size => $item[2][0],
}
}
column_constraint_def : CONSTRAINT constraint_name column_constraint
{
$return = {
name => $item[2],
%{ $item[3] },
}
}
|
column_constraint
column_constraint : NOT_NULL conflict_clause(?)
{
$return = {
type => 'not_null',
}
}
|
PRIMARY_KEY sort_order(?) conflict_clause(?)
{
$return = {
type => 'primary_key',
sort_order => $item[2][0],
on_conflict => $item[3][0],
}
}
|
UNIQUE conflict_clause(?)
{
$return = {
type => 'unique',
on_conflict => $item[2][0],
}
}
|
CHECK_C '(' expr ')' conflict_clause(?)
{
$return = {
type => 'check',
expression => $item[3],
on_conflict => $item[5][0],
}
}
|
DEFAULT VALUE
{
$return = {
type => 'default',
value => $item[2],
}
}
|
REFERENCES ref_def cascade_def(?)
{
$return = {
type => 'foreign_key',
reference_table => $item[2]{'reference_table'},
reference_fields => $item[2]{'reference_fields'},
on_delete => $item[3][0]{'on_delete'},
on_update => $item[3][0]{'on_update'},
}
}
|
AUTOINCREMENT
{
$return = {
type => 'autoincrement',
}
}
constraint_def : comment(s?) CONSTRAINT constraint_name table_constraint
{
$return = {
comments => $item[1],
name => $item[3],
%{ $item[4] },
}
}
|
comment(s?) table_constraint
{
$return = {
comments => $item[1],
%{ $item[2] },
}
}
table_constraint : PRIMARY_KEY parens_field_list conflict_clause(?)
{
$return = {
supertype => 'constraint',
type => 'primary_key',
fields => $item[2],
on_conflict => $item[3][0],
}
}
|
UNIQUE parens_field_list conflict_clause(?)
{
$return = {
supertype => 'constraint',
type => 'unique',
fields => $item[2],
on_conflict => $item[3][0],
}
}
|
CHECK_C '(' expr ')' conflict_clause(?)
{
$return = {
supertype => 'constraint',
type => 'check',
expression => $item[3],
on_conflict => $item[5][0],
}
}
|
FOREIGN_KEY parens_field_list REFERENCES ref_def cascade_def(?)
{
$return = {
supertype => 'constraint',
type => 'foreign_key',
fields => $item[2],
reference_table => $item[4]{'reference_table'},
reference_fields => $item[4]{'reference_fields'},
on_delete => $item[5][0]{'on_delete'},
on_update => $item[5][0]{'on_update'},
}
}
ref_def : table_name parens_field_list
{ $return = { reference_table => $item[1]{name}, reference_fields => $item[2] } }
cascade_def : cascade_update_def cascade_delete_def(?)
{ $return = { on_update => $item[1], on_delete => $item[2][0] } }
|
cascade_delete_def cascade_update_def(?)
{ $return = { on_delete => $item[1], on_update => $item[2][0] } }
cascade_delete_def : /on\s+delete\s+(set null|set default|cascade|restrict|no action)/i
{ $return = $1}
cascade_update_def : /on\s+update\s+(set null|set default|cascade|restrict|no action)/i
{ $return = $1}
table_name : qualified_name
qualified_name : NAME
{ $return = { name => $item[1] } }
qualified_name : /(\w+)\.(\w+)/
{ $return = { db_name => $1, name => $2 } }
field_name : NAME
constraint_name : NAME
conflict_clause : /on conflict/i conflict_algorithm
conflict_algorithm : /(rollback|abort|fail|ignore|replace)/i
parens_field_list : '(' column_list ')'
{ $item[2] }
column_list : field_name(s /,/)
parens_value_list : '(' VALUE(s /,/) ')'
{ $item[2] }
expr : /[^)]* \( [^)]+ \) [^)]*/x # parens, balanced one deep
| /[^)]+/
sort_order : /(ASC|DESC)/i
#
# Create Trigger
create : CREATE TEMPORARY(?) TRIGGER NAME before_or_after(?) database_event ON table_name trigger_action SEMICOLON
{
my $table_name = $item[8]->{'name'};
push @triggers, {
name => $item[4],
is_temporary => $item[2][0] ? 1 : 0,
when => $item[5][0],
instead_of => 0,
db_events => [ $item[6] ],
action => $item[9],
on_table => $table_name,
}
}
create : CREATE TEMPORARY(?) TRIGGER NAME instead_of database_event ON view_name trigger_action
{
my $table_name = $item[8]->{'name'};
push @triggers, {
name => $item[4],
is_temporary => $item[2][0] ? 1 : 0,
when => undef,
instead_of => 1,
db_events => [ $item[6] ],
action => $item[9],
on_table => $table_name,
}
}
database_event : /(delete|insert|update)/i
database_event : /update of/i column_list
trigger_action : for_each(?) when(?) BEGIN_C trigger_step(s) END_C
{
$return = {
for_each => $item[1][0],
when => $item[2][0],
steps => $item[4],
}
}
for_each : /FOR EACH ROW/i
when : WHEN expr { $item[2] }
string :
/'(\.|''|[^\\'])*'/
nonstring : /[^;\'"]+/
statement_body : string | nonstring
trigger_step : /(select|delete|insert|update)/i statement_body(s?) SEMICOLON
{
$return = join( ' ', $item[1], join ' ', @{ $item[2] || [] } )
}
before_or_after : /(before|after)/i { $return = lc $1 }
instead_of : /instead of/i
if_exists : /if exists/i
view_name : qualified_name
trigger_name : qualified_name
#
# Create View
#
create : CREATE TEMPORARY(?) VIEW view_name AS select_statement
{
push @views, {
name => $item[4]->{'name'},
sql => $item[6],
is_temporary => $item[2][0] ? 1 : 0,
}
}
select_statement : SELECT /[^;]+/ SEMICOLON
{
$return = join( ' ', $item[1], $item[2] );
}
#
# Tokens
#
BEGIN_C : /begin/i
END_C : /end/i
TRANSACTION: /transaction/i
CREATE : /create/i
TEMPORARY : /temp(orary)?/i { 1 }
TABLE : /table/i
INDEX : /index/i
NOT_NULL : /not null/i
PRIMARY_KEY : /primary key/i
FOREIGN_KEY : /foreign key/i
CHECK_C : /check/i
DEFAULT : /default/i
TRIGGER : /trigger/i
VIEW : /view/i
SELECT : /select/i
ON : /on/i
AS : /as/i
WORD : /\w+/
WHEN : /when/i
REFERENCES : /references/i
CONSTRAINT : /constraint/i
AUTOINCREMENT : /autoincrement/i
UNIQUE : /unique/i { 1 }
SEMICOLON : ';'
NAME : /\w+/
| DQSTRING
| SQSTRING
DQSTRING : '"' /((?:[^"]|"")+)/ '"'
{ ($return = $item[3]) =~ s/""/"/g }
SQSTRING : "'" /((?:[^']|'')*)/ "'"
{ ($return = $item[3]) =~ s/''/'/g }
VALUE : /[-+]?\d*\.?\d+(?:[eE]\d+)?/
{ $item[1] }
| SQSTRING
| /NULL/i
{ 'NULL' }
| /CURRENT_TIMESTAMP/i
{ 'CURRENT_TIMESTAMP' }
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
local $::RD_ERRORS = 1
unless defined $::RD_ERRORS; # Make sure the parser dies when it encounters an error
local $::RD_WARN = 1
unless defined $::RD_WARN; # Enable warnings. This will warn on unused rules &c.
local $::RD_HINT = 1
unless defined $::RD_HINT; # Give out hints to help fix problems.
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('SQLite');
my $result = $parser->startrule($data);
return $translator->error("Parse failed.") unless defined $result;
warn Dumper($result) if $DEBUG;
my $schema = $translator->schema;
my @tables = map { $_->[1] }
sort { $a->[0] <=> $b->[0] }
map { [ $result->{'tables'}{$_}->{'order'}, $_ ] }
keys %{ $result->{'tables'} };
for my $table_name (@tables) {
my $tdata = $result->{'tables'}{$table_name};
my $table = $schema->add_table(name => $tdata->{'name'},)
or die $schema->error;
$table->comments($tdata->{'comments'});
for my $fdata (@{ $tdata->{'fields'} }) {
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_inc'},
(
$fdata->{'is_auto_inc'}
? (extra => { auto_increment_type => 'monotonic' })
: ()
),
is_nullable => $fdata->{'is_nullable'},
comments => $fdata->{'comments'},
) or die $table->error;
$table->primary_key($field->name) if $fdata->{'is_primary_key'};
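# Column-level REFERENCES clauses are hoisted into the table-level
# constraint list so they are created together with the explicit
# table constraints processed further below.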
for my $cdata (@{ $fdata->{'constraints'} }) {
next unless $cdata->{'type'} eq 'foreign_key';
$cdata->{'fields'} ||= [ $field->name ];
push @{ $tdata->{'constraints'} }, $cdata;
}
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
my $index = $table->add_index(
name => $idata->{'name'},
type => uc($idata->{'type'} || ''),
fields => $idata->{'fields'},
) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
my $constraint = $table->add_constraint(
name => $cdata->{'name'},
type => $cdata->{'type'},
fields => $cdata->{'fields'},
reference_table => $cdata->{'reference_table'},
reference_fields => $cdata->{'reference_fields'},
match_type => $cdata->{'match_type'} || '',
on_delete => $cdata->{'on_delete'} || $cdata->{'on_delete_do'},
on_update => $cdata->{'on_update'} || $cdata->{'on_update_do'},
) or die $table->error;
}
}
for my $def (@{ $result->{'views'} || [] }) {
my $view = $schema->add_view(
name => $def->{'name'},
sql => $def->{'sql'},
);
}
for my $def (@{ $result->{'triggers'} || [] }) {
my $view = $schema->add_trigger(
name => $def->{'name'},
perform_action_when => $def->{'when'},
database_events => $def->{'db_events'},
action => $def->{'action'},
on_table => $def->{'on_table'},
scope => 'row', # SQLite only supports row triggers
);
}
return 1;
}
1;
# -------------------------------------------------------------------
# All wholesome food is caught without a net or a trap.
# William Blake
# -------------------------------------------------------------------
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
perl(1), Parse::RecDescent, SQL::Translator::Schema.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/Access.pm 0000644 0000000 0000000 00000026001 14551163724 022571 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::Access;
=head1 NAME
SQL::Translator::Parser::Access - parser for Access as produced by mdbtools
=head1 SYNOPSIS
use SQL::Translator;
use SQL::Translator::Parser::Access;
my $translator = SQL::Translator->new;
$translator->parser("SQL::Translator::Parser::Access");
=head1 DESCRIPTION
The grammar is derived from the MySQL grammar. The input is expected to be
something similar to the output of mdbtools (http://mdbtools.sourceforge.net/).
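A short, hypothetical example (here C<$ddl> stands for whatever DDL your
mdbtools export produced, and the YAML producer is an arbitrary choice):
  use SQL::Translator;
  my $translator = SQL::Translator->new;
  $translator->parser("SQL::Translator::Parser::Access");
  $translator->producer("YAML");
  my $yaml = $translator->translate(data => $ddl)
      or die $translator->error;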
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use SQL::Translator::Utils qw/ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our $GRAMMAR = <<'END_OF_GRAMMAR';
{
my ( %tables, $table_order, @table_comments );
}
#
# The "eofile" rule makes the parser fail if any "statement" rule
# fails. Otherwise, the first successful match by a "statement"
# won't cause the failure needed to know that the parse, as a whole,
# failed. -ky
#
startrule : statement(s) eofile { \%tables }
eofile : /^\Z/
statement : comment
| use
| set
| drop
| create
|
use : /use/i WORD ';'
{ @table_comments = () }
set : /set/i /[^;]+/ ';'
{ @table_comments = () }
drop : /drop/i TABLE /[^;]+/ ';'
drop : /drop/i WORD(s) ';'
{ @table_comments = () }
create : CREATE /database/i WORD ';'
{ @table_comments = () }
create : CREATE TABLE table_name '(' create_definition(s /,/) ')' ';'
{
my $table_name = $item{'table_name'};
$tables{ $table_name }{'order'} = ++$table_order;
$tables{ $table_name }{'table_name'} = $table_name;
if ( @table_comments ) {
$tables{ $table_name }{'comments'} = [ @table_comments ];
@table_comments = ();
}
my $i = 1;
for my $definition ( @{ $item[5] } ) {
if ( $definition->{'supertype'} eq 'field' ) {
my $field_name = $definition->{'name'};
$tables{ $table_name }{'fields'}{ $field_name } =
{ %$definition, order => $i };
$i++;
if ( $definition->{'is_primary_key'} ) {
push @{ $tables{ $table_name }{'constraints'} },
{
type => 'primary_key',
fields => [ $field_name ],
}
;
}
}
elsif ( $definition->{'supertype'} eq 'constraint' ) {
push @{ $tables{ $table_name }{'constraints'} }, $definition;
}
elsif ( $definition->{'supertype'} eq 'index' ) {
push @{ $tables{ $table_name }{'indices'} }, $definition;
}
}
1;
}
create : CREATE UNIQUE(?) /(index|key)/i index_name /on/i table_name '(' field_name(s /,/) ')' ';'
{
@table_comments = ();
push @{ $tables{ $item{'table_name'} }{'indices'} },
{
name => $item[4],
type => $item[2] ? 'unique' : 'normal',
fields => $item[8],
}
;
}
create_definition : constraint
| index
| field
| comment
|
comment : /^\s*--(.*)\n/
{
my $comment = $1;
$return = $comment;
push @table_comments, $comment;
}
field : field_name data_type field_qualifier(s?) reference_definition(?)
{
$return = {
supertype => 'field',
name => $item{'field_name'},
data_type => $item{'data_type'}{'type'},
size => $item{'data_type'}{'size'},
constraints => $item{'reference_definition(?)'},
}
}
|
field_qualifier : not_null
{
$return = {
null => $item{'not_null'},
}
}
field_qualifier : default_val
{
$return = {
default => $item{'default_val'},
}
}
field_qualifier : auto_inc
{
$return = {
is_auto_inc => $item{'auto_inc'},
}
}
field_qualifier : primary_key
{
$return = {
is_primary_key => $item{'primary_key'},
}
}
field_qualifier : unsigned
{
$return = {
is_unsigned => $item{'unsigned'},
}
}
field_qualifier : /character set/i WORD
{
$return = {
character_set => $item[2],
}
}
reference_definition : /references/i table_name parens_field_list(?) match_type(?) on_delete(?) on_update(?)
{
$return = {
type => 'foreign_key',
reference_table => $item[2],
reference_fields => $item[3][0],
match_type => $item[4][0],
on_delete => $item[5][0],
on_update => $item[6][0],
}
}
match_type : /match full/i { 'full' }
|
/match partial/i { 'partial' }
on_delete : /on delete/i reference_option
{ $item[2] }
on_update : /on update/i reference_option
{ $item[2] }
reference_option: /restrict/i |
/cascade/i |
/set null/i |
/no action/i |
/set default/i
{ $item[1] }
index : normal_index
| fulltext_index
|
table_name : NAME
field_name : NAME
index_name : NAME
data_type : access_data_type parens_value_list(s?) type_qualifier(s?)
{
$return = {
type => $item[1],
size => $item[2][0],
qualifiers => $item[3],
}
}
access_data_type : /long integer/i { $return = 'Long Integer' }
| /text/i { $return = 'Text' }
| /datetime (\(short\))?/i { $return = 'DateTime' }
| /boolean/i { $return = 'Boolean' }
| WORD
parens_field_list : '(' field_name(s /,/) ')'
{ $item[2] }
parens_value_list : '(' VALUE(s /,/) ')'
{ $item[2] }
type_qualifier : /(BINARY|UNSIGNED|ZEROFILL)/i
{ lc $item[1] }
field_type : WORD
create_index : /create/i /index/i
not_null : /not/i /null/i { $return = 0 }
unsigned : /unsigned/i { $return = 0 }
default_val : /default/i /'(?:.*?\')*.*?'|(?:')?[\w\d:.-]*(?:')?/
{
$item[2] =~ s/^\s*'|'\s*$//g;
$return = $item[2];
}
auto_inc : /auto_increment/i { 1 }
primary_key : /primary/i /key/i { 1 }
constraint : primary_key_def
| unique_key_def
| foreign_key_def
|
foreign_key_def : foreign_key_def_begin parens_field_list reference_definition
{
$return = {
supertype => 'constraint',
type => 'foreign_key',
name => $item[1],
fields => $item[2],
%{ $item{'reference_definition'} },
}
}
foreign_key_def_begin : /constraint/i /foreign key/i
{ $return = '' }
|
/constraint/i WORD /foreign key/i
{ $return = $item[2] }
|
/foreign key/i
{ $return = '' }
primary_key_def : primary_key index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'constraint',
name => $item{'index_name(?)'}[0],
type => 'primary_key',
fields => $item[4],
};
}
unique_key_def : UNIQUE KEY(?) index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'constraint',
name => $item{'index_name(?)'}[0],
type => 'unique',
fields => $item[5],
}
}
normal_index : KEY index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'index',
type => 'normal',
name => $item{'index_name(?)'}[0],
fields => $item[4],
}
}
fulltext_index : /fulltext/i KEY(?) index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'index',
type => 'fulltext',
name => $item{'index_name(?)'}[0],
fields => $item[5],
}
}
name_with_opt_paren : NAME parens_value_list(s?)
{ $item[2][0] ? "$item[1]($item[2][0][0])" : $item[1] }
UNIQUE : /unique/i { 1 }
KEY : /key/i | /index/i
table_option : WORD /\s*=\s*/ WORD
{
$return = { $item[1] => $item[3] };
}
CREATE : /create/i
TEMPORARY : /temporary/i
TABLE : /table/i
WORD : /\w+/
DIGITS : /\d+/
COMMA : ','
NAME : "`" /\w+/ "`"
{ $item[2] }
| /\w+/
{ $item[1] }
VALUE : /[-+]?\.?\d+(?:[eE]\d+)?/
{ $item[1] }
| /'.*?'/
{
# remove leading/trailing quotes
my $val = $item[1];
$val =~ s/^['"]|['"]$//g;
$return = $val;
}
| /NULL/
{ 'NULL' }
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
local $::RD_ERRORS = 1
unless defined $::RD_ERRORS; # Make sure the parser dies when it encounters an error
local $::RD_WARN = 1
unless defined $::RD_WARN; # Enable warnings. This will warn on unused rules &c.
local $::RD_HINT = 1
unless defined $::RD_HINT; # Give out hints to help fix problems.
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('Access');
my $result = $parser->startrule($data);
return $translator->error("Parse failed.") unless defined $result;
warn Dumper($result) if $DEBUG;
my $schema = $translator->schema;
my @tables = sort { $result->{$a}->{'order'} <=> $result->{$b}->{'order'} }
keys %{$result};
for my $table_name (@tables) {
my $tdata = $result->{$table_name};
my $table = $schema->add_table(name => $tdata->{'table_name'},)
or die $schema->error;
$table->comments($tdata->{'comments'});
my @fields = sort { $tdata->{'fields'}->{$a}->{'order'} <=> $tdata->{'fields'}->{$b}->{'order'} }
keys %{ $tdata->{'fields'} };
for my $fname (@fields) {
my $fdata = $tdata->{'fields'}{$fname};
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_inc'},
is_nullable => $fdata->{'null'},
comments => $fdata->{'comments'},
) or die $table->error;
$table->primary_key($field->name) if $fdata->{'is_primary_key'};
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
my $index = $table->add_index(
name => $idata->{'name'},
type => uc $idata->{'type'},
fields => $idata->{'fields'},
) or die $table->error;
}
}
return 1;
}
1;
# -------------------------------------------------------------------
# Where man is not nature is barren.
# William Blake
# -------------------------------------------------------------------
=pod
=head1 AUTHOR
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
=head1 SEE ALSO
perl(1), Parse::RecDescent, SQL::Translator::Schema.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/MySQL.pm 0000644 0000000 0000000 00000077624 14551163724 022356 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::MySQL;
=head1 NAME
SQL::Translator::Parser::MySQL - parser for MySQL
=head1 SYNOPSIS
use SQL::Translator;
use SQL::Translator::Parser::MySQL;
my $translator = SQL::Translator->new;
$translator->parser("SQL::Translator::Parser::MySQL");
=head1 DESCRIPTION
The grammar is influenced heavily by Tim Bunce's "mysql2ora" grammar.
Here's the word from the MySQL site
(http://www.mysql.com/doc/en/CREATE_TABLE.html):
CREATE [TEMPORARY] TABLE [IF NOT EXISTS] tbl_name [(create_definition,...)]
[table_options] [select_statement]
or
CREATE [TEMPORARY] TABLE [IF NOT EXISTS] tbl_name LIKE old_table_name;
create_definition:
col_name type [NOT NULL | NULL] [DEFAULT default_value] [AUTO_INCREMENT]
[PRIMARY KEY] [reference_definition]
or PRIMARY KEY (index_col_name,...)
or KEY [index_name] (index_col_name,...)
or INDEX [index_name] (index_col_name,...)
or UNIQUE [INDEX] [index_name] (index_col_name,...)
or FULLTEXT [INDEX] [index_name] (index_col_name,...)
or [CONSTRAINT symbol] FOREIGN KEY [index_name] (index_col_name,...)
[reference_definition]
or CHECK (expr)
type:
TINYINT[(length)] [UNSIGNED] [ZEROFILL]
or SMALLINT[(length)] [UNSIGNED] [ZEROFILL]
or MEDIUMINT[(length)] [UNSIGNED] [ZEROFILL]
or INT[(length)] [UNSIGNED] [ZEROFILL]
or INTEGER[(length)] [UNSIGNED] [ZEROFILL]
or BIGINT[(length)] [UNSIGNED] [ZEROFILL]
or REAL[(length,decimals)] [UNSIGNED] [ZEROFILL]
or DOUBLE[(length,decimals)] [UNSIGNED] [ZEROFILL]
or FLOAT[(length,decimals)] [UNSIGNED] [ZEROFILL]
or DECIMAL(length,decimals) [UNSIGNED] [ZEROFILL]
or NUMERIC(length,decimals) [UNSIGNED] [ZEROFILL]
or CHAR(length) [BINARY]
or VARCHAR(length) [BINARY]
or DATE
or TIME
or TIMESTAMP
or DATETIME
or TINYBLOB
or BLOB
or MEDIUMBLOB
or LONGBLOB
or TINYTEXT
or TEXT
or MEDIUMTEXT
or LONGTEXT
or ENUM(value1,value2,value3,...)
or SET(value1,value2,value3,...)
index_col_name:
col_name [(length)]
reference_definition:
REFERENCES tbl_name [(index_col_name,...)]
[MATCH FULL | MATCH PARTIAL]
[ON DELETE reference_option]
[ON UPDATE reference_option]
reference_option:
RESTRICT | CASCADE | SET NULL | NO ACTION | SET DEFAULT
table_options:
TYPE = {BDB | HEAP | ISAM | InnoDB | MERGE | MRG_MYISAM | MYISAM }
or ENGINE = {BDB | HEAP | ISAM | InnoDB | MERGE | MRG_MYISAM | MYISAM }
or AUTO_INCREMENT = #
or AVG_ROW_LENGTH = #
or [ DEFAULT ] CHARACTER SET charset_name
or CHECKSUM = {0 | 1}
or COLLATE collation_name
or COMMENT = "string"
or MAX_ROWS = #
or MIN_ROWS = #
or PACK_KEYS = {0 | 1 | DEFAULT}
or PASSWORD = "string"
or DELAY_KEY_WRITE = {0 | 1}
or ROW_FORMAT= { default | dynamic | fixed | compressed }
or RAID_TYPE= {1 | STRIPED | RAID0 } RAID_CHUNKS=# RAID_CHUNKSIZE=#
or UNION = (table_name,[table_name...])
or INSERT_METHOD= {NO | FIRST | LAST }
or DATA DIRECTORY="absolute path to directory"
or INDEX DIRECTORY="absolute path to directory"
A subset of the ALTER TABLE syntax that allows addition of foreign keys:
ALTER [IGNORE] TABLE tbl_name alter_specification [, alter_specification] ...
alter_specification:
ADD [CONSTRAINT [symbol]]
FOREIGN KEY [index_name] (index_col_name,...)
[reference_definition]
A subset of INSERT that we ignore:
INSERT anything
=head1 ARGUMENTS
This parser takes a single optional parser_arg, C<mysql_parser_version>, which
provides the desired version for the target database. Any statement in the processed
dump file that is commented with a version higher than the one supplied will be stripped.
The default C<mysql_parser_version> is set to the conservative value of 40000 (MySQL 4.0).
Valid version specifiers for C<mysql_parser_version> are listed in
L<SQL::Translator::Utils/parse_mysql_version>.
More information about the MySQL comment syntax can be found in the MySQL
reference manual.
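For example, to keep statements that a dump guards with versioned comments up
to MySQL 5.0.3, the version can be passed through C<parser_args> (the exact
value here is illustrative; anything understood by
L<SQL::Translator::Utils/parse_mysql_version> should work):
  use SQL::Translator;
  my $translator = SQL::Translator->new(
      parser      => 'MySQL',
      parser_args => { mysql_parser_version => '5.0.3' },
  );
  # Statements guarded with /*!NNNNN ... */ where NNNNN > 50003 are stripped.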
=cut
use strict;
use warnings;
our $VERSION = '1.65';
our $DEBUG;
$DEBUG = 0 unless defined $DEBUG;
use Data::Dumper;
use Storable qw(dclone);
use DBI qw(:sql_types);
use SQL::Translator::Utils qw/parse_mysql_version ddl_parser_instance/;
use base qw(Exporter);
our @EXPORT_OK = qw(parse);
our %type_mapping = ();
use constant DEFAULT_PARSER_VERSION => 40000;
our $GRAMMAR = << 'END_OF_GRAMMAR';
{
my ( $database_name, %tables, $table_order, @table_comments, %views,
$view_order, %procedures, $proc_order );
my $delimiter = ';';
}
#
# The "eofile" rule makes the parser fail if any "statement" rule
# fails. Otherwise, the first successful match by a "statement"
# won't cause the failure needed to know that the parse, as a whole,
# failed. -ky
#
startrule : statement(s) eofile {
{
database_name => $database_name,
tables => \%tables,
views => \%views,
procedures => \%procedures,
}
}
eofile : /^\Z/
statement : comment
| use
| set
| drop
| create
| alter
| insert
| delimiter
| empty_statement
|
use : /use/i NAME "$delimiter"
{
$database_name = $item[2];
@table_comments = ();
}
set : /set/i not_delimiter "$delimiter"
{ @table_comments = () }
drop : /drop/i TABLE not_delimiter "$delimiter"
drop : /drop/i NAME(s) "$delimiter"
{ @table_comments = () }
bit:
/(b'[01]{1,64}')/ |
/(b"[01]{1,64}")/
string :
# MySQL strings, unlike common SQL strings, can be double-quoted or
# single-quoted.
SQSTRING | DQSTRING
nonstring : /[^;\'"]+/
statement_body : string | nonstring
insert : /insert/i statement_body(s?) "$delimiter"
delimiter : /delimiter/i /[\S]+/
{ $delimiter = $item[2] }
empty_statement : "$delimiter"
alter : ALTER TABLE table_name alter_specification(s /,/) "$delimiter"
{
my $table_name = $item{'table_name'};
die "Cannot ALTER table '$table_name'; it does not exist"
unless $tables{ $table_name };
for my $definition ( @{ $item[4] } ) {
$definition->{'extra'}->{'alter'} = 1;
push @{ $tables{ $table_name }{'constraints'} }, $definition;
}
}
alter_specification : ADD foreign_key_def
{ $return = $item[2] }
create : CREATE /database/i NAME "$delimiter"
{ @table_comments = () }
create : CREATE TEMPORARY(?) TABLE opt_if_not_exists(?) table_name '(' create_definition(s /,/) /(,\s*)?\)/ table_option(s?) "$delimiter"
{
my $table_name = $item{'table_name'};
die "There is more than one definition for $table_name"
if ($tables{$table_name});
$tables{ $table_name }{'order'} = ++$table_order;
$tables{ $table_name }{'table_name'} = $table_name;
if ( @table_comments ) {
$tables{ $table_name }{'comments'} = [ @table_comments ];
@table_comments = ();
}
my $i = 1;
for my $definition ( @{ $item[7] } ) {
if ( $definition->{'supertype'} eq 'field' ) {
my $field_name = $definition->{'name'};
$tables{ $table_name }{'fields'}{ $field_name } =
{ %$definition, order => $i };
$i++;
if ( $definition->{'is_primary_key'} ) {
push @{ $tables{ $table_name }{'constraints'} },
{
type => 'primary_key',
fields => [ $field_name ],
}
;
}
}
elsif ( $definition->{'supertype'} eq 'constraint' ) {
push @{ $tables{ $table_name }{'constraints'} }, $definition;
}
elsif ( $definition->{'supertype'} eq 'index' ) {
push @{ $tables{ $table_name }{'indices'} }, $definition;
}
}
if ( my @options = @{ $item{'table_option(s?)'} } ) {
for my $option ( @options ) {
my ( $key, $value ) = each %$option;
if ( $key eq 'comment' ) {
push @{ $tables{ $table_name }{'comments'} }, $value;
}
else {
push @{ $tables{ $table_name }{'table_options'} }, $option;
}
}
}
1;
}
opt_if_not_exists : /if not exists/i
create : CREATE UNIQUE(?) /(index|key)/i index_name /on/i table_name '(' field_name(s /,/) ')' "$delimiter"
{
@table_comments = ();
push @{ $tables{ $item{'table_name'} }{'indices'} },
{
name => $item[4],
type => $item[2][0] ? 'unique' : 'normal',
fields => $item[8],
}
;
}
create : CREATE /trigger/i NAME not_delimiter "$delimiter"
{
@table_comments = ();
}
create : CREATE PROCEDURE NAME not_delimiter "$delimiter"
{
@table_comments = ();
my $func_name = $item[3];
my $owner = '';
my $sql = "$item[1] $item[2] $item[3] $item[4]";
$procedures{ $func_name }{'order'} = ++$proc_order;
$procedures{ $func_name }{'name'} = $func_name;
$procedures{ $func_name }{'owner'} = $owner;
$procedures{ $func_name }{'sql'} = $sql;
}
PROCEDURE : /procedure/i
| /function/i
create : CREATE or_replace(?) create_view_option(s?) /view/i NAME /as/i view_select_statement "$delimiter"
{
@table_comments = ();
my $view_name = $item{'NAME'};
my $select_sql = $item{'view_select_statement'};
my $options = $item{'create_view_option(s?)'};
my $sql = join(q{ },
grep { defined and length }
map { ref $_ eq 'ARRAY' ? @$_ : $_ }
$item{'CREATE'},
$item{'or_replace(?)'},
$options,
$view_name,
'as select',
join(', ',
map {
sprintf('%s%s',
$_->{'name'},
$_->{'alias'} ? ' as ' . $_->{'alias'} : ''
)
}
@{ $select_sql->{'columns'} || [] }
),
' from ',
join(', ',
map {
sprintf('%s%s',
$_->{'name'},
$_->{'alias'} ? ' as ' . $_->{'alias'} : ''
)
}
@{ $select_sql->{'from'}{'tables'} || [] }
),
$select_sql->{'from'}{'where'}
? 'where ' . $select_sql->{'from'}{'where'}
: ''
,
);
# Hack to strip database from function calls in SQL
$sql =~ s#`\w+`\.(`\w+`\()##g;
$views{ $view_name }{'order'} = ++$view_order;
$views{ $view_name }{'name'} = $view_name;
$views{ $view_name }{'sql'} = $sql;
$views{ $view_name }{'options'} = $options;
$views{ $view_name }{'select'} = $item{'view_select_statement'};
}
create_view_option : view_algorithm | view_sql_security | view_definer
or_replace : /or replace/i
view_algorithm : /algorithm/i /=/ WORD
{
$return = "$item[1]=$item[3]";
}
view_definer : /definer=\S+/i
view_sql_security : /sql \s+ security \s+ (definer|invoker)/ixs
not_delimiter : /.*?(?=$delimiter)/is
view_select_statement : /[(]?/ /select/i view_column_def /from/i view_table_def /[)]?/
{
$return = {
columns => $item{'view_column_def'},
from => $item{'view_table_def'},
};
}
view_column_def : /(.*?)(?=\bfrom\b)/ixs
{
# split on commas not in parens,
# e.g., "concat_ws(\' \', first, last) as first_last"
my @tmp = $1 =~ /((?:[^(,]+|\(.*?\))+)/g;
my @cols;
for my $col ( @tmp ) {
my ( $name, $alias ) = map {
s/^\s+|\s+$//g;
s/[`]//g;
$_
} split /\s+as\s+/i, $col;
push @cols, { name => $name, alias => $alias || '' };
}
$return = \@cols;
}
not_delimiter : /.*?(?=$delimiter)/is
view_table_def : not_delimiter
{
my $clause = $item[1];
my $where = $1 if $clause =~ s/\bwhere \s+ (.*)//ixs;
$clause =~ s/[)]\s*$//;
my @tables;
for my $tbl ( split( /\s*,\s*/, $clause ) ) {
my ( $name, $alias ) = split /\s+as\s+/i, $tbl;
push @tables, { name => $name, alias => $alias || '' };
}
$return = {
tables => \@tables,
where => $where || '',
};
}
view_column_alias : /as/i NAME
{ $return = $item[2] }
create_definition : constraint
| index
| field
| comment
|
comment : /^\s*(?:#|-{2}).*\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|--)\s*//;
$comment =~ s/\s*$//;
$return = $comment;
}
comment : m{ / \* (?! \!) .*? \* / }xs
{
my $comment = $item[2];
$comment = substr($comment, 0, -2);
$comment =~ s/^\s*|\s*$//g;
$return = $comment;
}
comment_like_command : m{/\*!(\d+)?}s
comment_end : m{ \* / }xs
field_comment : /^\s*(?:#|-{2}).*\n/
{
my $comment = $item[1];
$comment =~ s/^\s*(#|--)\s*//;
$comment =~ s/\s*$//;
$return = $comment;
}
blank : /\s*/
field : field_comment(s?) field_name data_type field_qualifier(s?) reference_definition(?) on_update(?) field_comment(s?)
{
my %qualifiers = map { %$_ } @{ $item{'field_qualifier(s?)'} || [] };
if ( my @type_quals = @{ $item{'data_type'}{'qualifiers'} || [] } ) {
$qualifiers{ $_ } = 1 for @type_quals;
}
my $null = defined $qualifiers{'not_null'}
? $qualifiers{'not_null'} : 1;
delete $qualifiers{'not_null'};
my @comments = ( @{ $item[1] }, (exists $qualifiers{comment} ? delete $qualifiers{comment} : ()) , @{ $item[7] } );
$return = {
supertype => 'field',
name => $item{'field_name'},
data_type => $item{'data_type'}{'type'},
size => $item{'data_type'}{'size'},
list => $item{'data_type'}{'list'},
null => $null,
constraints => $item{'reference_definition(?)'},
comments => [ @comments ],
%qualifiers,
}
}
|
field_qualifier : not_null
{
$return = {
null => $item{'not_null'},
}
}
field_qualifier : default_val
{
$return = {
default => $item{'default_val'},
}
}
field_qualifier : auto_inc
{
$return = {
is_auto_inc => $item{'auto_inc'},
}
}
field_qualifier : primary_key
{
$return = {
is_primary_key => $item{'primary_key'},
}
}
field_qualifier : unsigned
{
$return = {
is_unsigned => $item{'unsigned'},
}
}
field_qualifier : /character set/i WORD
{
$return = {
'CHARACTER SET' => $item[2],
}
}
field_qualifier : /collate/i WORD
{
$return = {
COLLATE => $item[2],
}
}
field_qualifier : /on update/i CURRENT_TIMESTAMP
{
$return = {
'ON UPDATE' => $item[2],
}
}
field_qualifier : /unique/i KEY(?)
{
$return = {
is_unique => 1,
}
}
field_qualifier : KEY
{
$return = {
has_index => 1,
}
}
field_qualifier : /comment/i string
{
$return = {
comment => $item[2],
}
}
reference_definition : /references/i table_name parens_field_list(?) match_type(?) on_delete(?) on_update(?)
{
$return = {
type => 'foreign_key',
reference_table => $item[2],
reference_fields => $item[3][0],
match_type => $item[4][0],
on_delete => $item[5][0],
on_update => $item[6][0],
}
}
match_type : /match full/i { 'full' }
|
/match partial/i { 'partial' }
on_delete : /on delete/i reference_option
{ $item[2] }
on_update :
/on update/i CURRENT_TIMESTAMP
{ $item[2] }
|
/on update/i reference_option
{ $item[2] }
reference_option: /restrict/i |
/cascade/i |
/set null/i |
/no action/i |
/set default/i
{ $item[1] }
index : normal_index
| fulltext_index
| spatial_index
|
table_name : NAME
field_name : NAME
index_name : NAME
data_type : WORD parens_value_list(s?) type_qualifier(s?)
{
my $type = $item[1];
my $size; # field size, applicable only to non-set fields
my $list; # set list, applicable only to sets (duh)
if ( uc($type) =~ /^(SET|ENUM)$/ ) {
$size = undef;
$list = $item[2][0];
}
else {
$size = $item[2][0];
$list = [];
}
$return = {
type => $type,
size => $size,
list => $list,
qualifiers => $item[3],
}
}
parens_field_list : '(' field_name(s /,/) ')'
{ $item[2] }
parens_value_list : '(' VALUE(s /,/) ')'
{ $item[2] }
type_qualifier : /(BINARY|UNSIGNED|ZEROFILL)/i
{ lc $item[1] }
field_type : WORD
create_index : /create/i /index/i
not_null : /not/i /null/i
{ $return = 0 }
|
/null/i
{ $return = 1 }
unsigned : /unsigned/i { $return = 0 }
default_val :
/default/i CURRENT_TIMESTAMP
{
$return = $item[2];
}
|
/default/i VALUE
{
$return = $item[2];
}
|
/default/i bit
{
$item[2] =~ s/b['"]([01]+)['"]/$1/g;
$return = $item[2];
}
|
/default/i /[\w\d:.-]+/
{
$return = $item[2];
}
|
/default/i NAME # column value, allowed in MariaDB
{
$return = $item[2];
}
auto_inc : /auto_increment/i { 1 }
primary_key : /primary/i /key/i { 1 }
constraint : primary_key_def
| unique_key_def
| foreign_key_def
| check_def
|
expr : /[^)]* \( [^)]+ \) [^)]*/x # parens, balanced one deep
| /[^)]+/
check_def : check_def_begin '(' expr ')'
{
$return = {
supertype => 'constraint',
type => 'check',
name => $item[1],
expression => $item[3],
}
}
check_def_begin : /constraint/i /check/i NAME
{ $return = $item[3] }
|
/constraint/i NAME /check/i
{ $return = $item[2] }
|
/constraint/i /check/i
{ $return = '' }
foreign_key_def : foreign_key_def_begin parens_field_list reference_definition
{
$return = {
supertype => 'constraint',
type => 'foreign_key',
name => $item[1],
fields => $item[2],
%{ $item{'reference_definition'} },
}
}
foreign_key_def_begin : /constraint/i /foreign key/i NAME
{ $return = $item[3] }
|
/constraint/i NAME /foreign key/i
{ $return = $item[2] }
|
/constraint/i /foreign key/i
{ $return = '' }
|
/foreign key/i NAME
{ $return = $item[2] }
|
/foreign key/i
{ $return = '' }
primary_key_def : primary_key index_type(?) '(' name_with_opt_paren(s /,/) ')' index_type(?)
{
$return = {
supertype => 'constraint',
type => 'primary_key',
fields => $item[4],
options => $item[2][0] || $item[6][0],
};
}
# In theory, and according to the doc, names should not be allowed here, but
# MySQL accepts (and ignores) them, so we are not going to be any stricter :)
| primary_key index_name_not_using(?) '(' name_with_opt_paren(s /,/) ')' index_type(?)
{
$return = {
supertype => 'constraint',
type => 'primary_key',
fields => $item[4],
options => $item[6][0],
};
}
unique_key_def : UNIQUE KEY(?) index_name_not_using(?) index_type(?) '(' name_with_opt_paren(s /,/) ')' index_type(?)
{
$return = {
supertype => 'constraint',
name => $item[3][0],
type => 'unique',
fields => $item[6],
options => $item[4][0] || $item[8][0],
}
}
normal_index : KEY index_name_not_using(?) index_type(?) '(' name_with_opt_paren(s /,/) ')' index_type(?)
{
$return = {
supertype => 'index',
type => 'normal',
name => $item[2][0],
fields => $item[5],
options => $item[3][0] || $item[7][0],
}
}
index_name_not_using : QUOTED_NAME
| /(\b(?!using)\w+\b)/ { $return = ($1 =~ /^using/i) ? undef : $1 }
index_type : /using (btree|hash|rtree)/i { $return = uc $1 }
fulltext_index : /fulltext/i KEY(?) index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'index',
type => 'fulltext',
name => $item{'index_name(?)'}[0],
fields => $item[5],
}
}
spatial_index : /spatial/i KEY(?) index_name(?) '(' name_with_opt_paren(s /,/) ')'
{
$return = {
supertype => 'index',
type => 'spatial',
name => $item{'index_name(?)'}[0],
fields => $item[5],
}
}
name_with_opt_paren : NAME parens_value_list(s?)
{ $item[2][0] ? "$item[1]($item[2][0][0])" : $item[1] }
UNIQUE : /unique/i
KEY : /key/i | /index/i
table_option : /comment/i /=/ string
{
$return = { comment => $item[3] };
}
| /(default )?(charset|character set)/i /\s*=?\s*/ NAME
{
$return = { 'CHARACTER SET' => $item[3] };
}
| /collate/i NAME
{
$return = { 'COLLATE' => $item[2] }
}
| /union/i /\s*=\s*/ '(' table_name(s /,/) ')'
{
$return = { $item[1] => $item[4] };
}
| WORD /\s*=\s*/ table_option_value
{
$return = { $item[1] => $item[3] };
}
table_option_value : VALUE
| NAME
default : /default/i
ADD : /add/i
ALTER : /alter/i
CREATE : /create/i
TEMPORARY : /temporary/i
TABLE : /table/i
WORD : /\w+/
DIGITS : /\d+/
COMMA : ','
BACKTICK : '`'
DOUBLE_QUOTE: '"'
SINGLE_QUOTE: "'"
QUOTED_NAME : BQSTRING
| SQSTRING
| DQSTRING
# MySQL strings, unlike common SQL strings, can have the delimiters
# escaped either by doubling or by backslashing.
BQSTRING: BACKTICK /(?:[^\\`]|``|\\.)*/ BACKTICK
{ ($return = $item[3]) =~ s/(\\[\\`]|``)/substr($1,1)/ge }
DQSTRING: DOUBLE_QUOTE /(?:[^\\"]|""|\\.)*/ DOUBLE_QUOTE
{ ($return = $item[3]) =~ s/(\\[\\"]|"")/substr($1,1)/ge }
SQSTRING: SINGLE_QUOTE /(?:[^\\']|''|\\.)*/ SINGLE_QUOTE
{ ($return = $item[3]) =~ s/(\\[\\']|'')/substr($1,1)/ge }
NAME: QUOTED_NAME
| /\w+/
VALUE : /[-+]?\d*\.?\d+(?:[eE]\d+)?/
{ $item[1] }
| SQSTRING
| DQSTRING
| /NULL/i
{ 'NULL' }
# always a scalar-ref, so that it is treated as a function and not quoted by consumers
CURRENT_TIMESTAMP :
/current_timestamp(\(\))?/i { \'CURRENT_TIMESTAMP' }
| /now\(\)/i { \'CURRENT_TIMESTAMP' }
END_OF_GRAMMAR
sub parse {
my ($translator, $data) = @_;
# Enable warnings within the Parse::RecDescent module.
# Make sure the parser dies when it encounters an error
local $::RD_ERRORS = 1 unless defined $::RD_ERRORS;
# Enable warnings. This will warn on unused rules &c.
local $::RD_WARN = 1 unless defined $::RD_WARN;
# Give out hints to help fix problems.
local $::RD_HINT = 1 unless defined $::RD_HINT;
local $::RD_TRACE = $translator->trace ? 1 : undef;
local $DEBUG = $translator->debug;
my $parser = ddl_parser_instance('MySQL');
# Preprocess for MySQL-specific and not-before-version comments
# from mysqldump
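# A typical dump line looks like:
#   /*!40100 DEFAULT CHARACTER SET utf8 */
# The substitution below keeps the guarded text (uncommented) when the
# requested parser version is at least the guard version, and drops it
# otherwise; unversioned /*! ... */ comments are always kept.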
my $parser_version = parse_mysql_version($translator->parser_args->{mysql_parser_version}, 'mysql')
|| DEFAULT_PARSER_VERSION;
while ($data =~ s#/\*!(\d{5})?(.*?)\*/#($1 && $1 > $parser_version ? '' : $2)#es) {
# do nothing; is there a better way to write this? -- ky
}
my $result = $parser->startrule($data);
return $translator->error("Parse failed.") unless defined $result;
warn "Parse result:" . Dumper($result) if $DEBUG;
my $schema = $translator->schema;
$schema->name($result->{'database_name'}) if $result->{'database_name'};
my @tables
= sort { $result->{'tables'}{$a}{'order'} <=> $result->{'tables'}{$b}{'order'} } keys %{ $result->{'tables'} };
for my $table_name (@tables) {
my $tdata = $result->{tables}{$table_name};
my $table = $schema->add_table(name => $tdata->{'table_name'},)
or die $schema->error;
$table->comments($tdata->{'comments'});
my @fields = sort { $tdata->{'fields'}->{$a}->{'order'} <=> $tdata->{'fields'}->{$b}->{'order'} }
keys %{ $tdata->{'fields'} };
for my $fname (@fields) {
my $fdata = $tdata->{'fields'}{$fname};
my $field = $table->add_field(
name => $fdata->{'name'},
data_type => $fdata->{'data_type'},
size => $fdata->{'size'},
default_value => $fdata->{'default'},
is_auto_increment => $fdata->{'is_auto_inc'},
is_nullable => $fdata->{'null'},
comments => $fdata->{'comments'},
) or die $table->error;
$table->primary_key($field->name) if $fdata->{'is_primary_key'};
for my $qual (qw[ binary unsigned zerofill list collate ], 'character set', 'on update') {
if (my $val = $fdata->{$qual} || $fdata->{ uc $qual }) {
next if ref $val eq 'ARRAY' && !@$val;
$field->extra($qual, $val);
}
}
if ($fdata->{'has_index'}) {
$table->add_index(
name => '',
type => 'NORMAL',
fields => $fdata->{'name'},
) or die $table->error;
}
if ($fdata->{'is_unique'}) {
$table->add_constraint(
name => '',
type => 'UNIQUE',
fields => $fdata->{'name'},
) or die $table->error;
}
for my $cdata (@{ $fdata->{'constraints'} }) {
next unless $cdata->{'type'} eq 'foreign_key';
$cdata->{'fields'} ||= [ $field->name ];
push @{ $tdata->{'constraints'} }, $cdata;
}
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
my $index = $table->add_index(
name => $idata->{'name'},
type => uc $idata->{'type'},
fields => $idata->{'fields'},
) or die $table->error;
}
if (my @options = @{ $tdata->{'table_options'} || [] }) {
my @cleaned_options;
my @ignore_opts
= $translator->parser_args->{'ignore_opts'}
? split(/,/, $translator->parser_args->{'ignore_opts'})
: ();
if (@ignore_opts) {
my $ignores = { map { $_ => 1 } @ignore_opts };
foreach my $option (@options) {
# make sure the option isn't in ignore list
my ($option_key) = keys %$option;
if (!exists $ignores->{$option_key}) {
push @cleaned_options, $option;
}
}
} else {
@cleaned_options = @options;
}
$table->options(\@cleaned_options) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
my $constraint = $table->add_constraint(
name => $cdata->{'name'},
type => $cdata->{'type'},
fields => $cdata->{'fields'},
expression => $cdata->{'expression'},
reference_table => $cdata->{'reference_table'},
reference_fields => $cdata->{'reference_fields'},
match_type => $cdata->{'match_type'} || '',
on_delete => $cdata->{'on_delete'} || $cdata->{'on_delete_do'},
on_update => $cdata->{'on_update'} || $cdata->{'on_update_do'},
) or die $table->error;
}
# After the constraints and PK/idxs have been created,
# we normalize fields
normalize_field($_) for $table->get_fields;
}
my @procedures = sort { $result->{procedures}->{$a}->{'order'} <=> $result->{procedures}->{$b}->{'order'} }
keys %{ $result->{procedures} };
for my $proc_name (@procedures) {
$schema->add_procedure(
name => $proc_name,
owner => $result->{procedures}->{$proc_name}->{owner},
sql => $result->{procedures}->{$proc_name}->{sql},
);
}
my @views
= sort { $result->{views}->{$a}->{'order'} <=> $result->{views}->{$b}->{'order'} } keys %{ $result->{views} };
for my $view_name (@views) {
my $view = $result->{'views'}{$view_name};
my @flds = map { $_->{'alias'} || $_->{'name'} } @{ $view->{'select'}{'columns'} || [] };
my @from = map { $_->{'alias'} || $_->{'name'} } @{ $view->{'from'}{'tables'} || [] };
$schema->add_view(
name => $view_name,
sql => $view->{'sql'},
order => $view->{'order'},
fields => \@flds,
tables => \@from,
options => $view->{'options'}
);
}
return 1;
}
# Takes a field and normalizes its data type and size to MySQL's implicit
# defaults; the original definition is preserved via parsed_field() whenever
# anything is changed.
sub normalize_field {
my ($field) = @_;
my ($size, $type, $list, $unsigned, $changed);
$size = $field->size;
$type = $field->data_type;
$list = $field->extra->{list} || [];
$unsigned = defined($field->extra->{unsigned});
if (!ref $size && $size eq 0) {
if (lc $type eq 'tinyint') {
$changed = $size != 4 - $unsigned;
$size = 4 - $unsigned;
} elsif (lc $type eq 'smallint') {
$changed = $size != 6 - $unsigned;
$size = 6 - $unsigned;
} elsif (lc $type eq 'mediumint') {
$changed = $size != 9 - $unsigned;
$size = 9 - $unsigned;
} elsif ($type =~ /^int(eger)?$/i) {
$changed = $size != 11 - $unsigned || $type ne 'int';
$type = 'int';
$size = 11 - $unsigned;
} elsif (lc $type eq 'bigint') {
$changed = $size != 20;
$size = 20;
} elsif (lc $type =~ /(float|double|decimal|numeric|real|fixed|dec)/) {
my $old_size = (ref $size || '') eq 'ARRAY' ? $size : [];
$changed
= @$old_size != 2
|| $old_size->[0] != 8
|| $old_size->[1] != 2;
$size = [ 8, 2 ];
}
}
if ($type =~ /^tiny(text|blob)$/i) {
$changed = $size != 255;
$size = 255;
} elsif ($type =~ /^(blob|text)$/i) {
$changed = $size != 65_535;
$size = 65_535;
} elsif ($type =~ /^medium(blob|text)$/i) {
$changed = $size != 16_777_215;
$size = 16_777_215;
} elsif ($type =~ /^long(blob|text)$/i) {
$changed = $size != 4_294_967_295;
$size = 4_294_967_295;
}
if ($field->data_type =~ /(set|enum)/i && !$field->size) {
my %extra = $field->extra;
my $longest = 0;
for my $len (map {length} @{ $extra{'list'} || [] }) {
$longest = $len if $len > $longest;
}
$changed = 1;
$size = $longest if $longest;
}
if ($changed) {
# We only want to clone the field, not *everything*
{
local $field->{table} = undef;
$field->parsed_field(dclone($field));
$field->parsed_field->{table} = $field->table;
}
$field->size($size);
$field->data_type($type);
$field->sql_data_type($type_mapping{ lc $type })
if exists $type_mapping{ lc $type };
$field->extra->{list} = $list if @$list;
}
}
1;
# -------------------------------------------------------------------
# Where man is not nature is barren.
# William Blake
# -------------------------------------------------------------------
=pod
=head1 AUTHOR
Ken Youens-Clark E<lt>kclark@cpan.orgE<gt>,
Chris Mungall E<lt>cjm@fruitfly.orgE<gt>.
=head1 SEE ALSO
Parse::RecDescent, SQL::Translator::Schema.
=cut
SQL-Translator-1.65/lib/SQL/Translator/Parser/JSON.pm 0000644 0000000 0000000 00000006462 14551163724 022152 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::JSON;
use strict;
use warnings;
our $VERSION = '1.65';
use SQL::Translator::Schema;
use SQL::Translator::Utils qw(header_comment);
use Data::Dumper;
use JSON::MaybeXS 'from_json';
sub parse {
my ($translator, $data) = @_;
$data = from_json($data);
$data = $data->{'schema'};
warn "JSON data:", Dumper($data) if $translator->debug;
my $schema = $translator->schema;
#
# Tables
#
my @tables = map { $data->{'tables'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'tables'}{$_}{'order'} || 0, $_ ] }
keys %{ $data->{'tables'} };
for my $tdata (@tables) {
my $table = $schema->add_table(map { $tdata->{$_} ? ($_ => $tdata->{$_}) : () } (qw/name extra options/))
or die $schema->error;
my @fields = map { $tdata->{'fields'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $tdata->{'fields'}{$_}{'order'}, $_ ] }
keys %{ $tdata->{'fields'} };
for my $fdata (@fields) {
$table->add_field(%$fdata) or die $table->error;
$table->primary_key($fdata->{'name'})
if $fdata->{'is_primary_key'};
}
for my $idata (@{ $tdata->{'indices'} || [] }) {
$table->add_index(%$idata) or die $table->error;
}
for my $cdata (@{ $tdata->{'constraints'} || [] }) {
$table->add_constraint(%$cdata) or die $table->error;
}
$table->comments($tdata->{'comments'})
if exists $tdata->{'comments'};
}
#
# Views
#
my @views = map { $data->{'views'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'views'}{$_}{'order'}, $_ ] }
keys %{ $data->{'views'} };
for my $vdata (@views) {
$schema->add_view(%$vdata) or die $schema->error;
}
#
# Triggers
#
my @triggers = map { $data->{'triggers'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'triggers'}{$_}{'order'}, $_ ] }
keys %{ $data->{'triggers'} };
for my $tdata (@triggers) {
$schema->add_trigger(%$tdata) or die $schema->error;
}
#
# Procedures
#
my @procedures = map { $data->{'procedures'}{ $_->[1] } }
sort { $a->[0] <=> $b->[0] }
map { [ $data->{'procedures'}{$_}{'order'}, $_ ] }
keys %{ $data->{'procedures'} };
for my $tdata (@procedures) {
$schema->add_procedure(%$tdata) or die $schema->error;
}
if (my $tr_data = $data->{'translator'}) {
$translator->add_drop_table($tr_data->{'add_drop_table'});
$translator->filename($tr_data->{'filename'});
$translator->no_comments($tr_data->{'no_comments'});
$translator->parser_args($tr_data->{'parser_args'});
$translator->producer_args($tr_data->{'producer_args'});
$translator->parser_type($tr_data->{'parser_type'});
$translator->producer_type($tr_data->{'producer_type'});
$translator->show_warnings($tr_data->{'show_warnings'});
$translator->trace($tr_data->{'trace'});
}
return 1;
}
1;
__END__
=head1 NAME
SQL::Translator::Parser::JSON - Parse a JSON representation of a schema
=head1 SYNOPSIS
use SQL::Translator;
my $translator = SQL::Translator->new(parser => "JSON");
=head1 DESCRIPTION
C<SQL::Translator::Parser::JSON> parses a schema serialized with JSON.
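A minimal, hand-written sketch of the document shape this parser expects (a
top-level C<schema> key containing C<tables>, and optionally C<views>,
C<triggers> and C<procedures>); the table and field names here are invented,
and only a few of the possible field attributes are shown:
  use SQL::Translator;
  my $json_string = q{
    { "schema": { "tables": { "person": {
        "name": "person", "order": 1,
        "fields": {
          "id":   { "name": "id",   "data_type": "integer", "order": 1, "is_primary_key": 1 },
          "name": { "name": "name", "data_type": "varchar", "size": 64, "order": 2 }
        } } } } }
  };
  my $translator = SQL::Translator->new(parser => 'JSON');
  $translator->producer('SQLite');   # any producer will do
  print $translator->translate(data => $json_string);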
=head1 AUTHORS
Darren Chamberlain E<lt>darren@cpan.orgE<gt>,
Ken Y. Clark E<lt>kclark@cpan.orgE<gt>.
Jon Jensen E<lt>jonj@cpan.orgE<gt>.
SQL-Translator-1.65/lib/SQL/Translator/Parser/DB2/ 0000755 0000000 0000000 00000000000 14551164244 021400 5 ustar 00root root 0000000 0000000 SQL-Translator-1.65/lib/SQL/Translator/Parser/DB2/Grammar.pm 0000644 0000000 0000000 00006300242 14541265222 023330 0 ustar 00root root 0000000 0000000 package SQL::Translator::Parser::DB2::Grammar;
use Parse::RecDescent;
{
my $ERRORS;
package Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar;
use strict;
use vars qw($skip $AUTOLOAD );
$skip = '\s*';
my (%tables, $table_order, @table_comments, @views, @triggers);
{
local $SIG{__WARN__} = sub {0};
# PRETEND TO BE IN Parse::RecDescent NAMESPACE
*Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::AUTOLOAD = sub {
no strict 'refs';
$AUTOLOAD =~ s/^Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar/Parse::RecDescent/;
goto &{$AUTOLOAD};
}
}
push @Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::ISA, 'Parse::RecDescent';
# ARGS ARE: ($parser, $text; $repeating, $_noactions, \@args)
sub Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::_alternation_1_of_production_17_of_rule_sysibm_function
{
my $thisparser = $_[0];
use vars q{$tracelevel};
local $tracelevel = ($tracelevel || 0) + 1;
$ERRORS = 0;
my $thisrule = $thisparser->{"rules"}{"_alternation_1_of_production_17_of_rule_sysibm_function"};
Parse::RecDescent::_trace(
q{Trying rule: [_alternation_1_of_production_17_of_rule_sysibm_function]},
Parse::RecDescent::_tracefirst($_[1]),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
my $err_at = @{ $thisparser->{errors} };
my $score;
my $score_return;
my $_tok;
my $return = undef;
my $_matched = 0;
my $commit = 0;
my @item = ();
my %item = ();
my $repeating = defined($_[2]) && $_[2];
my $_noactions = defined($_[3]) && $_[3];
my @arg = defined $_[4] ? @{ &{ $_[4] } } : ();
my %arg = ($#arg & 01) ? @arg : (@arg, undef);
my $text;
my $lastsep = "";
my $expectation = new Parse::RecDescent::Expectation($thisrule->expected());
$expectation->at($_[1]);
my $thisline;
tie $thisline, q{Parse::RecDescent::LineCounter}, \$text, $thisparser;
while (!$_matched && !$commit) {
Parse::RecDescent::_trace(
q{Trying production: [/DECIMAL/i]},
Parse::RecDescent::_tracefirst($_[1]),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
my $thisprod = $thisrule->{"prods"}[0];
$text = $_[1];
my $_savetext;
@item = (q{_alternation_1_of_production_17_of_rule_sysibm_function});
%item = (__RULE__ => q{_alternation_1_of_production_17_of_rule_sysibm_function});
my $repcount = 0;
Parse::RecDescent::_trace(
q{Trying terminal: [/DECIMAL/i]},
Parse::RecDescent::_tracefirst($text),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
$lastsep = "";
$expectation->is(q{})->at($text);
unless ($text =~ s/\A($skip)/$lastsep=$1 and ""/e
and $text =~ s/\A(?:DECIMAL)//i) {
$expectation->failed();
Parse::RecDescent::_trace(q{<<Didn't match terminal>>}, Parse::RecDescent::_tracefirst($text))
if defined $::RD_TRACE;
last;
}
Parse::RecDescent::_trace(q{>>Matched terminal<< (return value: [} . $& . q{])},
Parse::RecDescent::_tracefirst($text))
if defined $::RD_TRACE;
push @item, $item{__PATTERN1__} = $&;
Parse::RecDescent::_trace(
q{>>Matched production: [/DECIMAL/i]<<},
Parse::RecDescent::_tracefirst($text),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
$_matched = 1;
last;
}
while (!$_matched && !$commit) {
Parse::RecDescent::_trace(
q{Trying production: [/DEC/i]},
Parse::RecDescent::_tracefirst($_[1]),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
my $thisprod = $thisrule->{"prods"}[1];
$text = $_[1];
my $_savetext;
@item = (q{_alternation_1_of_production_17_of_rule_sysibm_function});
%item = (__RULE__ => q{_alternation_1_of_production_17_of_rule_sysibm_function});
my $repcount = 0;
Parse::RecDescent::_trace(
q{Trying terminal: [/DEC/i]},
Parse::RecDescent::_tracefirst($text),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
$lastsep = "";
$expectation->is(q{})->at($text);
unless ($text =~ s/\A($skip)/$lastsep=$1 and ""/e
and $text =~ s/\A(?:DEC)//i) {
$expectation->failed();
Parse::RecDescent::_trace(q{<<Didn't match terminal>>}, Parse::RecDescent::_tracefirst($text))
if defined $::RD_TRACE;
last;
}
Parse::RecDescent::_trace(q{>>Matched terminal<< (return value: [} . $& . q{])},
Parse::RecDescent::_tracefirst($text))
if defined $::RD_TRACE;
push @item, $item{__PATTERN1__} = $&;
Parse::RecDescent::_trace(
q{>>Matched production: [/DEC/i]<<},
Parse::RecDescent::_tracefirst($text),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
$_matched = 1;
last;
}
unless ($_matched || defined($return) || defined($score)) {
$_[1] = $text; # NOT SURE THIS IS NEEDED
Parse::RecDescent::_trace(
q{<<Didn't match rule>>},
Parse::RecDescent::_tracefirst($_[1]),
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
) if defined $::RD_TRACE;
return undef;
}
if (!defined($return) && defined($score)) {
Parse::RecDescent::_trace(q{>>Accepted scored production<<},
"", q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel)
if defined $::RD_TRACE;
$return = $score_return;
}
splice @{ $thisparser->{errors} }, $err_at;
$return = $item[$#item] unless defined $return;
if (defined $::RD_TRACE) {
Parse::RecDescent::_trace(
q{>>Matched rule<< (return value: [} . $return . q{])}, "",
q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
);
Parse::RecDescent::_trace(
q{(consumed: [} . Parse::RecDescent::_tracemax(substr($_[1], 0, -length($text))) . q{])},
Parse::RecDescent::_tracefirst($text),
, q{_alternation_1_of_production_17_of_rule_sysibm_function}, $tracelevel
);
}
$_[1] = $text;
return $return;
}
# ARGS ARE: ($parser, $text; $repeating, $_noactions, \@args)
sub Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::triggered_action {
my $thisparser = $_[0];
use vars q{$tracelevel};
local $tracelevel = ($tracelevel || 0) + 1;
$ERRORS = 0;
my $thisrule = $thisparser->{"rules"}{"triggered_action"};
Parse::RecDescent::_trace(
q{Trying rule: [triggered_action]},
Parse::RecDescent::_tracefirst($_[1]),
q{triggered_action}, $tracelevel
) if defined $::RD_TRACE;
my $err_at = @{ $thisparser->{errors} };
my $score;
my $score_return;
my $_tok;
my $return = undef;
my $_matched = 0;
my $commit = 0;
my @item = ();
my %item = ();
my $repeating = defined($_[2]) && $_[2];
my $_noactions = defined($_[3]) && $_[3];
my @arg = defined $_[4] ? @{ &{ $_[4] } } : ();
my %arg = ($#arg & 01) ? @arg : (@arg, undef);
my $text;
my $lastsep = "";
my $expectation = new Parse::RecDescent::Expectation($thisrule->expected());
$expectation->at($_[1]);
my $thisline;
tie $thisline, q{Parse::RecDescent::LineCounter}, \$text, $thisparser;
while (!$_matched && !$commit) {
Parse::RecDescent::_trace(
q{Trying production: [when_clause SQL_procedure_statement]},
Parse::RecDescent::_tracefirst($_[1]),
q{triggered_action}, $tracelevel
) if defined $::RD_TRACE;
my $thisprod = $thisrule->{"prods"}[0];
$text = $_[1];
my $_savetext;
@item = (q{triggered_action});
%item = (__RULE__ => q{triggered_action});
my $repcount = 0;
Parse::RecDescent::_trace(
q{Trying repeated subrule: [when_clause]},
Parse::RecDescent::_tracefirst($text),
q{triggered_action}, $tracelevel
) if defined $::RD_TRACE;
$expectation->is(q{})->at($text);
unless (defined(
$_tok = $thisparser->_parserepeat(
$text, \&Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::when_clause,
0, 1, $_noactions, $expectation, undef
)
)) {
Parse::RecDescent::_trace(
q{<<Didn't match repeated subrule: [when_clause]>>},
Parse::RecDescent::_tracefirst($text),
q{triggered_action}, $tracelevel
) if defined $::RD_TRACE;
last;
}
Parse::RecDescent::_trace(
q{>>Matched repeated subrule: [when_clause]<< (} . @$_tok . q{ times)},
Parse::RecDescent::_tracefirst($text),
q{triggered_action},
$tracelevel
) if defined $::RD_TRACE;
$item{q{when_clause(?)}} = $_tok;
push @item, $_tok;
Parse::RecDescent::_trace(
q{Trying subrule: [SQL_procedure_statement]},
Parse::RecDescent::_tracefirst($text),
q{triggered_action}, $tracelevel
) if defined $::RD_TRACE;
if (1) {
no strict qw{refs};
$expectation->is(q{SQL_procedure_statement})->at($text);
unless (defined(
$_tok = Parse::RecDescent::SQL::Translator::Parser::DB2::Grammar::SQL_procedure_statement(
$thisparser, $text, $repeating, $_noactions, sub { \@arg }
)
)) {
Parse::RecDescent::_trace(
q{<