File Coverage

blib/lib/DBIx/OnlineDDL.pm
Criterion    Covered   Total      %
statement        397     522   76.0
branch            47     140   33.5
condition         32     128   25.0
subroutine        52      63   82.5
pod               11      12   91.6
total            539     865   62.3


line stmt bran cond sub pod time code
1             package DBIx::OnlineDDL;
2              
3             our $AUTHORITY = 'cpan:GSG';
4             # ABSTRACT: Run DDL on online databases safely
5 2     2   1097734 use version;
  2         14  
  2         11  
6             our $VERSION = 'v1.0.0'; # VERSION
7              
8 2     2   137 use utf8;
  2         8  
  2         14  
9 2     2   691 use open qw(:utf8 :std);
  2         1796  
  2         8  
10              
11 2     2   188 use v5.10;
  2         6  
12 2     2   9 use Moo;
  2         2  
  2         14  
13 2     2   586 use MooX::StrictConstructor;
  2         3  
  2         17  
14              
15 2     2   1335 use Types::Standard qw( Str Bool HashRef CodeRef InstanceOf Dict Optional );
  2         3  
  2         22  
16 2     2   2987 use Types::Common::Numeric qw( PositiveNum PositiveInt );
  2         16652  
  2         10  
17              
18 2     2   1415 use Class::Load;
  2         15512  
  2         87  
19 2     2   645 use DBI::Const::GetInfoType;
  2         8683  
  2         184  
20 2     2   12 use DBIx::BatchChunker 0.92; # with stmt attrs
  2         34  
  2         34  
21 2     2   696 use Eval::Reversible;
  2         40778  
  2         80  
22 2     2   17 use List::Util 1.44 (qw( uniq any all first )); # 1.44 has uniq
  2         35  
  2         96  
23 2     2   14 use Sub::Util qw( subname set_subname );
  2         3  
  2         106  
24 2     2   10 use Term::ProgressBar 2.14; # with silent option
  2         25  
  2         55  
25              
26             # Don't export the above, but don't conflict with StrictConstructor, either
27 2     2   9 use namespace::clean -except => [qw< new meta >];
  2         3  
  2         12  
28              
29             my $DEFAULT_MAX_ATTEMPTS = 20;
30              
31             #pod =encoding utf8
32             #pod
33             #pod =head1 SYNOPSIS
34             #pod
35             #pod use DBIx::OnlineDDL;
36             #pod use DBIx::BatchChunker;
37             #pod
38             #pod DBIx::OnlineDDL->construct_and_execute(
39             #pod rsrc => $dbic_schema->source('Account'),
40             #pod ### OR ###
41             #pod dbi_connector => $dbix_connector_retry_object,
42             #pod table_name => 'accounts',
43             #pod
44             #pod coderef_hooks => {
45             #pod # This is the phase where the DDL is actually run
46             #pod before_triggers => \&drop_foobar,
47             #pod
48             #pod # Run other operations right before the swap
49             #pod # WARNING: DML only! No DDL here!
50             #pod before_swap => \&delete_deprecated_accounts,
51             #pod },
52             #pod
53             #pod process_name => 'Dropping foobar from accounts',
54             #pod
55             #pod copy_opts => {
56             #pod chunk_size => 5000,
57             #pod debug => 1,
58             #pod },
59             #pod );
60             #pod
61             #pod sub drop_foobar {
62             #pod my $oddl = shift;
63             #pod my $name = $oddl->new_table_name;
64             #pod my $qname = $oddl->dbh->quote_identifier($name);
65             #pod
66             #pod # Drop the 'foobar' column, since it is no longer used
67             #pod $oddl->dbh_runner_do("ALTER TABLE $qname DROP COLUMN foobar");
68             #pod }
69             #pod
70             #pod sub delete_deprecated_accounts {
71             #pod my $oddl = shift;
72             #pod my $name = $oddl->new_table_name;
73             #pod my $dbh = $oddl->dbh; # only use for quoting!
74             #pod
75             #pod my $qname = $dbh->quote_identifier($name);
76             #pod
77             #pod DBIx::BatchChunker->construct_and_execute(
78             #pod chunk_size => 5000,
79             #pod
80             #pod debug => 1,
81             #pod
82             #pod process_name => 'Deleting deprecated accounts',
83             #pod process_past_max => 1,
84             #pod
85             #pod dbic_storage => $oddl->rsrc->storage,
86             #pod min_stmt => "SELECT MIN(account_id) FROM $qname",
87             #pod max_stmt => "SELECT MAX(account_id) FROM $qname",
88             #pod stmt => join("\n",
89             #pod "DELETE FROM $qname",
90             #pod "WHERE",
91             #pod " account_type = ".$dbh->quote('deprecated')." AND",
92             #pod " account_id BETWEEN ? AND ?",
93             #pod ),
94             #pod );
95             #pod }
96             #pod
97             #pod =head1 DESCRIPTION
98             #pod
99             #pod This is a database utility class for running DDL operations (like C<ALTER TABLE>) safely
100             #pod on large tables. It has a similar scope as L<DBIx::BatchChunker>, but is designed for
101             #pod DDL, rather than DML. It also has a similar function to other utilities like
102             #pod C<pt-online-schema-change> or
103             #pod C<gh-ost>, but actually works properly with foreign
104             #pod keys, and is written as a Perl module to hook directly into a DBI handle.
105             #pod
106             #pod Like most online schema change tools, this works by creating a new shell table that looks
107             #pod just like the old table, running the DDL changes (through the L</before_triggers> hook),
108             #pod copying data to the new table, and swapping the tables. Triggers are created to keep the
109             #pod data in sync. See L</STEP METHODS> for more information.
110             #pod
111             #pod The full operation is protected with an undo stack via L</reversible>, an L<Eval::Reversible> object.
112             #pod If any step in the process fails, the undo stack is run to return the DB back to normal.
113             #pod
114             #pod This module uses as many of the DBI info methods as possible, along with ANSI SQL in most
115             #pod places, to be compatible with multiple RDBMS. So far, it will work with MySQL or SQLite,
116             #pod but can be expanded to include more systems with a relatively small amount of code
117             #pod changes. (See L<DBIx::OnlineDDL::Helper::Base> for details.)
118             #pod
119             #pod B<DISCLAIMER:> You should not rely on this class to magically fix any and all locking
120             #pod problems the DB might experience just because it's being used. Thorough testing and
121             #pod best practices are still required.
122             #pod
123             #pod =head2 When you shouldn't use this module
124             #pod
125             #pod =head3 Online DDL is already available in the RDBMS
126             #pod
127             #pod If you're running MySQL 5.6+ without clustering, just use C<LOCK=NONE> for every DDL
128             #pod statement. It is seriously simple and guarantees that the table changes you make are not
129             #pod going to lock the table, or it will fail right away to tell you it's an incompatible
130             #pod change.
131             #pod
132             #pod If you're running something like Galera clusters, this typically wouldn't be an option,
133             #pod as it would lock up the clusters while the C<ALTER TABLE> statement is running, despite
134             #pod the C<LOCK=NONE> statement. (Galera clusters were the prime motivation for writing this
135             #pod module.)
136             #pod
137             #pod Other RDBMSs may have support for online DDL as well. Check the documentation first. If
138             #pod they don't, patches for this tool are welcome!
139             #pod
140             #pod =head3 The operation is small
141             #pod
142             #pod Does your DDL only take 2 seconds? Just do it! Don't bother with trying to swap tables
143             #pod around, wasting time with full table copies, etc. It's not worth the time spent or risk.
144             #pod
145             #pod =head3 When you actually want to run DML, not DDL
146             #pod
147             #pod L<DBIx::BatchChunker> is more appropriate for running DML operations (like C<UPDATE>,
148             #pod C<DELETE>, C<INSERT>). If you need to do both, you can use the L</before_triggers> hook
149             #pod for DDL, and the L</before_swap> hook for DML. Or just run DBIx::BatchChunker after the
150             #pod OnlineDDL process is complete.
151             #pod
152             #pod =head3 Other online schema change tools fit your needs
153             #pod
154             #pod Don't have foreign key constraints and C<pt-online-schema-change> is already working for you? Great!
155             #pod Keep using it.
156             #pod
157             #pod =head1 ATTRIBUTES
158             #pod
159             #pod =head2 DBIC Attributes
160             #pod
161             #pod =head3 rsrc
162             #pod
163             #pod A L<DBIx::Class::ResultSource>. This will be the source used for all operations, DDL or
164             #pod otherwise. Optional, but recommended for DBIC users.
165             #pod
166             #pod The DBIC storage handler's C<connect_info> will be tweaked to ensure sane defaults and
167             #pod proper post-connection details.
168             #pod
169             #pod =cut
170              
171             has rsrc => (
172             is => 'ro',
173             isa => InstanceOf['DBIx::Class::ResultSource'],
174             required => 0,
175             );
176              
177             #pod =head3 dbic_retry_opts
178             #pod
179             #pod A hashref of DBIC retry options. These options control how retry protection works within
180             #pod DBIC. Right now, this is just limited to C<max_attempts>, which controls the number of
181             #pod times to retry. The default C<max_attempts> is 20.
182             #pod
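                #pod For example, to raise the retry ceiling above the default (a sketch; C<max_attempts>
                #pod is the only key currently recognized here):
                #pod
                #pod     dbic_retry_opts => {
                #pod         max_attempts => 40,
                #pod     },
                #pod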
183             #pod =cut
184              
185             has dbic_retry_opts => (
186             is => 'ro',
187             isa => HashRef,
188             required => 0,
189             default => sub { {} },
190             );
191              
192             #pod =head2 DBI Attributes
193             #pod
194             #pod =head3 dbi_connector
195             #pod
196             #pod A L<DBIx::Connector::Retry> object. Instead of L<DBI> statement handles, this is the
197             #pod recommended non-DBIC way for OnlineDDL (and BatchChunker) to interface with the DBI, as
198             #pod it handles retries on failures. The connection mode used is whatever default is set
199             #pod within the object.
200             #pod
201             #pod Required, except for DBIC users, who should be setting L</rsrc> above. It is also
202             #pod assumed that the correct database is already active.
203             #pod
204             #pod The object will be tweaked to ensure sane defaults, proper post-connection details, a
205             #pod custom C<retry_handler>, and set a default C<max_attempts> of 20, if not already set.
206             #pod
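                #pod A minimal sketch of wiring one up (this assumes the Moo-style constructor with a
                #pod C<connect_info> arrayref; the DSN and credentials are placeholders):
                #pod
                #pod     my $conn = DBIx::Connector::Retry->new(
                #pod         connect_info => [ 'dbi:mysql:database=myapp', $user, $pass, { RaiseError => 1 } ],
                #pod         max_attempts => 20,
                #pod     );
                #pod
                #pod     DBIx::OnlineDDL->construct_and_execute(
                #pod         dbi_connector => $conn,
                #pod         table_name    => 'accounts',
                #pod         ...
                #pod     );
                #pod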
207             #pod =cut
208              
209             has dbi_connector => (
210             is => 'ro',
211             isa => InstanceOf['DBIx::Connector::Retry'],
212             required => 0,
213             );
214              
215             #pod =head3 table_name
216             #pod
217             #pod The table name to be copied and eventually replaced. Required unless L</rsrc> is
218             #pod specified.
219             #pod
220             #pod =cut
221              
222             has table_name => (
223             is => 'ro',
224             isa => Str,
225             required => 1,
226             lazy => 1,
227             default => sub {
228             my $rsrc = shift->rsrc // return;
229             $rsrc->from;
230             },
231             );
232              
233             #pod =head3 new_table_name
234             #pod
235             #pod The new table name to be created, copied to, and eventually used as the final table.
236             #pod Optional.
237             #pod
238             #pod If not defined, a name will be created automatically. This might be the better route,
239             #pod since the default builder will search for an unused name in the DB right before OnlineDDL
240             #pod needs it.
241             #pod
242             #pod =cut
243              
244             has new_table_name => (
245             is => 'ro',
246             isa => Str,
247             required => 0,
248             lazy => 1,
249             builder => 1,
250             );
251              
252             sub _build_new_table_name {
253 28     28   8238 my $self = shift;
254 28         101 my $dbh = $self->dbh;
255 28         763256 my $vars = $self->_vars;
256              
257 28         245 my $catalog = $vars->{catalog};
258 28         87 my $schema = $vars->{schema};
259 28         496 my $orig_table_name = $self->table_name;
260              
261 28   50     407 my $escape = $dbh->get_info( $GetInfoType{SQL_SEARCH_PATTERN_ESCAPE} ) // '\\';
262              
263             return $self->_find_new_identifier(
264             "_${orig_table_name}_new" => set_subname('_new_table_name_finder', sub {
265 28     28   78 $dbh = shift;
266 28         836 my $like_expr = shift;
267 28         313 $like_expr =~ s/([_%])/$escape$1/g;
268              
269 28         231 $dbh->table_info($catalog, $schema, $like_expr)->fetchrow_array;
270 28         755 }),
271             'SQL_MAXIMUM_TABLE_NAME_LENGTH',
272             );
273             }
274              
275             #pod =head2 Progress Bar Attributes
276             #pod
277             #pod =head3 progress_bar
278             #pod
279             #pod The progress bar used for most of the process. A different one is used for the actual
280             #pod table copy with L<DBIx::BatchChunker>, since that step takes longer.
281             #pod
282             #pod Optional. If the progress bar isn't specified, a default one will be created. If the
283             #pod terminal isn't interactive, the default L<Term::ProgressBar> will be set to C<silent> to
284             #pod naturally skip the output.
285             #pod
286             #pod =cut
287              
288             has progress_bar => (
289             is => 'rw',
290             isa => InstanceOf['Term::ProgressBar'],
291             );
292              
293             sub _progress_bar_setup {
294 28     28   59 my $self = shift;
295 28         415 my $vars = $self->_vars;
296              
297 28         199 my $steps = 6 + scalar keys %{ $self->coderef_hooks };
  28         168  
298              
299 28   33     437 my $progress = $self->progress_bar || Term::ProgressBar->new({
300             name => $self->progress_name,
301             count => $steps,
302             ETA => 'linear',
303             silent => !(-t *STDERR && -t *STDIN), # STDERR is what {fh} is set to use
304             });
305              
306 28         49980 $vars->{progress_bar} = $progress;
307             }
308              
309             #pod =head3 progress_name
310             #pod
311             #pod A string used to assist in creating a progress bar. Ignored if L</progress_bar> is
312             #pod already specified.
313             #pod
314             #pod This is the preferred way of customizing the progress bar without having to create one
315             #pod from scratch.
316             #pod
317             #pod =cut
318              
319             has progress_name => (
320             is => 'rw',
321             isa => Str,
322             required => 0,
323             lazy => 1,
324             default => sub {
325             my $table_name = shift->table_name;
326             'Altering'.($table_name ? " $table_name" : '');
327             },
328             );
329              
330             #pod =head2 Other Attributes
331             #pod
332             #pod =head3 coderef_hooks
333             #pod
334             #pod A hashref of coderefs. Each of these is used in a different step in the process. All
335             #pod of these are optional, but it is B<highly recommended> that C<before_triggers> is
336             #pod specified. Otherwise, you're not actually running any DDL and the table copy is
337             #pod essentially a no-op.
338             #pod
339             #pod All of these hooks pass the C<DBIx::OnlineDDL> object as the only argument. The
340             #pod L</dbh> can be acquired from that and used in SQL statements. The L</dbh_runner>
341             #pod and L</dbh_runner_do> methods should be used to protect against disconnections or locks.
342             #pod
343             #pod There is room to add more hooks here, but only if there's a good reason to do so.
344             #pod (Running the wrong kind of SQL at the wrong time could be dangerous.) Create a GitHub
345             #pod issue if you can think of one.
346             #pod
347             #pod =head4 before_triggers
348             #pod
349             #pod This is called before the table triggers are applied. Your DDL should take place here,
350             #pod for a few reasons:
351             #pod
352             #pod 1. The new table is empty, so DDL should take no time at all now.
353             #pod
354             #pod 2. After this hook, the table is reanalyzed to make sure it has an accurate picture
355             #pod of the new columns. This is critical for the creation of the triggers.
356             #pod
357             #pod =head4 before_swap
358             #pod
359             #pod This is called after the new table has been analyzed, but before the big table swap. This
360             #pod hook might be used if a large DML operation needs to be done while the new table is still
361             #pod available. If you use this hook, it's highly recommended that you use something like
362             #pod L<DBIx::BatchChunker> to make sure the changes are made in a safe and batched manner.
363             #pod
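                #pod For example, a rebuild-only run (the C<OPTIMIZE TABLE> / C<ALTER TABLE ... FORCE> style
                #pod of operation mentioned in the constructor warning) would pass an empty coderef:
                #pod
                #pod     coderef_hooks => {
                #pod         # No DDL to run; we only want the copy/swap rebuild
                #pod         before_triggers => sub {},
                #pod     },
                #pod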
364             #pod =cut
365              
366             has coderef_hooks => (
367             is => 'ro',
368             isa => Dict[
369             before_triggers => Optional[CodeRef],
370             before_swap => Optional[CodeRef],
371             ],
372             required => 0,
373             default => sub { +{} },
374             );
375              
376             #pod =head3 copy_opts
377             #pod
378             #pod A hashref of different options to pass to L<DBIx::BatchChunker>, which is used in the
379             #pod L</copy_rows> step. Some of these are defined automatically. It's recommended that you
380             #pod specify at least these options:
381             #pod
382             #pod chunk_size => 5000, # or whatever is a reasonable size for that table
383             #pod id_name => 'pk_id', # especially if there isn't an obvious integer PK
384             #pod
385             #pod Specifying L<DBIx::BatchChunker/coderef> is not recommended, since Active DBI Processing
386             #pod mode will be used.
387             #pod
388             #pod These options will be included into the hashref, unless specifically overridden by key
389             #pod name:
390             #pod
391             #pod id_name => $first_pk_column, # will warn if the PK is multi-column
392             #pod target_time => 1,
393             #pod sleep => 0.5,
394             #pod
395             #pod # If using DBIC
396             #pod dbic_storage => $rsrc->storage,
397             #pod rsc => $id_rsc,
398             #pod dbic_retry_opts => {
399             #pod max_attempts => 20,
400             #pod # best not to change this, unless you know what you're doing
401             #pod retry_handler => $onlineddl_retry_handler,
402             #pod },
403             #pod
404             #pod # If using DBI
405             #pod dbi_connector => $oddl->dbi_connector,
406             #pod min_stmt => $min_sql,
407             #pod max_stmt => $max_sql,
408             #pod
409             #pod # For both
410             #pod count_stmt => $count_sql,
411             #pod stmt => $insert_select_sql,
412             #pod progress_name => $copying_msg,
413             #pod
414             #pod =cut
415              
416             has copy_opts => (
417             is => 'ro',
418             isa => HashRef,
419             required => 0,
420             lazy => 1,
421             default => sub { {} },
422             );
423              
424             # This is filled in during copy_rows, since the _column_list call needs to happen after
425             # the DDL has run.
426             sub _fill_copy_opts {
427 27     27   156 my $self = shift;
428 27         98 my $rsrc = $self->rsrc;
429 27         83 my $dbh = $self->dbh;
430 27         760253 my $vars = $self->_vars;
431              
432 27         624 my $copy_opts = $self->copy_opts;
433 27         638 my $helper = $self->_helper;
434              
435 27         268 my $catalog = $vars->{catalog};
436 27         81 my $schema = $vars->{schema};
437 27         423 my $orig_table_name = $self->table_name;
438 27         616 my $new_table_name = $self->new_table_name;
439              
440 27         369 my $orig_table_name_quote = $dbh->quote_identifier($orig_table_name);
441 27         1387 my $new_table_name_quote = $dbh->quote_identifier($new_table_name);
442              
443             # Sane defaults for timing
444 27   100     698 $copy_opts->{target_time} //= 1;
445             # Copies create lots of rapid I/O, binlog generation, etc. on the primary.
446             # Some sleep time gives other servers a chance to catch up:
447 27   100     232 $copy_opts->{sleep} //= 0.5;
448              
449             # Figure out what the id_name is going to be
450             my $id_name = $copy_opts->{id_name} //= $self->dbh_runner(run => set_subname '_pk_finder', sub {
451 13     13   37 $dbh = $_;
452 13         95 my @ids = $dbh->primary_key($catalog, $schema, $orig_table_name);
453              
454 13 50       16223 die "No primary key found for $orig_table_name" unless @ids;
455 13 50       51 warn "Using the first column of a multi-column primary key for $orig_table_name" if @ids > 1;
456              
457 13         54 $ids[0];
458 27   66     268 });
459              
460 27         154 my $id_name_quote = $dbh->quote_identifier($id_name);
461              
462 27 50       662 if ($rsrc) {
463 27   66     161 $copy_opts->{dbic_storage} //= $rsrc->storage;
464 27   66     808 $copy_opts->{rsc} //= $rsrc->resultset->get_column($id_name);
465              
466 27   100     14936 $copy_opts->{dbic_retry_opts} //= {};
467 27   66     499 $copy_opts->{dbic_retry_opts}{max_attempts} //= $DEFAULT_MAX_ATTEMPTS;
468 27     3   749 $copy_opts->{dbic_retry_opts}{retry_handler} = sub { $self->_retry_handler(@_) };
  3         30541  
469             }
470             else {
471 0   0     0 $copy_opts->{dbi_connector} //= $self->dbi_connector;
472 0   0     0 $copy_opts->{min_stmt} //= "SELECT MIN($id_name_quote) FROM $orig_table_name_quote";
473 0   0     0 $copy_opts->{max_stmt} //= "SELECT MAX($id_name_quote) FROM $orig_table_name_quote";
474             }
475              
476 27         130 my @column_list = $self->_column_list;
477 27         74 my $column_list_str = join(', ', map { $dbh->quote_identifier($_) } @column_list );
  94         1534  
478              
479             # The INSERT..SELECT is a bit different depending on the RDBMS used, mostly because
480             # of the IGNORE part
481 27         686 my $insert_select_stmt = $helper->insert_select_stmt($column_list_str);
482              
483 27   66     231 $copy_opts->{count_stmt} //= "SELECT COUNT(*) FROM $orig_table_name_quote WHERE $id_name_quote BETWEEN ? AND ?";
484 27   66     140 $copy_opts->{stmt} //= $insert_select_stmt;
485              
486 27 50 66     175 $copy_opts->{progress_name} //= "Copying $orig_table_name" unless $copy_opts->{progress_bar};
487              
488 27         124 return $copy_opts;
489             }
490              
491             #pod =head3 db_timeouts
492             #pod
493             #pod A hashref of timeouts used for various DB operations, and usually set at the beginning of
494             #pod each connection. Some of these settings may be RDBMS-specific.
495             #pod
496             #pod =head4 lock_file
497             #pod
498             #pod Amount of time (in seconds) to wait when attempting to acquire filesystem locks (on
499             #pod filesystems which support locking). Float or fractional values are allowed. This
500             #pod currently only applies to SQLite.
501             #pod
502             #pod Default value is 1 second. The downside is that the SQLite default is actually 0, so
503             #pod other (non-OnlineDDL) connections should have a setting that is more than that to prevent
504             #pod lock contention.
505             #pod
506             #pod =head4 lock_db
507             #pod
508             #pod Amount of time (in whole seconds) to wait when attempting to acquire table and/or database
509             #pod level locks before falling back to retry.
510             #pod
511             #pod Default value is 60 seconds.
512             #pod
513             #pod =head4 lock_row
514             #pod
515             #pod Amount of time (in whole seconds) to wait when attempting to acquire row-level locks,
516             #pod which apply to much lower-level operations than L</lock_db>. At this scope, the lesser
517             #pod of either of these two settings will take precedence.
518             #pod
519             #pod Default value is 2 seconds. Lower values are preferred for row lock wait timeouts, so
520             #pod that OnlineDDL is more likely to be the victim of lock contention. OnlineDDL can simply
521             #pod retry the connection at that point.
522             #pod
523             #pod =head4 session
524             #pod
525             #pod Amount of time (in whole seconds) for inactive session timeouts on the database side.
526             #pod
527             #pod Default value is 28,800 seconds (8 hours), which is MySQL's default.
528             #pod
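                #pod As an illustration only (these values are not recommendations), overriding a subset of
                #pod the timeouts looks like this; any keys left out keep the defaults described above:
                #pod
                #pod     db_timeouts => {
                #pod         lock_db  => 120,   # wait longer for table/database-level locks
                #pod         lock_row => 1,     # give up row locks even sooner
                #pod     },
                #pod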
529             #pod =cut
530              
531             has db_timeouts => (
532             is => 'ro',
533             isa => Dict[
534             lock_file => Optional[PositiveNum],
535             lock_db => Optional[PositiveInt],
536             lock_row => Optional[PositiveInt],
537             session => Optional[PositiveInt],
538             ],
539             required => 0,
540             );
541              
542             #pod =head3 reversible
543             #pod
544             #pod A L<Eval::Reversible> object, used for rollbacks. A default will be created, if not
545             #pod specified.
546             #pod
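                #pod A sketch of supplying your own instance, in case you want to hold onto the same undo
                #pod stack outside of OnlineDDL (purely illustrative):
                #pod
                #pod     my $reversible = Eval::Reversible->new;
                #pod
                #pod     my $online_ddl = DBIx::OnlineDDL->new(
                #pod         rsrc       => $dbic_schema->source('Account'),
                #pod         reversible => $reversible,
                #pod         ...
                #pod     );
                #pod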
547             #pod =cut
548              
549             has reversible => (
550             is => 'rw',
551             isa => InstanceOf['Eval::Reversible'],
552             required => 1,
553             lazy => 1,
554             default => sub { Eval::Reversible->new },
555             );
556              
557             ### Private attributes
558              
559             has _vars => (
560             is => 'rw',
561             isa => HashRef,
562             required => 0,
563             init_arg => undef,
564             lazy => 1,
565             default => sub { {} },
566             );
567              
568             has _helper => (
569             is => 'ro',
570             isa => InstanceOf['DBIx::OnlineDDL::Helper::Base'],
571             required => 0,
572             init_arg => undef,
573             lazy => 1,
574             builder => '_build_helper',
575             );
576              
577             sub _build_helper {
578 28     28   408 my $self = shift;
579              
580 28         100 my $dbh = $self->dbh;
581              
582             # Get and store the DBMS_NAME. This is not the lowercase driver name (ie: mysql),
583             # unless the {Driver}{Name} alternative wins out.
584 28   33     791013 my $dbms_name = $self->_vars->{dbms_name} = $dbh->get_info( $GetInfoType{SQL_DBMS_NAME} ) // $dbh->{Driver}->{Name};
585              
586 28         1163 my $helper_class = "DBIx::OnlineDDL::Helper::$dbms_name";
587              
588             # Die if we can't load the RDBMS-specific class, since there's a lot of gaps in Base
589 28 50       187 die "OnlineDDL is not designed for $dbms_name systems yet!" unless Class::Load::load_optional_class($helper_class);
590              
591 28         3639 return $helper_class->new( online_ddl => $self );
592             }
593              
594             ### BUILD methods
595              
596             around BUILDARGS => sub {
597             my $next = shift;
598             my $class = shift;
599              
600             my %args = @_ == 1 ? %{ $_[0] } : @_;
601              
602             # Quick sanity checks
603             die 'A DBIC ResultSource (rsrc) or DBIx::Connector::Retry object (dbi_connector) is required' unless (
604             $args{rsrc} || $args{dbi_connector}
605             );
606              
607             $args{db_timeouts} //= {};
608             $args{coderef_hooks} //= {};
609             if (ref $args{db_timeouts} ne 'HASH' || ref $args{coderef_hooks} ne 'HASH') {
610             # Let Moo complain about the isa check
611             $class->$next( %args );
612             die "Should have failed an isa check...";
613             }
614              
615             unless ($args{coderef_hooks}{before_triggers}) {
616             warn
617             "No before_triggers hook appears to be defined. There may be a few reasons for this:\n\n".
618              
619             "1. Are you running DDL? If not, this may be the wrong tool for you.\n".
620             "2. Did you add DDL into the wrong coderef hook? This is widely regarded as a Very Bad Idea™.\n".
621             "3. Are you intending to use this as an OPTIMIZE TABLE or ALTER TABLE FORCE operation? If so,\n".
622             " add in an empty coderef for before_triggers to silence this warning.\n"
623             ;
624             }
625              
626             # Defaults for db_timeouts (see POD above). We set these here, because each
627             # individual timeout should be checked to see if it's defined.
628             $args{db_timeouts}{lock_file} //= 1;
629             $args{db_timeouts}{lock_db} //= 60;
630             $args{db_timeouts}{lock_row} //= 2;
631             $args{db_timeouts}{session} //= 28_800;
632              
633             $class->$next( %args );
634             };
635              
636             sub BUILD {
637 28     28 0 1506 my $self = shift;
638 28         101 my $rsrc = $self->rsrc;
639              
640 28         127 my $dbh = $self->dbh;
641 28         34893 my $helper = $self->_helper;
642              
643             # Get the current catalog/schema
644 28         8209 my ($catalog, $schema) = $helper->current_catalog_schema;
645              
646 28         617 $self->_vars->{catalog} = $catalog;
647 28         575 $self->_vars->{schema} = $schema;
648              
649             # Add in the post-connection details
650 28         270 my @stmts = $helper->post_connection_stmts;
651              
652 28 50       105 if ($rsrc) {
653             ### DBIC Storage
654              
655 28         75 my @post_connection_details = map { [ do_sql => $_ ] } @stmts;
  84         198  
656              
657             # XXX: Tapping into a private attribute here, but it's a lot better than parsing
658             # $storage->connect_info. We are also not attaching these details to
659             # connect_info, so public introspection won't pick up our changes. Undecided
660             # whether this is good or bad...
661              
662 28         127 my $storage = $rsrc->storage;
663 28         1311 my $on_connect_call = $storage->_dbic_connect_attributes->{on_connect_call};
664              
665             # Parse on_connect_call to make sure we can add to it
666 28   33     120 my $ref = defined $on_connect_call && ref $on_connect_call;
667 28 0       93 unless ($on_connect_call) {
    0          
    0          
    50          
668 28         59 $on_connect_call = \@post_connection_details;
669             }
670 0         0 elsif (!$ref) {
671 0         0 $on_connect_call = [ [ do_sql => $on_connect_call ], @post_connection_details ];
672             }
673 0         0 elsif ($ref eq 'ARRAY') {
674             # Double-check that we're not repeating ourselves by inspecting the array for
675             # our own statements.
676             @$on_connect_call = grep {
677 0         0 my $e = $_;
  0         0  
678             !( # exclude any of ours
679             $e && ref $e && ref $e eq 'ARRAY' && @$e == 2 &&
680             $e->[0] && !ref $e->[0] && $e->[0] eq 'do_sql' &&
681 0   0 0   0 $e->[1] && !ref $e->[1] && (any { $e->[1] eq $_ } @stmts)
  0         0  
682             );
683             } @$on_connect_call;
684              
685 0         0 my $first_occ = $on_connect_call->[0];
686 0 0 0     0 if ($first_occ && ref $first_occ && ref $first_occ eq 'ARRAY') {
      0        
687 0         0 $on_connect_call = [ @$on_connect_call, @post_connection_details ];
688             }
689             else {
690 0         0 $on_connect_call = [ $on_connect_call, @post_connection_details ];
691             }
692             }
693 0         0 elsif ($ref eq 'CODE') {
694 0         0 $on_connect_call = [ $on_connect_call, @post_connection_details ];
695             }
696             else {
697 0         0 die "Illegal reftype $ref for on_connect_call connection attribute!";
698             }
699              
700             # Set the new options on the relevant attributes that Storage::DBI->connect_info touches.
701 28         78 $storage->_dbic_connect_attributes->{on_connect_call} = $on_connect_call;
702 28         116 $storage->on_connect_call($on_connect_call);
703             }
704             else {
705             ### DBIx::Connector::Retry (via DBI Callbacks)
706              
707 0         0 my $conn = $self->dbi_connector;
708 0         0 my $dbi_attrs = $conn->connect_info->[3];
709              
710             # Playing with refs, so no need to re-set connect_info
711 0 0       0 $conn->connect_info->[3] = $dbi_attrs = {} unless $dbi_attrs;
712              
713             # Make sure the basic settings are sane
714 0         0 $dbi_attrs->{AutoCommit} = 1;
715 0         0 $dbi_attrs->{RaiseError} = 1;
716              
717             # Add the DBI callback
718 0   0     0 my $callbacks = $dbi_attrs->{Callbacks} //= {};
719 0         0 my $package_re = quotemeta(__PACKAGE__.'::_dbi_connected_callback');
720              
721 0   0     0 my $ref = defined $callbacks->{connected} && ref $callbacks->{connected};
722 0 0       0 unless ($callbacks->{connected}) {
    0          
    0          
723             $callbacks->{connected} = set_subname '_dbi_connected_callback' => sub {
724 0     0   0 shift->do($_) for @stmts;
725 0         0 return;
726 0         0 };
727             }
728 0 0       0 elsif (!$ref || $ref ne 'CODE') {
729 0         0 die "Illegal reftype $ref for connected DBI Callback!";
730             }
731 0         0 elsif (subname($callbacks->{connected}) =~ /^$package_re/) { # allow for *_wrapped below
732             # This is one of our callbacks; leave it alone!
733             }
734             else {
735             # This is somebody else's callback; wrap around it
736 0         0 my $old_coderef = $callbacks->{connected};
737             $callbacks->{connected} = set_subname '_dbi_connected_callback_wrapped' => sub {
738 0     0   0 my $h = shift;
739 0         0 $old_coderef->($h);
740 0         0 $h->do($_) for @stmts;
741 0         0 return;
742 0         0 };
743             }
744              
745             # Add a proper retry_handler
746 0     0   0 $conn->retry_handler(sub { $self->_retry_handler(@_) });
  0         0  
747              
748             # And max_attempts. XXX: Maybe they actually wanted 10 and not just the default?
749 0 0       0 $conn->max_attempts($DEFAULT_MAX_ATTEMPTS) if $conn->max_attempts == 10;
750             }
751              
752             # Go ahead and run the post-connection statements for this session
753 28         208 $dbh->{AutoCommit} = 1;
754 28         179 $dbh->{RaiseError} = 1;
755 28         173 $dbh->do($_) for @stmts;
756             }
757              
758             #pod =head1 CONSTRUCTORS
759             #pod
760             #pod See L</ATTRIBUTES> for information on what can be passed into these constructors.
761             #pod
762             #pod =head2 new
763             #pod
764             #pod my $online_ddl = DBIx::OnlineDDL->new(...);
765             #pod
766             #pod A standard object constructor. If you use this constructor, you will need to manually
767             #pod call L</execute> to execute the DB changes.
768             #pod
769             #pod You'll probably just want to use L</construct_and_execute>.
770             #pod
771             #pod =head2 construct_and_execute
772             #pod
773             #pod my $online_ddl = DBIx::OnlineDDL->construct_and_execute(...);
774             #pod
775             #pod Constructs a DBIx::OnlineDDL object and automatically calls each method step, including
776             #pod hooks. Anything passed to this method will be passed through to the constructor.
777             #pod
778             #pod Returns the constructed object, post-execution. This is typically only useful if you want
779             #pod to inspect the attributes after the process has finished. Otherwise, it's safe to just
780             #pod ignore the return and throw away the object immediately.
781             #pod
782             #pod =cut
783              
784             sub construct_and_execute {
785 0     0 1 0 my $class = shift;
786 0         0 my $online_ddl = $class->new(@_);
787              
788 0         0 $online_ddl->execute;
789              
790 0         0 return $online_ddl;
791             }
792              
793             #pod =head1 METHODS
794             #pod
795             #pod =head2 Step Runners
796             #pod
797             #pod =head3 execute
798             #pod
799             #pod Runs all of the steps as documented in L</STEP METHODS>. This also includes undo
800             #pod protection, in case of exceptions.
801             #pod
802             #pod =cut
803              
804             sub execute {
805 28     28 1 2230 my $self = shift;
806 28         616 my $reversible = $self->reversible;
807              
808 28         8420 $self->_progress_bar_setup;
809              
810             $reversible->run_reversibly(set_subname '_execute_part_one', sub {
811 28     28   4346 $self->create_new_table;
812 28         2556 $self->create_triggers;
813 27         3162 $self->copy_rows;
814 27         3145 $self->swap_tables;
815 28         402 });
816             $reversible->run_reversibly(set_subname '_execute_part_two', sub {
817 27     27   3388 $self->drop_old_table;
818 27         1916 $self->cleanup_foreign_keys;
819 27         4254 });
820             }
821              
822             #pod =head3 fire_hook
823             #pod
824             #pod $online_ddl->fire_hook('before_triggers');
825             #pod
826             #pod Fires one of the coderef hooks, if it exists. This also updates the progress bar.
827             #pod
828             #pod See L</coderef_hooks> for more details.
829             #pod
830             #pod =cut
831              
832             sub fire_hook {
833 55     55 1 141 my ($self, $hook_name) = @_;
834              
835 55         181 my $hooks = $self->coderef_hooks;
836 55         736 my $vars = $self->_vars;
837              
838 55         368 my $progress = $vars->{progress_bar};
839              
840 55 100 66     421 return unless $hooks && $hooks->{$hook_name};
841              
842 29         168 $progress->message("Firing hook for $hook_name");
843              
844             # Fire the hook
845 29         1135 $hooks->{$hook_name}->($self);
846              
847 29         7374281 $progress->update;
848             }
849              
850             #pod =head2 DBI Helpers
851             #pod
852             #pod =head3 dbh
853             #pod
854             #pod $online_ddl->dbh;
855             #pod
856             #pod Acquires a database handle, either from L</rsrc> or L</dbi_connector>. Not recommended
857             #pod for active work, as it doesn't offer retry protection. Instead, use L</dbh_runner> or
858             #pod L</dbh_runner_do>.
859             #pod
860             #pod =cut
861              
862             sub dbh {
863 651     651   6373 my $self = shift;
864              
865             # Even acquiring a $dbh could die (eg: 'USE $db' or other pre-connect commands), so
866             # also try to wrap this in our retry handler.
867 651     651   3047 my $dbh = $self->dbh_runner( run => sub { $_[0] } );
  651         2122  
868 651         2042 return $dbh;
869             }
870              
871             #pod =head3 dbh_runner
872             #pod
873             #pod my @items = $online_ddl->dbh_runner(run => sub {
874             #pod my $dbh = $_; # or $_[0]
875             #pod $dbh->selectall_array(...);
876             #pod });
877             #pod
878             #pod Runs the C<$coderef>, locally setting C<$_> to the database handle (and also passing it in). This
879             #pod is essentially a shortcut interface into either L<DBIx::Connector::Retry> or DBIC's L<DBIx::Class::Storage::BlockRunner>.
880             #pod
881             #pod The first argument can either be C<run> or C<txn>, which controls whether to wrap the
882             #pod code in a DB transaction or not. The return is passed directly back, and return context
883             #pod is honored.
884             #pod
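                #pod For example, a hypothetical two-statement change that should commit or roll back as a
                #pod unit could use the C<txn> form (a sketch; the table and column names are made up):
                #pod
                #pod     $online_ddl->dbh_runner(txn => sub {
                #pod         my $dbh = $_;
                #pod         $dbh->do("UPDATE accounts SET plan = 'basic' WHERE plan IS NULL");
                #pod         $dbh->do("DELETE FROM account_flags WHERE flag = 'needs_plan'");
                #pod     });
                #pod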
885             #pod =cut
886              
887             sub _retry_handler {
888 3     3   10 my ($self, $runner) = @_;
889 3         46 my $vars = $self->_vars;
890              
891             # NOTE: There's a lot of abusing the fact that BlockRunner and DBIx::Connector::Retry
892             # (a la $runner) share similar accessor interfaces.
893              
894 3         31 my $error = $runner->last_exception;
895 3         95 my $is_retryable = $self->_helper->is_error_retryable($error);
896              
897 3 50       53 if ($is_retryable) {
898 3         45 my ($failed, $max) = ($runner->failed_attempt_count, $runner->max_attempts);
899 3         24 my $progress = $vars->{progress_bar};
900              
901             # Warn about the last error
902 3 50       15 $progress->message("Encountered a recoverable error: $error") if $progress;
903              
904             # Pause for an incremental amount of seconds first, to discourage any future locks
905 3         3000645 sleep $failed;
906              
907             # If retries are escalating, try forcing a disconnect
908 3 50       74 if ($failed >= $max / 2) {
909             # Finally have some differences between the two classes...
910 0 0       0 if ($runner->isa('DBIx::Class::Storage::BlockRunner')) {
911 0         0 eval { $runner->storage->disconnect };
  0         0  
912             }
913             else {
914 0         0 eval { $runner->disconnect };
  0         0  
915             }
916             }
917              
918 3 50       125 $progress->message( sprintf(
919             "Attempt %u of %u", $failed, $max
920             ) ) if $progress;
921             }
922              
923 3         326 return $is_retryable;
924             }
925              
926             sub dbh_runner {
927 1209     1209 1 6271 my ($self, $method, $coderef) = @_;
928 1209         1918 my $wantarray = wantarray;
929              
930 1209 50       5635 die "Only 'txn' or 'run' are acceptable run methods" unless $method =~ /^(?:txn|run)$/;
931              
932 1209         1710 my @res;
933 1209 50       4258 if (my $rsrc = $self->rsrc) {
934             # No need to load BlockRunner, since DBIC loads it in before us if we're using
935             # this method.
936             my $block_runner = DBIx::Class::Storage::BlockRunner->new(
937             # defaults
938             max_attempts => $DEFAULT_MAX_ATTEMPTS,
939              
940             # never overrides the important ones below
941 1209         8163 %{ $self->dbic_retry_opts },
942              
943 0     0   0 retry_handler => sub { $self->_retry_handler(@_) },
944 1209 100       1703 storage => $rsrc->storage,
945             wrap_txn => ($method eq 'txn' ? 1 : 0),
946             );
947              
948             # This wrapping nonsense is necessary because Try::Tiny within BlockRunner has its own
949             # localization of $_. Fortunately, we can pass arguments to avoid closures.
950             my $wrapper = set_subname '_dbh_run_blockrunner_wrapper' => sub {
951 1209     1209   143880 my ($s, $c) = @_;
952 1209         4236 my $dbh = $s->rsrc->storage->dbh;
953              
954 1209         413628 local $_ = $dbh;
955 1209         3308 $c->($dbh); # also pass it in, because that's what DBIx::Connector does
956 1209         114196 };
957              
958             # BlockRunner can still die post-failure, if $storage->ensure_connected (which calls ping
959             # and tries to reconnect) dies. If that's the case, use our retry handler to check the new
960             # error message, and throw it back into BlockRunner.
961 1209         2517 my $br_method = 'run';
962 1209         18981 while ($block_runner->failed_attempt_count < $block_runner->max_attempts) {
963 1209         11077 eval {
964 1209 100       3198 unless (defined $wantarray) { $block_runner->$br_method($wrapper, $self, $coderef) }
  322 100       1224  
965 0         0 elsif ($wantarray) { @res = $block_runner->$br_method($wrapper, $self, $coderef) }
  167         708  
966 720         2374 else { $res[0] = $block_runner->$br_method($wrapper, $self, $coderef) }
967             };
968              
969             # 'run' resets failed_attempt_count, so subsequent attempts must use
970             # '_run', which does not
971 1209         2352122 $br_method = '_run';
972              
973 1209 50       3162 if (my $err = $@) {
974             # Time to really die
975 0 0 0     0 die $err if $err =~ /Reached max_attempts amount of / || $block_runner->failed_attempt_count >= $block_runner->max_attempts;
976              
977             # See if the retry handler likes it
978 0         0 push @{ $block_runner->exception_stack }, $err;
  0         0  
979 0         0 $block_runner->_set_failed_attempt_count( $block_runner->failed_attempt_count + 1 );
980 0 0       0 die $err unless $self->_retry_handler($block_runner);
981             }
982             else {
983 1209         7910 last;
984             }
985             }
986             }
987             else {
988 0         0 my $conn = $self->dbi_connector;
989 0 0       0 unless (defined $wantarray) { $conn->$method($coderef) }
  0 0       0  
990 0         0 elsif ($wantarray) { @res = $conn->$method($coderef) }
  0         0  
991 0         0 else { $res[0] = $conn->$method($coderef) }
992             }
993              
994 1209 100       4948 return $wantarray ? @res : $res[0];
995             }
996              
997             #pod =head3 dbh_runner_do
998             #pod
999             #pod $online_ddl->dbh_runner_do(
1000             #pod "ALTER TABLE $table_name ADD COLUMN foobar",
1001             #pod ["ALTER TABLE ? DROP COLUMN ?", undef, $table_name, 'baz'],
1002             #pod );
1003             #pod
1004             #pod Runs a list of commands, encapsulating each of them in a L</dbh_runner> coderef with calls
1005             #pod to C<do>. This is handy when you want to run a list of DDL commands, which you don't
1006             #pod care about the output of, but don't want to bundle them into a single non-idempotent
1007             #pod repeatable coderef. Or if you want to save typing on a single do-able SQL command.
1008             #pod
1009             #pod The items can either be a SQL string or an arrayref of options to pass to C<do>.
1010             #pod
1011             #pod The statement is assumed to be non-transactional. If you want to run a DB transaction,
1012             #pod you should use L</dbh_runner> instead.
1013             #pod
1014             #pod =cut
1015              
1016             sub dbh_runner_do {
1017 180     180 1 384311 my ($self, @commands) = @_;
1018              
1019 180         512 foreach my $command (@commands) {
1020 184         363 my $ref = ref $command;
1021 184 50 33     633 die "$ref references not valid in dbh_runner_do" if $ref && $ref ne 'ARRAY';
1022              
1023             $self->dbh_runner(run => set_subname '_dbh_runner_do', sub {
1024 184 50   184   1025 $_->do( $ref ? @$command : $command );
1025 184         1677 });
1026             }
1027             }
1028              
1029             #pod =head1 STEP METHODS
1030             #pod
1031             #pod You can call these methods individually, but using L</execute> instead is
1032             #pod highly recommended. If you do run these yourself, the exception will need to be caught
1033             #pod and the L</reversible> undo stack should be run to get the DB back to normal.
1034             #pod
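                #pod If you do run the steps manually, one way to keep that protection (mirroring what
                #pod L</execute> does internally) is to wrap them in the reversible object yourself:
                #pod
                #pod     my $oddl = DBIx::OnlineDDL->new(...);
                #pod
                #pod     $oddl->reversible->run_reversibly(sub {
                #pod         $oddl->create_new_table;
                #pod         $oddl->create_triggers;
                #pod         $oddl->copy_rows;
                #pod         $oddl->swap_tables;
                #pod     });
                #pod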
1035             #pod =head2 create_new_table
1036             #pod
1037             #pod Creates the new table, making sure to preserve as many of the original table's properties
1038             #pod as possible.
1039             #pod
1040             #pod =cut
1041              
1042             sub create_new_table {
1043 28     28 1 66 my $self = shift;
1044 28         101 my $dbh = $self->dbh;
1045 28         783685 my $vars = $self->_vars;
1046              
1047 28         243 my $progress = $vars->{progress_bar};
1048 28         427 my $reversible = $self->reversible;
1049 28         600 my $helper = $self->_helper;
1050              
1051 28         610 my $orig_table_name = $self->table_name;
1052 28         602 my $new_table_name = $self->new_table_name;
1053              
1054 28         759 my $orig_table_name_quote = $dbh->quote_identifier($orig_table_name);
1055 28         1225 my $new_table_name_quote = $dbh->quote_identifier($new_table_name);
1056              
1057             # ANSI quotes could also appear in the statement
1058 28         635 my $orig_table_name_ansi_quote = '"'.$orig_table_name.'"';
1059              
1060 28         199 $progress->message("Creating new table $new_table_name");
1061              
1062 28         1247 my $table_sql = $helper->create_table_sql($orig_table_name);
1063 28 50       130 die "Table $orig_table_name does not exist in the database!" unless $table_sql;
1064              
1065 28 50       118 $table_sql = $helper->rename_fks_in_table_sql($orig_table_name, $table_sql) if $helper->dbms_uses_global_fk_namespace;
1066              
1067             # Change the old->new table name
1068 28         190 my $orig_table_name_quote_re = '('.join('|',
1069             quotemeta($orig_table_name_quote), quotemeta($orig_table_name_ansi_quote), quotemeta($orig_table_name)
1070             ).')';
1071 28         843 $table_sql =~ s/(?<=^CREATE TABLE )$orig_table_name_quote_re/$new_table_name_quote/;
1072              
1073             # NOTE: This SQL will still have the old table name in self-referenced FKs. This is
1074             # okay, since no supported RDBMS currently auto-renames the referenced table name
1075             # during table moves, and the old table is still the definitive point-of-record until
1076             # the table swap. Furthermore, pointing the FK to the new table may cause bad FK
1077             # constraint failures within the triggers, if the referenced ID hasn't been copied to
1078             # the new table yet.
1079             #
1080             # If we ever have a RDBMS that does some sort of auto-renaming of FKs, we'll need to
1081             # accommodate it. It's also worth noting that turning FKs on during the session can
1082             # actually affect this kind of behavior. For example, both MySQL & SQLite will rename
1083             # them during table swaps, but only if the FK checks are on.
1084              
1085             # Actually create the table
1086 28         126 $self->dbh_runner_do($table_sql);
1087              
1088             # Undo commands, including a failure warning update
1089 28         1050 $reversible->failure_warning("\nDropping the new table and rolling back to start!\n\n");
1090 28     1   1821 $reversible->add_undo(sub { $self->dbh_runner_do("DROP TABLE $new_table_name_quote") });
  1         203  
1091              
1092 28         1783 $progress->update;
1093             }
1094              
1095             #pod =head2 create_triggers
1096             #pod
1097             #pod Creates triggers on the original table to make sure any new changes are captured into the
1098             #pod new table.
1099             #pod
1100             #pod =cut
1101              
1102             sub create_triggers {
1103 28     28 1 106 my $self = shift;
1104 28         99 my $rsrc = $self->rsrc;
1105 28         111 my $dbh = $self->dbh;
1106 28         768100 my $vars = $self->_vars;
1107              
1108 28         245 my $progress = $vars->{progress_bar};
1109 28         437 my $reversible = $self->reversible;
1110 28         606 my $helper = $self->_helper;
1111              
1112 28         263 my $catalog = $vars->{catalog};
1113 28         73 my $schema = $vars->{schema};
1114 28         466 my $orig_table_name = $self->table_name;
1115 28         631 my $new_table_name = $self->new_table_name;
1116              
1117             # Fire the before_triggers hook, which would typically include the DDL
1118 28         316 $self->fire_hook('before_triggers');
1119              
1120 28         1455 $progress->message("Creating triggers");
1121              
1122             # This shouldn't be cached until now, since the actual DDL may change the column list
1123 28         952 my @column_list = $self->_column_list;
1124              
1125             ### Look for a unique ID set
1126              
1127             # We need to find a proper PK or unique constraint for UPDATE/DELETE triggers.
1128             # Unlike BatchChunker, we can't just rely on part of a PK.
1129 28         59 my @unique_ids;
1130 28         125 my $indexes = $self->_get_idx_hash($orig_table_name);
1131              
1132 28         52 my %potential_unique_ids;
1133 28         96 $potential_unique_ids{ $_->{name} } = $_ for grep { $_->{unique} } values %$indexes;
  90         289  
1134              
1135 28         89 my %column_set = map { $_ => 1 } @column_list;
  100         209  
1136 28         113 foreach my $index_name ('PRIMARY',
1137             # sort by the number of columns (asc), though PRIMARY still has top priority
1138 17         28 sort { scalar(@{ $potential_unique_ids{$a}{columns} }) <=> scalar(@{ $potential_unique_ids{$b}{columns} }) }
  17         30  
  17         42  
1139 64         160 grep { $_ ne 'PRIMARY' }
1140             keys %potential_unique_ids
1141             ) {
1142 64         94 my @unique_cols = @{ $potential_unique_ids{$index_name}{columns} };
  64         158  
1143 64 50       159 next unless @unique_cols;
1144              
1145             # Only use this set if all of the columns exist in both tables
1146 64 50   102   245 next unless all { $column_set{$_} } @unique_cols;
  102         202  
1147              
1148 64         193 @unique_ids = @unique_cols;
1149             }
1150              
1151 28 50       98 die "Cannot find an appropriate unique index for $orig_table_name!" unless @unique_ids;
1152              
1153             ### Check to make sure existing triggers aren't on the table
1154              
1155 28 100       186 die "Found conflicting triggers on $orig_table_name! Please remove them first, so that our INSERT/UPDATE/DELETE triggers can be applied."
1156             if $helper->has_conflicting_triggers_on_table($orig_table_name);
1157              
1158             ### Find a good set of trigger names
1159              
1160 27         142 foreach my $trigger_type (qw< INSERT UPDATE DELETE >) {
1161 81         1920 my $trigger_name = $helper->find_new_trigger_identifier(
1162             "${orig_table_name}_onlineddl_".lc($trigger_type)
1163             );
1164 81         441 $vars->{trigger_names} {$trigger_type} = $trigger_name;
1165 81         461 $vars->{trigger_names_quoted}{$trigger_type} = $dbh->quote_identifier($trigger_name);
1166             }
1167              
1168             ### Now create the triggers, with (mostly) ANSI SQL
1169              
1170 27         860 my $orig_table_name_quote = $dbh->quote_identifier($orig_table_name);
1171 27         565 my $new_table_name_quote = $dbh->quote_identifier($new_table_name);
1172              
1173 27         586 my $column_list_str = join(', ', map { $dbh->quote_identifier($_) } @column_list );
  94         1336  
1174 27         713 my $new_column_list_str = join(', ', map { "NEW.".$dbh->quote_identifier($_) } @column_list );
  94         1285  
1175              
1176 27         706 my $nseo = $helper->null_safe_equals_op;
1177 27         57 my %trigger_dml_stmts;
1178              
1179             # Using REPLACE just in case the row already exists from the copy
1180 27         180 $trigger_dml_stmts{replace} = join("\n",
1181             "REPLACE INTO $new_table_name_quote",
1182             " ($column_list_str)",
1183             "VALUES",
1184             " ($new_column_list_str)",
1185             );
1186              
1187             my $update_unique_where_str = join(' AND ',
1188             (map {
1189 27         73 join(
  46         824  
1190             # Use NULL-safe equals, since unique indexes could be nullable
1191             " $nseo ",
1192             "OLD.".$dbh->quote_identifier($_),
1193             "NEW.".$dbh->quote_identifier($_),
1194             );
1195             } @unique_ids)
1196             );
1197              
1198             my $delete_unique_where_str = join(' AND ',
1199             (map {
1200 27         1060 join(
  46         775  
1201             # Use NULL-safe equals, since unique indexes could be nullable
1202             " $nseo ",
1203             "$new_table_name_quote.".$dbh->quote_identifier($_),
1204             "OLD.".$dbh->quote_identifier($_),
1205             );
1206             } @unique_ids)
1207             );
1208              
1209             # For the UPDATE trigger, DELETE the row, but only if the unique IDs have been
1210             # changed. The "NOT ($update_unique_where_str)" part prevents deleting rows where
1211             # the unique ID is untouched.
1212 27         1109 $trigger_dml_stmts{delete_for_update} = join("\n",
1213             "DELETE FROM $new_table_name_quote WHERE",
1214             " NOT ($update_unique_where_str) AND",
1215             " $delete_unique_where_str"
1216             );
1217              
1218 27         141 $trigger_dml_stmts{delete_for_delete} = join("\n",
1219             "DELETE FROM $new_table_name_quote WHERE",
1220             " $delete_unique_where_str"
1221             );
1222              
1223 27         155 $helper->modify_trigger_dml_stmts( \%trigger_dml_stmts );
1224              
1225 27         87 foreach my $trigger_type (qw< INSERT UPDATE DELETE >) {
1226             my $trigger_header = join(' ',
1227 81         3429 "CREATE TRIGGER ".$vars->{trigger_names_quoted}{$trigger_type},
1228             "AFTER $trigger_type ON $orig_table_name_quote FOR EACH ROW"
1229             );
1230              
1231             # Even though some of these are just a single SQL statement, some RDBMSs
1232             # (like SQLite) don't support leaving out the BEGIN/END keywords.
1233 81         205 my $trigger_sql = join("\n",
1234             $trigger_header,
1235             "BEGIN",
1236             '',
1237             );
1238              
1239 81 100       362 if ($trigger_type eq 'INSERT') {
    100          
    50          
1240             # INSERT trigger: Just a REPLACE command
1241 27         86 $trigger_sql .= $trigger_dml_stmts{replace}.';';
1242             }
1243             elsif ($trigger_type eq 'UPDATE') {
1244             # UPDATE trigger: DELETE special unique ID changes, then another REPLACE command.
1245             $trigger_sql .= join("\n",
1246             $trigger_dml_stmts{delete_for_update}.';',
1247 27         124 $trigger_dml_stmts{replace}.';',
1248             );
1249             }
1250             elsif ($trigger_type eq 'DELETE') {
1251             # DELETE trigger: Just a DELETE command
1252 27         92 $trigger_sql .= $trigger_dml_stmts{delete_for_delete}.';';
1253             }
1254 81         141 $trigger_sql .= "\nEND";
1255              
1256             # DOIT!
1257 81         265 $self->dbh_runner_do($trigger_sql);
1258              
1259             $reversible->add_undo(sub {
1260 0     0   0 $self->dbh_runner_do( "DROP TRIGGER IF EXISTS ".$self->_vars->{trigger_names_quoted}{$trigger_type} );
1261 81         3235 });
1262             }
1263              
1264 27         1657 $progress->update;
1265             }
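For orientation, here is a minimal standalone sketch of the kind of trigger SQL the loop above assembles (hypothetical table and column names, and assuming MySQL's NULL-safe `<=>` operator as the null_safe_equals_op; the real statements come from the helper and the quoted identifiers built above):

    # Hypothetical illustration only: build the UPDATE trigger SQL for an
    # "accounts" table with a single-column unique index on "id".
    use strict;
    use warnings;

    my $new  = '`_accounts_new`';
    my @cols = qw( id name email );

    my $col_list     = join ', ', map { "`$_`" }     @cols;
    my $new_col_list = join ', ', map { "NEW.`$_`" } @cols;

    my $trigger_sql = join "\n",
        'CREATE TRIGGER `accounts_onlineddl_update`',
        'AFTER UPDATE ON `accounts` FOR EACH ROW',
        'BEGIN',
        # Delete the already-copied row only if its unique ID changed...
        "DELETE FROM $new WHERE NOT (OLD.`id` <=> NEW.`id`) AND $new.`id` <=> OLD.`id`;",
        # ...then (re-)insert the current version of the row.
        "REPLACE INTO $new ($col_list) VALUES ($new_col_list);",
        'END';

    print "$trigger_sql\n";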
1266              
1267             #pod =head2 copy_rows
1268             #pod
1269             #pod Fires up a L process to copy all of the rows from the old table to
1270             #pod the new.
1271             #pod
1272             #pod =cut
1273              
1274             sub copy_rows {
1275 27     27 1 60 my $self = shift;
1276 27         123 my $dbh = $self->dbh;
1277 27         766654 my $vars = $self->_vars;
1278              
1279 27         318 my $progress = $vars->{progress_bar};
1280 27         128 my $copy_opts = $self->_fill_copy_opts;
1281              
1282 27         194 $progress->message("Copying all rows to the new table");
1283              
1284 27         1394 DBIx::BatchChunker->construct_and_execute( %$copy_opts );
1285 27         72687825 $vars->{new_table_copied} = 1;
1286              
1287             # Analyze the table, since we have a ton of new rows now
1288 27         116 $progress->message("Analyzing table");
1289 27         1283 $self->_helper->analyze_table( $self->new_table_name );
1290              
1291 27         213 $progress->update;
1292             }
1293              
1294             #pod =head2 swap_tables
1295             #pod
1296             #pod With the new table completely modified and set up, this swaps the old/new tables.
1297             #pod
1298             #pod =cut
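As a rough illustration of the swap itself (an assumption about the MySQL helper's behavior, not a guarantee for every driver), the three-way rename can be issued as a single atomic statement:

    # Hypothetical sketch, assuming an OnlineDDL object in $oddl and MySQL's
    # atomic multi-table rename; table names are examples only.
    $oddl->dbh_runner_do(
        'RENAME TABLE `accounts` TO `_accounts_old`, `_accounts_new` TO `accounts`'
    );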
1299              
1300             sub swap_tables {
1301 27     27 1 62 my $self = shift;
1302 27         113 my $dbh = $self->dbh;
1303 27         769161 my $vars = $self->_vars;
1304              
1305 27         244 my $progress = $vars->{progress_bar};
1306 27         445 my $reversible = $self->reversible;
1307 27         620 my $helper = $self->_helper;
1308              
1309 27         235 my $catalog = $vars->{catalog};
1310 27         69 my $schema = $vars->{schema};
1311 27         422 my $orig_table_name = $self->table_name;
1312 27         567 my $new_table_name = $self->new_table_name;
1313              
1314 27   50     350 my $escape = $dbh->get_info( $GetInfoType{SQL_SEARCH_PATTERN_ESCAPE} ) // '\\';
1315              
1316             # Fire the before_swap hook
1317 27         540 $self->fire_hook('before_swap');
1318              
1319 27 50 33     177 if ($helper->dbms_uses_global_fk_namespace || $helper->child_fks_need_adjusting) {
1320             # The existing parent/child FK list needs to be captured prior to the swap. The FKs
1321             # have already been created, and possibly changed/deleted, from the new table, so we
1322             # use that as reference. They have *not* been re-created on the child tables, so
1323             # the original table is used as reference.
1324 0   0     0 my $fk_hash = $vars->{foreign_keys}{definitions} //= {};
1325             $self->dbh_runner(run => set_subname '_fk_parent_info_query', sub {
1326 0     0   0 $fk_hash->{parent} = $self->_fk_info_to_hash( $helper->foreign_key_info(undef, undef, undef, $catalog, $schema, $new_table_name) );
1327 0         0 });
1328             $self->dbh_runner(run => set_subname '_fk_child_info_query', sub {
1329 0     0   0 $fk_hash->{child} = $self->_fk_info_to_hash( $helper->foreign_key_info($catalog, $schema, $orig_table_name, undef, undef, undef) );
1330 0         0 });
1331              
1332             # Furthermore, we should capture the indexes from parent/child tables in case the data
1333             # is needed for FK cleanup
1334 0   0     0 my $idx_hash = $vars->{indexes}{definitions} //= {};
1335 0 0 0     0 if ($dbh->can('statistics_info') && %$fk_hash) {
1336 0         0 foreach my $fk_table_name (
1337             uniq sort
1338 0 0 0     0 grep { defined && $_ ne $orig_table_name && $_ ne $new_table_name }
1339 0         0 map { ($_->{pk_table_name}, $_->{fk_table_name}) }
1340 0         0 (values %{$fk_hash->{parent}}, values %{$fk_hash->{child}})
  0         0  
1341             ) {
1342 0         0 $idx_hash->{$fk_table_name} = $self->_get_idx_hash($fk_table_name);
1343             }
1344             }
1345             }
1346              
1347             # Find an "_old" table name first
1348             my $old_table_name = $vars->{old_table_name} = $self->_find_new_identifier(
1349             "_${orig_table_name}_old" => set_subname('_old_table_name_finder', sub {
1350 27     27   101 my ($d, $like_expr) = @_;
1351 27         316 $like_expr =~ s/([_%])/$escape$1/g;
1352              
1353 27         222 $d->table_info($catalog, $schema, $like_expr)->fetchrow_array;
1354 27         366 }),
1355             'SQL_MAXIMUM_TABLE_NAME_LENGTH',
1356             );
1357 27         259 my $old_table_name_quote = $dbh->quote_identifier($old_table_name);
1358              
1359 27         1375 $progress->message("Swapping tables ($new_table_name --> $orig_table_name --> $old_table_name)");
1360              
1361             # Let's swap tables!
1362 27         1371 $helper->swap_tables($new_table_name, $orig_table_name, $old_table_name);
1363              
1364             # Kill the undo stack now, just in case something weird happens between now and the
1365             # end of the reversibly block. We've reached a "mostly successful" state, so rolling
1366             # back here would be undesirable.
1367 27         1308 $reversible->clear_undo;
1368 27         2093 $vars->{new_table_swapped} = 1;
1369              
1370 27         199 $progress->update;
1371             }
1372              
1373             #pod =head2 drop_old_table
1374             #pod
1375             #pod Drops the old table. This will also remove old foreign keys on child tables. (Those FKs
1376             #pod are re-applied to the new table in the next step.)
1377             #pod
1378             #pod =cut
1379              
1380             sub drop_old_table {
1381 27     27 1 76 my $self = shift;
1382 27         161 my $dbh = $self->dbh;
1383 27         661239 my $vars = $self->_vars;
1384              
1385 27         233 my $progress = $vars->{progress_bar};
1386 27         442 my $reversible = $self->reversible;
1387 27         607 my $helper = $self->_helper;
1388              
1389 27         238 my $old_table_name = $vars->{old_table_name};
1390 27         167 my $old_table_name_quote = $dbh->quote_identifier($old_table_name);
1391              
1392 27         1362 $reversible->failure_warning( join "\n",
1393             '',
1394             "The new table has been swapped, but since the process was interrupted, foreign keys will",
1395             "need to be cleaned up, and the old table dropped.",
1396             '',
1397             );
1398              
1399             # The RDBMS may complain about dangling non-functional FKs if the DROP happens first,
1400             # so let's remove those child FKs first, and reapply them later. We turn off FK
1401             # checks, so these constraint drops are quick and low risk.
1402             #
1403             # SQLite doesn't actually support DROP CONSTRAINT, but it doesn't do any messy business with
1404             # FK renames, either. So, SQLite can just skip this step.
1405 27 50       978 if ($helper->child_fks_need_adjusting) {
1406 0         0 $progress->message("Removing FKs from child tables");
1407              
1408 0         0 $self->dbh_runner_do(
1409             $helper->remove_fks_from_child_tables_stmts
1410             );
1411             }
1412              
1413             # Now, the actual DROP
1414 27         186 $progress->message("Dropping old table $old_table_name");
1415              
1416 27         1151 $self->dbh_runner_do("DROP TABLE $old_table_name_quote");
1417              
1418 27         190 $progress->update;
1419             }
1420              
1421             #pod =head2 cleanup_foreign_keys
1422             #pod
1423             #pod Clean up foreign keys on both the new and child tables.
1424             #pod
1425             #pod =cut
1426              
1427             sub cleanup_foreign_keys {
1428 27     27 1 63 my $self = shift;
1429 27         114 my $dbh = $self->dbh;
1430 27         669670 my $vars = $self->_vars;
1431              
1432 27         227 my $progress = $vars->{progress_bar};
1433 27         409 my $reversible = $self->reversible;
1434 27         713 my $helper = $self->_helper;
1435              
1436 27         608 $reversible->failure_warning( join "\n",
1437             '',
1438             "The new table is live, but since the process was interrupted, foreign keys will need to be",
1439             "cleaned up.",
1440             '',
1441             );
1442              
1443 27 50       909 if ($helper->dbms_uses_global_fk_namespace) {
1444             # The DB has global namespaces for foreign keys, so we are renaming them back to
1445             # their original names. The original table has already been dropped, so there's
1446             # no more risk of bumping into that namespace.
1447 0         0 $progress->message("Renaming parent FKs back to the original constraint names");
1448              
1449 0         0 $self->dbh_runner_do(
1450             $helper->rename_fks_back_to_original_stmts
1451             );
1452             }
1453              
1454 27 50       86 if ($helper->child_fks_need_adjusting) {
1455             # Since we captured the child FK names prior to the swap, they should have the
1456             # original FK names, even before MySQL's "helpful" changes on "${tbl_name}_ibfk_" FK
1457             # names.
1458 0         0 $progress->message("Adding FKs back on child tables");
1459              
1460 0         0 $self->dbh_runner_do(
1461             $helper->add_fks_back_to_child_tables_stmts
1462             );
1463              
1464             # The RDBMS may need some post-FK cleanup
1465 0         0 $progress->message("Post-FK cleanup");
1466              
1467 0         0 $self->dbh_runner_do(
1468             $helper->post_fk_add_cleanup_stmts
1469             );
1470             }
1471              
1472 27         152 $progress->update;
1473             }
1474              
1475             ### Private methods
1476              
1477             sub _find_new_identifier {
1478 139     139   2581 my ($self, $desired_identifier, $finder_sub, $length_info_str) = @_;
1479 139   100     672 $length_info_str ||= 'SQL_MAXIMUM_IDENTIFIER_LENGTH';
1480              
1481 139         244 state $hash_digits = ['a' .. 'z', '0' .. '9'];
1482              
1483 139         366 my $hash = join '', map { $hash_digits->[rand @$hash_digits] } 1 .. 10;
  1390         3037  
1484              
1485             # Test out some potential names
1486 139         843 my @potential_names = (
1487             $desired_identifier, "_${desired_identifier}",
1488             "${desired_identifier}_${hash}", "_${desired_identifier}_${hash}",
1489             $hash, "_${hash}"
1490             );
1491              
1492 139   50     493 my $max_len = $self->dbh->get_info( $GetInfoType{$length_info_str} ) || 256;
1493              
1494 139         3837918 my $new_name;
1495 139         393 foreach my $potential_name (@potential_names) {
1496 139         453 $potential_name = substr($potential_name, 0, $max_len); # avoid the ID name character limit
1497              
1498             my @results = $self->dbh_runner(run => set_subname '_find_new_identifier_dbh_runner', sub {
1499 139     139   627 $finder_sub->($_, $potential_name);
1500 139         1460 });
1501              
1502             # Skip if we found it
1503 139 50       658 next if @results;
1504              
1505 139         311 $new_name = $potential_name;
1506 139         261 last;
1507             }
1508              
1509             # This really shouldn't happen...
1510 139 50       425 die "Cannot find a proper identifier name for $desired_identifier! All of them are taken!" unless defined $new_name;
1511              
1512 139         1302 return $new_name;
1513             }
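In practice this private helper probes a handful of candidate names (the desired name, an underscore-prefixed variant, hashed variants) and returns the first one the callback reports as free. A hedged usage sketch (the table name and callback here are hypothetical):

    # Hypothetical usage: find an unused table name close to "_accounts_new".
    my $new_table_name = $self->_find_new_identifier(
        '_accounts_new' => sub {
            my ($dbh, $candidate) = @_;
            # Return a row if the name is taken, nothing if it is free
            $dbh->table_info(undef, undef, $candidate)->fetchrow_array;
        },
        'SQL_MAXIMUM_TABLE_NAME_LENGTH',
    );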
1514              
1515             sub _column_list {
1516 55     55   116 my $self = shift;
1517 55         183 my $dbh = $self->dbh;
1518 55         1521890 my $vars = $self->_vars;
1519              
1520 55         479 my $catalog = $vars->{catalog};
1521 55         155 my $schema = $vars->{schema};
1522 55         830 my $orig_table_name = $self->table_name;
1523 55         1156 my $new_table_name = $self->new_table_name;
1524              
1525 55         479 my (@old_column_list, @new_column_list);
1526             $self->dbh_runner(run => set_subname '_column_list_runner', sub {
1527 55     55   173 $dbh = $_;
1528             @old_column_list =
1529 194         73051 map { $_->{COLUMN_NAME} }
1530 55         149 @{ $dbh->column_info( $catalog, $schema, $orig_table_name, '%' )->fetchall_arrayref({ COLUMN_NAME => 1 }) }
  55         401  
1531             ;
1532             @new_column_list =
1533 222         52565 map { $_->{COLUMN_NAME} }
1534 55         206 @{ $dbh->column_info( $catalog, $schema, $new_table_name, '%' )->fetchall_arrayref({ COLUMN_NAME => 1 }) }
  55         274  
1535             ;
1536 55         659 });
1537              
1538             # We only care about columns that exist in both tables. If a column was added on the
1539             # new table, there's no data to copy. If a column was deleted from the new table, we
1540             # don't care about keeping it.
1541 55         315 my %new_column_set = map { $_ => 1 } @new_column_list;
  222         568  
1542 55         158 return grep { $new_column_set{$_} } @old_column_list;
  194         466  
1543             }
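In other words, the copied column list is the intersection of the two tables' columns, kept in the original table's order. A small standalone illustration (hypothetical column names):

    # Hypothetical illustration of the intersection above
    my @old_columns = qw( id name foobar email );
    my @new_columns = qw( id name email created_at );   # foobar dropped, created_at added

    my %new_column_set = map { $_ => 1 } @new_columns;
    my @copied_columns = grep { $new_column_set{$_} } @old_columns;   # id, name, email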
1544              
1545             sub _get_idx_hash {
1546 28     28   78 my ($self, $table_name) = @_;
1547              
1548 28         682 my $vars = $self->_vars;
1549 28         265 my $catalog = $vars->{catalog};
1550 28         81 my $schema = $vars->{schema};
1551              
1552             my %idxs = (
1553             PRIMARY => {
1554             name => 'PRIMARY',
1555             columns => [ $self->dbh_runner(run => set_subname '_pk_info_query', sub {
1556 28     28   280 $_->primary_key($catalog, $schema, $table_name)
1557 28         338 }) ],
1558             unique => 1,
1559             },
1560             );
1561 28 50       105 delete $idxs{PRIMARY} unless @{ $idxs{PRIMARY}{columns} };
  28         126  
1562              
1563 28 50       126 return \%idxs unless $self->dbh->can('statistics_info');
1564              
1565             # Sometimes, this still dies, even with the 'can' check (eg: older DBD::mysql drivers)
1566 28         776828 my $index_stats = [];
1567 28         62 eval {
1568             $index_stats = $self->dbh_runner(run => set_subname '_idx_info_query', sub {
1569 28     28   245 $_->statistics_info($catalog, $schema, $table_name, 0, 1)->fetchall_arrayref({});
1570 28         324 });
1571             };
1572 28 50       177 $index_stats = [] if $@;
1573              
1574 28         94 foreach my $index_name (uniq map { $_->{INDEX_NAME} } @$index_stats) {
  96         316  
1575 62     186   385 my $index_stat = first { $_->{INDEX_NAME} eq $index_name } @$index_stats;
  186         289  
1576             my @cols =
1577 96         198 map { $_->{COLUMN_NAME} }
1578 38         111 sort { $a->{ORDINAL_POSITION} <=> $b->{ORDINAL_POSITION} }
1579 62         162 grep { $_->{INDEX_NAME} eq $index_name }
  314         602  
1580             @$index_stats
1581             ;
1582             $idxs{$index_name} = {
1583             name => $index_name,
1584             columns => \@cols,
1585             unique => !$index_stat->{NON_UNIQUE},
1586 62         264 };
1587             }
1588              
1589 28         216 return \%idxs;
1590             }
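The returned hashref is keyed by index name, with 'PRIMARY' reserved for the primary key. Roughly (a hypothetical result for a table with a PK and one unique index):

    # Hypothetical return value of _get_idx_hash('accounts')
    my $indexes = {
        PRIMARY => {
            name    => 'PRIMARY',
            columns => ['id'],
            unique  => 1,
        },
        accounts_email_key => {
            name    => 'accounts_email_key',
            columns => ['email'],
            unique  => 1,
        },
    };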
1591              
1592             sub _fk_info_to_hash {
1593 0     0     my ($self, $fk_sth) = @_;
1594 0           my $vars = $self->_vars;
1595 0           my $dbh = $self->dbh;
1596              
1597             # NOTE: Need to account for alternate ODBC names
1598              
1599 0           my @fk_rows = @{ $fk_sth->fetchall_arrayref({}) };
  0            
1600             @fk_rows = sort {
1601             # Sort by FK name, then by the column sequence number
1602 0           $a->{FK_NAME} cmp $b->{FK_NAME} ||
1603             ($a->{KEY_SEQ} // $a->{ORDINAL_POSITION}) <=> ($b->{KEY_SEQ} // $b->{ORDINAL_POSITION})
1604 0 0 0       } @fk_rows;
      0        
1605              
1606 0           my (%fks, %create_table_sql);
1607 0           foreach my $row (@fk_rows) {
1608             # Some of these rows aren't even FKs
1609 0 0 0       next unless $row->{PKTABLE_NAME} || $row->{UK_TABLE_CAT};
1610 0 0 0       next unless $row->{FKTABLE_NAME} || $row->{FK_TABLE_NAME};
1611              
1612 0   0       my $fk_name = $row->{FK_NAME} // $row->{FKCOLUMN_NAME};
1613 0   0       my $fk_table_name = $row->{FKTABLE_NAME} // $row->{FK_TABLE_NAME};
1614              
1615             my $key = join( '.',
1616             $row->{PKTABLE_NAME} // $row->{UK_TABLE_CAT},
1617 0   0       $fk_name,
1618             );
1619              
1620             # Since there may be multiple columns per FK, those associated columns are
1621             # arrayrefs.
1622 0 0         unless ($fks{$key}) {
1623              
1624             $fks{$key} = {
1625             fk_name => $fk_name,
1626              
1627             # The table where the original PK exists
1628             pk_table_name => $row->{PKTABLE_NAME} // $row->{UK_TABLE_CAT},
1629             pk_columns => [ $row->{PKCOLUMN_NAME} // $row->{UK_COLUMN_NAME} ],
1630              
1631             # The table where the FK constraint has been declared
1632             fk_table_name => $fk_table_name,
1633 0   0       fk_columns => [ $row->{FKCOLUMN_NAME} // $row->{FK_COLUMN_NAME} ],
      0        
      0        
1634             };
1635              
1636             # Sadly, foreign_key_info doesn't always fill in all of the details for the FK, so the
1637             # CREATE TABLE SQL is actually the better record. Fortunately, this is all ANSI SQL.
1638 0   0       my $create_table_sql = $create_table_sql{$fk_table_name} //= $self->_helper->create_table_sql($fk_table_name);
1639 0           my $fk_name_quote_re = '(?:'.join('|',
1640             quotemeta( $dbh->quote_identifier($fk_name) ), quotemeta('"'.$fk_name.'"'), quotemeta($fk_name)
1641             ).')';
1642              
1643 0 0         if ($create_table_sql =~ m<
1644             CONSTRAINT \s $fk_name_quote_re \s ( # start capture of full SQL
1645             FOREIGN \s KEY \s \( [^\)]+ \) \s # "FOREIGN KEY" plus column list (which we already have above)
1646             REFERENCES \s [^\(]+ \s \( [^\)]+ \) # "REFERENCES" plus table+column list (again, already captured above)
1647             \s? ( [^\)\,]* ) # ON DELETE/UPDATE, DEFER, MATCH, etc.
1648             ) # end capture of full SQL
1649             >isx) {
1650 0           my ($fk_sql, $extra_sql) = ($1, $2);
1651 0           $fk_sql =~ s/^\s+|\s+$//g;
1652              
1653 0           $fks{$key}{fk_sql} = $fk_sql;
1654 0 0         $fks{$key}{delete_rule} = $1 if $extra_sql =~ /ON DELETE ((?:SET |NO )?\w+)/i;
1655 0 0         $fks{$key}{update_rule} = $1 if $extra_sql =~ /ON UPDATE ((?:SET |NO )?\w+)/i;
1656 0 0         $fks{$key}{defer} = $1 if $extra_sql =~ /((?:NOT )?DEFERRABLE(?: INITIALLY \w+)?)/i;
1657 0 0         $fks{$key}{match} = $1 if $extra_sql =~ /(MATCH \w+)/i;
1658             }
1659             }
1660             else {
1661 0   0       push @{ $fks{$key}{pk_columns} }, $row->{PKCOLUMN_NAME} // $row->{UK_COLUMN_NAME};
  0            
1662 0   0       push @{ $fks{$key}{fk_columns} }, $row->{FKCOLUMN_NAME} // $row->{FK_COLUMN_NAME};
  0            
1663             }
1664             }
1665              
1666 0           return \%fks;
1667             }
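The resulting hash is keyed by "<pk_table_name>.<fk_name>", with one entry per constraint. A hypothetical entry for a child table "orders" referencing "accounts" might look like:

    # Hypothetical output shape from _fk_info_to_hash
    my $fks = {
        'accounts.orders_fk_account_id' => {
            fk_name       => 'orders_fk_account_id',
            pk_table_name => 'accounts',
            pk_columns    => ['id'],
            fk_table_name => 'orders',
            fk_columns    => ['account_id'],
            fk_sql        => 'FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE',
            delete_rule   => 'CASCADE',
        },
    };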
1668              
1669             sub _fk_to_sql {
1670 0     0     my ($self, $fk) = @_;
1671 0           my $dbh = $self->dbh;
1672              
1673             # Everything after the CONSTRAINT keyword (ANSI SQL)
1674              
1675 0 0         if ($fk->{fk_sql}) {
1676             # Already have most of the SQL
1677             return join(' ',
1678             $dbh->quote_identifier($fk->{fk_name}),
1679             $fk->{fk_sql},
1680 0           );
1681             }
1682              
1683             return join(' ',
1684             $dbh->quote_identifier($fk->{fk_name}),
1685             'FOREIGN KEY',
1686 0           '('.join(', ', map { $dbh->quote_identifier($_) } @{ $fk->{fk_columns} }).')',
  0            
1687             'REFERENCES',
1688             $dbh->quote_identifier($fk->{pk_table_name}),
1689 0           '('.join(', ', map { $dbh->quote_identifier($_) } @{ $fk->{pk_columns} }).')',
  0            
1690             ( $fk->{match} ? $fk->{match} : () ),
1691             ( $fk->{delete_rule} ? 'ON DELETE '.$fk->{delete_rule} : () ),
1692             ( $fk->{update_rule} ? 'ON UPDATE '.$fk->{update_rule} : () ),
1693 0 0         ( $fk->{defer} ? $fk->{defer} : () ),
    0          
    0          
    0          
1694             );
1695             }
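Given a record like the hypothetical one above, the reconstructed constraint body (everything after the CONSTRAINT keyword) would come out roughly as:

    # Hypothetical usage; the example FK and its output are illustrative only
    my $constraint_body = $oddl->_fk_to_sql( $fks->{'accounts.orders_fk_account_id'} );
    # `orders_fk_account_id` FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE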
1696              
1697             #pod =head1 SEE ALSO
1698             #pod
1699             #pod =over
1700             #pod
1701             #pod =item *
1702             #pod
1703             #pod L
1704             #pod
1705             #pod =item *
1706             #pod
1707             #pod L
1708             #pod
1709             #pod =item *
1710             #pod
1711             #pod L
1712             #pod
1713             #pod =item *
1714             #pod
1715             #pod L
1716             #pod
1717             #pod =back
1718             #pod
1719             #pod =head1 WHY YET ANOTHER OSC?
1720             #pod
1721             #pod The biggest reason is that none of the above fully support foreign key constraints.
1722             #pod Percona's C<pt-online-schema-change> comes close, but also includes this paragraph:
1723             #pod
1724             #pod Due to a limitation in MySQL, foreign keys will not have the same names after the ALTER
1725             #pod that they did prior to it. The tool has to rename the foreign key when it redefines it,
1726             #pod which adds a leading underscore to the name. In some cases, MySQL also automatically
1727             #pod renames indexes required for the foreign key.
1728             #pod
1729             #pod So, tables swapped with C<pt-osc> are not exactly what they used to be before the swap.
1730             #pod It also had a number of other quirks that just didn't work out for us, related to FKs and
1731             #pod the number of switches required to make it (semi-)work.
1732             #pod
1733             #pod Additionally, by making DBIx::OnlineDDL its own Perl module, it's a lot easier to run
1734             #pod Perl-based schema changes alongside L without having to switch
1735             #pod between Perl and CLI. If other people want to subclass this module for their own
1736             #pod environment-specific quirks, they have the power to do so, too.
1737             #pod
1738             #pod =cut
1739              
1740             1;
1741              
1742             __END__