| line | stmt | bran | cond | sub | pod | time | code |
|
1
|
|
|
|
|
|
|
package DBIx::DataStore; |
|
2
|
|
|
|
|
|
|
$DBIx::DataStore::VERSION = '0.097'; |
|
3
|
2
|
|
|
2
|
|
178147
|
use strict; |
|
|
2
|
|
|
|
|
5
|
|
|
|
2
|
|
|
|
|
61
|
|
|
4
|
2
|
|
|
2
|
|
9
|
use warnings; |
|
|
2
|
|
|
|
|
2
|
|
|
|
2
|
|
|
|
|
61
|
|
|
5
|
|
|
|
|
|
|
|
|
6
|
|
|
|
|
|
|
# ABSTRACT: Abstracts away oft-repeated parts of DBI and simplifies the way SQL is issued. |
|
7
|
|
|
|
|
|
|
|
|
8
|
2
|
|
|
2
|
|
3312
|
use DBI; |
|
|
2
|
|
|
|
|
33427
|
|
|
|
2
|
|
|
|
|
15784
|
|
|
9
|
|
|
|
|
|
|
|
|
10
|
|
|
|
|
|
|
=head1 NAME |
|
11
|
|
|
|
|
|
|
|
|
12
|
|
|
|
|
|
|
DBIx::DataStore |
|
13
|
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
=head1 DESCRIPTION |
|
15
|
|
|
|
|
|
|
|
|
16
|
|
|
|
|
|
|
DBIx::DataStore is designed to abstract away the oft-repeated parts of DBI and to |
|
17
|
|
|
|
|
|
|
simplify the way you issue SQL statements to your database(s). |
|
18
|
|
|
|
|
|
|
|
|
19
|
|
|
|
|
|
|
=head1 EXAMPLE |
|
20
|
|
|
|
|
|
|
|
|
21
|
|
|
|
|
|
|
As a fairly contrived example, below is a chunk of code using this module in a |
|
22
|
|
|
|
|
|
|
relatively simple way. It starts by instantiating a new DBIx::DataStore object |
|
23
|
|
|
|
|
|
|
connected to the "commerce" data store. It then issues a single SELECT query, |
|
24
|
|
|
|
|
|
|
with the optional paginating feature turned on and set to the first page of |
|
25
|
|
|
|
|
|
|
results. It then gets a Data::Page object through the pager() method and loops |
|
26
|
|
|
|
|
|
|
over the first page's worth of results from the database to print them. |
|
27
|
|
|
|
|
|
|
|
|
28
|
|
|
|
|
|
|
use DBIx::DataStore ( config => 'yaml' ); |
|
29
|
|
|
|
|
|
|
|
|
30
|
|
|
|
|
|
|
my $db = DBIx::DataStore->new('commerce'); |
|
31
|
|
|
|
|
|
|
|
|
32
|
|
|
|
|
|
|
my $results = $db->do({ page => 1, per_page => 15 }, q{ |
|
33
|
|
|
|
|
|
|
select p.name, p.price, c.name as category |
|
34
|
|
|
|
|
|
|
from products p |
|
35
|
|
|
|
|
|
|
join product_categories pc on (pc.product_id = p.id) |
|
36
|
|
|
|
|
|
|
join categories c on (c.id = pc.category_id) |
|
37
|
|
|
|
|
|
|
where c.id in ??? |
|
38
|
|
|
|
|
|
|
and p.price between ? and ? |
|
39
|
|
|
|
|
|
|
order by p.price desc, p.name asc |
|
40
|
|
|
|
|
|
|
}, [2,3,5], 17, 23); |
|
41
|
|
|
|
|
|
|
|
|
42
|
|
|
|
|
|
|
my $pager = $results->pager; |
|
43
|
|
|
|
|
|
|
|
|
44
|
|
|
|
|
|
|
while ($results->next) { |
|
45
|
|
|
|
|
|
|
print sprintf("%s was found in category %s for \$%.2f.\n", |
|
46
|
|
|
|
|
|
|
@{$results}{qw( name category price )}); |
|
47
|
|
|
|
|
|
|
} |
|
48
|
|
|
|
|
|
|
|
|
49
|
|
|
|
|
|
|
print sprintf("\nShowing %d to %d of %d total results.\n", |
|
50
|
|
|
|
|
|
|
$pager->first, $pager->last, $pager->total_entries); |
|
51
|
|
|
|
|
|
|
|
|
52
|
|
|
|
|
|
|
And here is what the output from that code might look like. |
|
53
|
|
|
|
|
|
|
|
|
54
|
|
|
|
|
|
|
Golden Apple was found in category Food for $22.24. |
|
55
|
|
|
|
|
|
|
Mermaid Statue was found in category Artwork for $17.76. |
|
56
|
|
|
|
|
|
|
Norton's Epaulets was found in category Clothing for $17.76. |
|
57
|
|
|
|
|
|
|
|
|
58
|
|
|
|
|
|
|
Showing 1 to 3 of 3 total results. |
|
59
|
|
|
|
|
|
|
|
|
60
|
|
|
|
|
|
|
=head2 IMPORTANT NOTICE |
|
61
|
|
|
|
|
|
|
|
|
62
|
|
|
|
|
|
|
This is the legacy release of DBIx::DataStore and has a low chance of seeing |
|
63
|
|
|
|
|
|
|
future (non-critical bug fix) releases. It is being published for the primary |
|
64
|
|
|
|
|
|
|
purpose of easing the maintenance of existing installations. |
|
65
|
|
|
|
|
|
|
|
|
66
|
|
|
|
|
|
|
Future versions of this module will make attempts to maintain as much backwards |
|
67
|
|
|
|
|
|
|
compatibility as possible, but there are no guarantees that every feature or |
|
68
|
|
|
|
|
|
|
method will carry over unchanged from the user perspective. It is recommended |
|
69
|
|
|
|
|
|
|
that, if you build something around this module, you pin to pre-1.0 |
|
70
|
|
|
|
|
|
|
versions. A future release which breaks functionality with what is presented |
|
71
|
|
|
|
|
|
|
here will begin with a new major version. |
|
72
|
|
|
|
|
|
|
|
|
73
|
|
|
|
|
|
|
This code has been in heavy production use at multiple companies for almost |
|
74
|
|
|
|
|
|
|
fifteen years and is considered pretty (though not perfectly) stable. You are |
|
75
|
|
|
|
|
|
|
welcome to make use of it, in the form presented here, in your own projects. |
|
76
|
|
|
|
|
|
|
Significant feature requests for this version will likely be met with a |
|
77
|
|
|
|
|
|
|
somewhat low priority, and development of new applications or libraries with it |
|
78
|
|
|
|
|
|
|
is not strongly encouraged. |
|
79
|
|
|
|
|
|
|
|
|
80
|
|
|
|
|
|
|
Critical security and bug fix requests will be reviewed. |
|
81
|
|
|
|
|
|
|
|
|
82
|
|
|
|
|
|
|
=head1 CONCEPTS |
|
83
|
|
|
|
|
|
|
|
|
84
|
|
|
|
|
|
|
In using DBIx::DataStore, there are three main types of objects with which you'll |
|
85
|
|
|
|
|
|
|
generally interact. |
|
86
|
|
|
|
|
|
|
|
|
87
|
|
|
|
|
|
|
=over |
|
88
|
|
|
|
|
|
|
|
|
89
|
|
|
|
|
|
|
=item * |
|
90
|
|
|
|
|
|
|
|
|
91
|
|
|
|
|
|
|
Database objects |
|
92
|
|
|
|
|
|
|
|
|
93
|
|
|
|
|
|
|
These objects manage the connections to your database servers (either one or two |
|
94
|
|
|
|
|
|
|
servers at all times, depending on whether you have any reader databases |
|
95
|
|
|
|
|
|
|
configured) and are used to issue all commands to your database backend. |
|
96
|
|
|
|
|
|
|
|
|
97
|
|
|
|
|
|
|
=item * |
|
98
|
|
|
|
|
|
|
|
|
99
|
|
|
|
|
|
|
Result Set objects |
|
100
|
|
|
|
|
|
|
|
|
101
|
|
|
|
|
|
|
Every time you issue a read-oriented query through the C<do> method of a |
|
102
|
|
|
|
|
|
|
database object, a new result set object is created. These objects are what you |
|
103
|
|
|
|
|
|
|
use to access the results of your query. Unlike the normal method of accessing |
|
104
|
|
|
|
|
|
|
a row's data using DBI methods directly, with DBIx::DataStore result set objects, |
|
105
|
|
|
|
|
|
|
the current row's data is accessed through the result set object itself. |
|
106
|
|
|
|
|
|
|
|
|
107
|
|
|
|
|
|
|
Result set objects also contain a single result row object at any given time (or |
|
108
|
|
|
|
|
|
|
no result row object if you haven't yet called the C<next> method on a result |
|
109
|
|
|
|
|
|
|
set object). You don't actually directly interact with a distinct object for |
|
110
|
|
|
|
|
|
|
each row -- row methods are issued through the result set object to act on the |
|
111
|
|
|
|
|
|
|
currently visible row, and will simply fall through to the row object. |
|
112
|
|
|
|
|
|
|
|
|
113
|
|
|
|
|
|
|
=item * |
|
114
|
|
|
|
|
|
|
|
|
115
|
|
|
|
|
|
|
Pager objects |
|
116
|
|
|
|
|
|
|
|
|
117
|
|
|
|
|
|
|
These objects are used only when you request one by calling the C<pager> |
|
118
|
|
|
|
|
|
|
method on a result set object. They are normal L<Data::Page> objects, so refer |
|
119
|
|
|
|
|
|
|
to the documentation for that module for further details. Please note that you |
|
120
|
|
|
|
|
|
|
can only use pager objects on a result set when you passed in at least one of |
|
121
|
|
|
|
|
|
|
C<page> or C<per_page> arguments to the C<do> method. Without either of those |
|
122
|
|
|
|
|
|
|
arguments, your query will be performed in non-paginated mode and you will trigger |
|
123
|
|
|
|
|
|
|
an error if you attempt to call the C<pager> method on your result set. You |
|
124
|
|
|
|
|
|
|
will also get an error if you explicitly turned paging support off when loading |
|
125
|
|
|
|
|
|
|
DBIx::DataStore. |
|
126
|
|
|
|
|
|
|
|
|
127
|
|
|
|
|
|
|
=back |
|
128
|
|
|
|
|
|
|
|
|
129
|
|
|
|
|
|
|
=head1 MODULE ARGUMENTS |
|
130
|
|
|
|
|
|
|
|
|
131
|
|
|
|
|
|
|
DBIx::DataStore allows a number of options to be specified at the time you |
|
132
|
|
|
|
|
|
|
import the module into your code. These options, and their effects, are |
|
133
|
|
|
|
|
|
|
described here. Arguments are passed as a hash (not a reference) to the |
|
134
|
|
|
|
|
|
|
module as part of the use statement. For example, to load DBIx::DataStore |
|
135
|
|
|
|
|
|
|
with the default options, except for debugging which we'll set to "5", do: |
|
136
|
|
|
|
|
|
|
|
|
137
|
|
|
|
|
|
|
use DBIx::DataStore ( debug => 5 ); |
|
138
|
|
|
|
|
|
|
|
|
139
|
|
|
|
|
|
|
Below is a description of each option that can be fiddled with on module |
|
140
|
|
|
|
|
|
|
import. The name in parentheses at the start of each subsection is the |
|
141
|
|
|
|
|
|
|
key name to use in the hash passed to DBIx::DataStore during use. |
|
142
|
|
|
|
|
|
|
|
|
143
|
|
|
|
|
|
|
=head2 Debugging (debug) |
|
144
|
|
|
|
|
|
|
|
|
145
|
|
|
|
|
|
|
Accepts any integer value. Non-integer values, or any numbers zero or lower |
|
146
|
|
|
|
|
|
|
will turn off debugging. Any positive integers will turn on debugging, with |
|
147
|
|
|
|
|
|
|
higher numbers producing more debugging output. Typically, a debugging level |
|
148
|
|
|
|
|
|
|
of 1 will only produce non-fatal-but-concerning debug messages, analogous to |
|
149
|
|
|
|
|
|
|
an INFO level. At level 2 messages will generally be produced that are more |
|
150
|
|
|
|
|
|
|
warning-only in nature, but not serious issues. Debugging level 3 introduces |
|
151
|
|
|
|
|
|
|
submodule and method entry messages into the output. Debugging level 4 and |
|
152
|
|
|
|
|
|
|
above are more or less reserved for "here" statements to trace detailed |
|
153
|
|
|
|
|
|
|
code execution. |
|
154
|
|
|
|
|
|
|
|
|
155
|
|
|
|
|
|
|
Debugging level 5 is (or at least should be) the highest number that actually |
|
156
|
|
|
|
|
|
|
introduces any changes in the output. This level should not actually trigger |
|
157
|
|
|
|
|
|
|
any more calls to the internal logger, so in terms of the number of statements |
|
158
|
|
|
|
|
|
|
it should be functionally equivalent to debugging level 4. However, unlike |
|
159
|
|
|
|
|
|
|
lower levels of output, this will cause a full stack trace to be produced for |
|
160
|
|
|
|
|
|
|
every single call to the logger. As such, this debugging level is only |
|
161
|
|
|
|
|
|
|
recommended for tracking down really nasty bugs or for general use by the |
|
162
|
|
|
|
|
|
|
clinically insane. |
|
163
|
|
|
|
|
|
|
|
|
164
|
|
|
|
|
|
|
Calls to the internal logger are handled by a foldable constant, so there |
|
165
|
|
|
|
|
|
|
should be no performance penalty at all when debugging is turned off -- the |
|
166
|
|
|
|
|
|
|
Perl compiler should remove those calls from the code entirely. |
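|
As a minimal illustration of that idiom (a sketch only -- the constant and |
logger names simply mirror the C<DEBUG>/C<dslog> names that appear later in |
this source): |
|
    use constant DEBUG => 0;    # compile-time constant |
    sub dslog { print STDERR join(' ', @_), "\n"; } |
|
    dslog('expensive trace') if DEBUG;   # pruned entirely when DEBUG is 0 |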
|
167
|
|
|
|
|
|
|
|
|
168
|
|
|
|
|
|
|
=head2 Configuration Loader (config) |
|
169
|
|
|
|
|
|
|
|
|
170
|
|
|
|
|
|
|
DBIx::DataStore can use multiple configuration formats. Right now support |
|
171
|
|
|
|
|
|
|
only exists for YAML, but if you'd rather use INI files or on-disk Storable |
|
172
|
|
|
|
|
|
|
serialized data structures (and if a DBIx::DataStore::Config submodule has |
|
173
|
|
|
|
|
|
|
been written to support it) you're more than welcome to change that. This |
|
174
|
|
|
|
|
|
|
is done by passing in the config argument when loading DBIx::DataStore. |
|
175
|
|
|
|
|
|
|
|
|
176
|
|
|
|
|
|
|
You can also indicate that none of the configuration loader submodules should |
|
177
|
|
|
|
|
|
|
be used by not passing in a config argument at all. If you do this, you |
|
178
|
|
|
|
|
|
|
will be expected to pass in an appropriate configuration data structure |
|
179
|
|
|
|
|
|
|
(details on that later in this document) to the constructor. |
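|
For instance (C<%my_config> here is just a placeholder for a structure like |
the one shown later under the Explicit Hashref Configuration Example): |
|
    # With the YAML configuration loader: |
    use DBIx::DataStore ( config => 'yaml' ); |
    my $db = DBIx::DataStore->new('commerce'); |
|
    # With no loader at all, handing a config structure to the constructor: |
    use DBIx::DataStore; |
    my $db2 = DBIx::DataStore->new({ config => \%my_config }); |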
|
180
|
|
|
|
|
|
|
|
|
181
|
|
|
|
|
|
|
Note that if you do use a configuration loader, it reads its actual |
|
182
|
|
|
|
|
|
|
configuration files and does the processing work immediately when DBIx::DataStore |
|
183
|
|
|
|
|
|
|
is imported, then caches the parsed configuration data. Thus, you shouldn't |
|
184
|
|
|
|
|
|
|
have to worry about the performance in web-based applications if you |
|
185
|
|
|
|
|
|
|
have a facility to pre-load this module (such as mod_perl in Apache) |
|
186
|
|
|
|
|
|
|
when you start the web server. |
|
187
|
|
|
|
|
|
|
|
|
188
|
|
|
|
|
|
|
=head2 Home Directory Configurations (use_home) |
|
189
|
|
|
|
|
|
|
|
|
190
|
|
|
|
|
|
|
This option goes hand in hand with the config option, and indicates to any |
|
191
|
|
|
|
|
|
|
relevant configuration loaders that they should also look inside the |
|
192
|
|
|
|
|
|
|
current user's home directory for configuration files. This is turned off |
|
193
|
|
|
|
|
|
|
by default because of the extra modules that are loaded (File::HomeDir and |
|
194
|
|
|
|
|
|
|
all of its dependencies) as well as the extra CPU time and stat calls |
|
195
|
|
|
|
|
|
|
necessary to do these checks. |
|
196
|
|
|
|
|
|
|
|
|
197
|
|
|
|
|
|
|
=head2 Result Set Paginating (paging) |
|
198
|
|
|
|
|
|
|
|
|
199
|
|
|
|
|
|
|
By default, Data::Page is automatically imported for use by the C<pager> |
|
200
|
|
|
|
|
|
|
method on result sets. In situations where you have no need for paging |
|
201
|
|
|
|
|
|
|
of your result sets and wish to avoid the extra time and memory spent on |
|
202
|
|
|
|
|
|
|
that code, you can explicitly disable it. Note that if you do so and then |
|
203
|
|
|
|
|
|
|
try to call the pager method on a result set, you will trigger a fatal |
|
204
|
|
|
|
|
|
|
error. |
|
205
|
|
|
|
|
|
|
|
|
206
|
|
|
|
|
|
|
You can also set this option to "auto" which allows you to call pager() |
|
207
|
|
|
|
|
|
|
without dying, but won't load Data::Page and its dependencies until the |
|
208
|
|
|
|
|
|
|
first time you need it. This load-on-demand can be bad in some cases, |
|
209
|
|
|
|
|
|
|
though, even if it seems like a good idea. In single-process code that |
|
210
|
|
|
|
|
|
|
may or may not ever need to page something, setting this to auto would |
|
211
|
|
|
|
|
|
|
make sense. In a situation like mod_perl in Apache, it is advised |
|
212
|
|
|
|
|
|
|
against. |
|
213
|
|
|
|
|
|
|
|
|
214
|
|
|
|
|
|
|
With load on demand in mod_perl, you end up only loading it |
|
215
|
|
|
|
|
|
|
for a single Apache process when it's first needed. If more than one |
|
216
|
|
|
|
|
|
|
process needs it, more than one copy is loaded. If those processes are |
|
217
|
|
|
|
|
|
|
eventually killed (through settings like maximum keepalive requests) and it's |
|
218
|
|
|
|
|
|
|
needed again, then it has to be loaded all over again. Instead, preloading |
|
219
|
|
|
|
|
|
|
it in the main Apache process creates a single copy available to every |
|
220
|
|
|
|
|
|
|
child Apache process for the lifetime of that Apache run. |
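|
A short sketch of the three settings described above: |
|
    use DBIx::DataStore ( paging => 1 );       # load Data::Page at import time |
    use DBIx::DataStore ( paging => 0 );       # never load it; pager() becomes fatal |
    use DBIx::DataStore ( paging => 'auto' );  # defer loading until pager() is called |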
|
221
|
|
|
|
|
|
|
|
|
222
|
|
|
|
|
|
|
=head1 DATABASE METHODS |
|
223
|
|
|
|
|
|
|
|
|
224
|
|
|
|
|
|
|
=head2 General methods |
|
225
|
|
|
|
|
|
|
|
|
226
|
|
|
|
|
|
|
The following methods are your primary interface to database objects. Typically |
|
227
|
|
|
|
|
|
|
you will only be calling the C<new> method once in your application, but unless |
|
228
|
|
|
|
|
|
|
you have very simple database needs you will almost certainly be making many |
|
229
|
|
|
|
|
|
|
calls to the C<do> method. |
|
230
|
|
|
|
|
|
|
|
|
231
|
|
|
|
|
|
|
=over |
|
232
|
|
|
|
|
|
|
|
|
233
|
|
|
|
|
|
|
=item new() |
|
234
|
|
|
|
|
|
|
|
|
235
|
|
|
|
|
|
|
The constructor method actually supports multiple distinct syntaxes. The first |
|
236
|
|
|
|
|
|
|
is the old syntax from the SQL::Wrapper module (the immediate predecessor to |
|
237
|
|
|
|
|
|
|
DBIx::DataStore and never widely released). This syntax is deprecated and |
|
238
|
|
|
|
|
|
|
will some day be removed, so it is not discussed here (look at the code if you |
|
239
|
|
|
|
|
|
|
really must know what it is). |
|
240
|
|
|
|
|
|
|
|
|
241
|
|
|
|
|
|
|
There are three main forms of the currently-supported constructor syntax. The |
|
242
|
|
|
|
|
|
|
first of these is to simply specify the name of the data store to which you |
|
243
|
|
|
|
|
|
|
want to connect, and optionally an alternate schema list: |
|
244
|
|
|
|
|
|
|
|
|
245
|
|
|
|
|
|
|
my $db = DBIx::DataStore->new($datastore); |
|
246
|
|
|
|
|
|
|
my $db = DBIx::DataStore->new($datastore, @schemas); |
|
247
|
|
|
|
|
|
|
|
|
248
|
|
|
|
|
|
|
This should be a single scalar value containing a string that matches the name of |
|
249
|
|
|
|
|
|
|
one of the datastores defined in your configuration (whether it be YAML or any of |
|
250
|
|
|
|
|
|
|
the other configuration loaders supported). |
|
251
|
|
|
|
|
|
|
|
|
252
|
|
|
|
|
|
|
The second form allows more control over specific parts of a datastore's |
|
253
|
|
|
|
|
|
|
configuration and connection parameters: |
|
254
|
|
|
|
|
|
|
|
|
255
|
|
|
|
|
|
|
my $db = DBIx::DataStore->new({ store => $datastore, ... }); |
|
256
|
|
|
|
|
|
|
|
|
257
|
|
|
|
|
|
|
This version allows for overriding not just the schemas, but which reader |
|
258
|
|
|
|
|
|
|
should be used, changing the default settings for statement preparation, statement |
|
259
|
|
|
|
|
|
|
caching and so on. |
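|
A rough sketch of what such a call might look like. Aside from C<store>, the |
key names below are assumptions drawn from options described elsewhere in |
this document, not a definitive list: |
|
    my $db = DBIx::DataStore->new({ |
        store            => 'commerce',             # datastore name from your config |
        schemas          => ['myschema', 'public'], # alternate schema search path |
        reader           => '__random',             # which reader to use |
        cache_statements => 1,                      # statement caching override |
    }); |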
|
260
|
|
|
|
|
|
|
|
|
261
|
|
|
|
|
|
|
TODO: Go into more detail on how exactly to set these extra options. |
|
262
|
|
|
|
|
|
|
|
|
263
|
|
|
|
|
|
|
The last is the simplest, to pass in no arguments at all to the constructor. |
|
264
|
|
|
|
|
|
|
One of three things will happen. First, DBIx::DataStore will get a list |
|
265
|
|
|
|
|
|
|
of all the package names from the caller's stack, and starting with the |
|
266
|
|
|
|
|
|
|
bottom, working its way up to the very top of the stack, will look for any |
|
267
|
|
|
|
|
|
|
datastore which matches one of those package names with the regular |
|
268
|
|
|
|
|
|
|
expression in its "packages" variable. The first match to succeed will |
|
269
|
|
|
|
|
|
|
cause that datastore to be used for the connection. |
|
270
|
|
|
|
|
|
|
|
|
271
|
|
|
|
|
|
|
If no matches were found, then a datastore is looked for which has the |
|
272
|
|
|
|
|
|
|
"is_default" flag set to a true value. If there is one, then that datastore |
|
273
|
|
|
|
|
|
|
will be used. If that check fails, then an error is produced indicating that |
|
274
|
|
|
|
|
|
|
there was no suitable choice for a default datastore connection. |
|
275
|
|
|
|
|
|
|
|
|
276
|
|
|
|
|
|
|
=item do(\%options, $query, @binds) |
|
277
|
|
|
|
|
|
|
|
|
278
|
|
|
|
|
|
|
This method requires at least one argument (C<$query>) but can in some cases be |
|
279
|
|
|
|
|
|
|
called with far more than that. The first argument is optional and is used |
|
280
|
|
|
|
|
|
|
primarily to enable automated pagination of query results. If passed in, it |
|
281
|
|
|
|
|
|
|
should contain at least one of "page" (which will default to 1 if not specified) |
|
282
|
|
|
|
|
|
|
or "per_page" (which defaults to 25). If neither is passed in, automatic |
|
283
|
|
|
|
|
|
|
pagination will not be available for the returned result set. |
|
284
|
|
|
|
|
|
|
|
|
285
|
|
|
|
|
|
|
If you do take advantage of the paginating options to this method, do not |
|
286
|
|
|
|
|
|
|
include any sort of row limit clause in your actual query (such as "LIMIT x |
|
287
|
|
|
|
|
|
|
OFFSET y", "ROWS x TO y" or whatever the flavor is for your particular database |
|
288
|
|
|
|
|
|
|
server). This method will add that clause to your query as appropriate. |
|
289
|
|
|
|
|
|
|
|
|
290
|
|
|
|
|
|
|
The next argument (which would be the first one if you choose to leave |
|
291
|
|
|
|
|
|
|
pagination disabled) is required and must contain the SQL statement you would |
|
292
|
|
|
|
|
|
|
like issued to your database server. Regular scalar placeholders are allowed, |
|
293
|
|
|
|
|
|
|
as well as a non-DBI placeholder that can be used with arrays and hashes (in |
|
294
|
|
|
|
|
|
|
specific contexts). Refer to the L</"PLACEHOLDERS"> section of this document |
|
295
|
|
|
|
|
|
|
for details. |
|
296
|
|
|
|
|
|
|
|
|
297
|
|
|
|
|
|
|
All arguments after the query will be used as your bind variables when executing |
|
298
|
|
|
|
|
|
|
the SQL statement. They must be in the same order as the actual placeholders |
|
299
|
|
|
|
|
|
|
within your query and you must provide the exact same number of bind variables |
|
300
|
|
|
|
|
|
|
as you did placeholders. Failure to do so will result in a fatal error. |
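|
A minimal, unpaginated example using ordinary scalar placeholders: |
|
    my $results = $db->do(q{ |
        select id, name |
        from products |
        where price > ? |
    }, 10); |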
|
301
|
|
|
|
|
|
|
|
|
302
|
|
|
|
|
|
|
This method, when successful, returns a DBIx::DataStore result set object (see |
|
303
|
|
|
|
|
|
|
L</"RESULT SET METHODS"> for details on what you can do with these). |
|
304
|
|
|
|
|
|
|
|
|
305
|
|
|
|
|
|
|
=back |
|
306
|
|
|
|
|
|
|
|
|
307
|
|
|
|
|
|
|
=head2 Transaction related methods |
|
308
|
|
|
|
|
|
|
|
|
309
|
|
|
|
|
|
|
The following methods are used when dealing with transactions. If your database |
|
310
|
|
|
|
|
|
|
server does not support transactions, these will be useless to you. You'll |
|
311
|
|
|
|
|
|
|
probably also receive fatal errors if you try to use them. |
|
312
|
|
|
|
|
|
|
|
|
313
|
|
|
|
|
|
|
=over |
|
314
|
|
|
|
|
|
|
|
|
315
|
|
|
|
|
|
|
=item begin() |
|
316
|
|
|
|
|
|
|
|
|
317
|
|
|
|
|
|
|
This method starts a new transaction block in your current database session. |
|
318
|
|
|
|
|
|
|
Please note that not all databases support nested transactions (and even those |
|
319
|
|
|
|
|
|
|
that do may have limits on how deeply they can be nested). You will receive an |
|
320
|
|
|
|
|
|
|
error if you attempt to call this method multiple times (with no intervening |
|
321
|
|
|
|
|
|
|
C<commit> or C<rollback> calls) when using a database that does not support |
|
322
|
|
|
|
|
|
|
nested transactions. You may want to look into whether savepoints will suffice |
|
323
|
|
|
|
|
|
|
for your needs in those cases. |
|
324
|
|
|
|
|
|
|
|
|
325
|
|
|
|
|
|
|
As soon as you open a transaction, B<all> queries issued to your database |
|
326
|
|
|
|
|
|
|
through the C<do> method will be sent to your primary server. No queries will |
|
327
|
|
|
|
|
|
|
be issued to your reader database until you either commit or rollback the |
|
328
|
|
|
|
|
|
|
transaction. |
|
329
|
|
|
|
|
|
|
|
|
330
|
|
|
|
|
|
|
=item commit() |
|
331
|
|
|
|
|
|
|
|
|
332
|
|
|
|
|
|
|
This method ends the current transaction block in your database session. If you |
|
333
|
|
|
|
|
|
|
are using a database server which supports nested transactions, you may need to |
|
334
|
|
|
|
|
|
|
call this method as many times as you called C<begin> (or you may not -- at |
|
335
|
|
|
|
|
|
|
least some versions of Oracle, for instance, default to committing B<all> your |
|
336
|
|
|
|
|
|
|
open transactions unless you set a server/session variable). |
|
337
|
|
|
|
|
|
|
|
|
338
|
|
|
|
|
|
|
=item rollback($savepoint) |
|
339
|
|
|
|
|
|
|
|
|
340
|
|
|
|
|
|
|
This method takes one optional argument. Called with no arguments, it rolls |
|
341
|
|
|
|
|
|
|
back all of the changes you've made to your database within the current |
|
342
|
|
|
|
|
|
|
transaction block. If you are using a database server that supports savepoints, |
|
343
|
|
|
|
|
|
|
you may also pass in a single argument with the name of an already defined |
|
344
|
|
|
|
|
|
|
savepoint (it B have been defined within the current transaction block, |
|
345
|
|
|
|
|
|
|
and not have already been cleared out by another rollback) to undo all of the |
|
346
|
|
|
|
|
|
|
changes made I<since> the savepoint was created. Passing in an invalid |
|
347
|
|
|
|
|
|
|
savepoint name will generate a fatal error. |
|
348
|
|
|
|
|
|
|
|
|
349
|
|
|
|
|
|
|
=item savepoint($name) |
|
350
|
|
|
|
|
|
|
|
|
351
|
|
|
|
|
|
|
Calling this method on a database server which supports savepoints will create a |
|
352
|
|
|
|
|
|
|
new savepoint at the current point of your open transaction with the name you |
|
353
|
|
|
|
|
|
|
provide. This method can only be called when you have an open transaction |
|
354
|
|
|
|
|
|
|
block. Attempts to call it outside of a transaction will trigger a fatal error. |
|
355
|
|
|
|
|
|
|
It is also your responsibility to make sure you use a unique name for each of |
|
356
|
|
|
|
|
|
|
your savepoints if you require more than one. |
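|
A short sketch tying these methods together (the C<accounts> table is |
hypothetical): |
|
    $db->begin; |
    $db->do(q{ update accounts set balance = balance - ? where id = ? }, 100, 42); |
    $db->savepoint('before_fee'); |
    $db->do(q{ update accounts set balance = balance - ? where id = ? }, 5, 42); |
    $db->rollback('before_fee');   # undo only the fee deduction |
    $db->commit;                   # keep the first update |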
|
357
|
|
|
|
|
|
|
|
|
358
|
|
|
|
|
|
|
=item in_transaction() |
|
359
|
|
|
|
|
|
|
|
|
360
|
|
|
|
|
|
|
This method returns true if your database object is currently in an open |
|
361
|
|
|
|
|
|
|
transaction. |
|
362
|
|
|
|
|
|
|
|
|
363
|
|
|
|
|
|
|
=back |
|
364
|
|
|
|
|
|
|
|
|
365
|
|
|
|
|
|
|
=head2 Convenience methods |
|
366
|
|
|
|
|
|
|
|
|
367
|
|
|
|
|
|
|
=over |
|
368
|
|
|
|
|
|
|
|
|
369
|
|
|
|
|
|
|
=item base_tables() |
|
370
|
|
|
|
|
|
|
|
|
371
|
|
|
|
|
|
|
This method returns a list of the tables, excluding any views. |
|
372
|
|
|
|
|
|
|
|
|
373
|
|
|
|
|
|
|
=item databases() |
|
374
|
|
|
|
|
|
|
|
|
375
|
|
|
|
|
|
|
This method returns a list of the database (schema) names available on the |
|
376
|
|
|
|
|
|
|
primary database server. |
|
377
|
|
|
|
|
|
|
|
|
378
|
|
|
|
|
|
|
=item db_primary() |
|
379
|
|
|
|
|
|
|
|
|
380
|
|
|
|
|
|
|
This method returns a hash reference containing the following information about |
|
381
|
|
|
|
|
|
|
the primary database server: name (always "primary"), driver, host and database. |
|
382
|
|
|
|
|
|
|
|
|
383
|
|
|
|
|
|
|
=item db_reader() |
|
384
|
|
|
|
|
|
|
|
|
385
|
|
|
|
|
|
|
This method returns a hash reference containing the following information about |
|
386
|
|
|
|
|
|
|
the currently selected reader database: name, driver, host and database. |
|
387
|
|
|
|
|
|
|
|
|
388
|
|
|
|
|
|
|
=item last_insert_id() |
|
389
|
|
|
|
|
|
|
|
|
390
|
|
|
|
|
|
|
This method is a pass-through for DBI's C<last_insert_id> function. All the |
|
391
|
|
|
|
|
|
|
same caveats apply to this method. But just in case you aren't familiar with |
|
392
|
|
|
|
|
|
|
them, basically consider this method unreliable on many database servers. It |
|
393
|
|
|
|
|
|
|
should only be used with care, and only if you know your underlying RDBMS's |
|
394
|
|
|
|
|
|
|
DBD driver will do The Right Thing. |
|
395
|
|
|
|
|
|
|
|
|
396
|
|
|
|
|
|
|
=item ping() |
|
397
|
|
|
|
|
|
|
|
|
398
|
|
|
|
|
|
|
This method, when called, attempts to issue a very simple SQL statement |
|
399
|
|
|
|
|
|
|
(generally "select 1") against both the primary and reader database servers (or |
|
400
|
|
|
|
|
|
|
primary only if no reader has been selected). A true value will be returned if |
|
401
|
|
|
|
|
|
|
the statements were successful. |
|
402
|
|
|
|
|
|
|
|
|
403
|
|
|
|
|
|
|
=item schemas() |
|
404
|
|
|
|
|
|
|
|
|
405
|
|
|
|
|
|
|
If called with no arguments, returns a list of the schemas currently in the |
|
406
|
|
|
|
|
|
|
search path for the primary server connection. If called with a list of |
|
407
|
|
|
|
|
|
|
scalar arguments, sets the connection's search path to those schemas (in |
|
408
|
|
|
|
|
|
|
the order they were provided). |
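|
For example: |
|
    my @search_path = $db->schemas;        # current search path |
    $db->schemas('myschema', 'public');    # replace the search path |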
|
409
|
|
|
|
|
|
|
|
|
410
|
|
|
|
|
|
|
=item servers() |
|
411
|
|
|
|
|
|
|
|
|
412
|
|
|
|
|
|
|
Returns a list of hash references, detailing the database servers defined in the |
|
413
|
|
|
|
|
|
|
YAML config file. All servers defined are returned, regardless of whether they |
|
414
|
|
|
|
|
|
|
have, or will, respond to connections. |
|
415
|
|
|
|
|
|
|
|
|
416
|
|
|
|
|
|
|
Within each hash reference the following key/value pairs are provided: name (as |
|
417
|
|
|
|
|
|
|
defined in the configuration file), driver, host and database. The first hash |
|
418
|
|
|
|
|
|
|
reference in the returned list will always be the server defined as the primary, |
|
419
|
|
|
|
|
|
|
followed by the readers sorted by their names. |
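|
For example, to print a summary of every configured server: |
|
    foreach my $server ($db->servers) { |
        printf("%s: %s database %s on host %s\n", |
            $server->{'name'}, $server->{'driver'}, |
            $server->{'database'}, $server->{'host'}); |
    } |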
|
420
|
|
|
|
|
|
|
|
|
421
|
|
|
|
|
|
|
=item tables() |
|
422
|
|
|
|
|
|
|
|
|
423
|
|
|
|
|
|
|
This method returns a list of the table names that are present within the |
|
424
|
|
|
|
|
|
|
currently selected database (schema) on the primary server. The list returned |
|
425
|
|
|
|
|
|
|
will also include views (use C<base_tables> if you don't want the views). |
|
426
|
|
|
|
|
|
|
|
|
427
|
|
|
|
|
|
|
=item views() |
|
428
|
|
|
|
|
|
|
|
|
429
|
|
|
|
|
|
|
This method will return a list of the views defined within your current schema. |
|
430
|
|
|
|
|
|
|
|
|
431
|
|
|
|
|
|
|
=back |
|
432
|
|
|
|
|
|
|
|
|
433
|
|
|
|
|
|
|
=head1 RESULT SET METHODS |
|
434
|
|
|
|
|
|
|
|
|
435
|
|
|
|
|
|
|
Every call to the C<do> method on a database object which contains a |
|
436
|
|
|
|
|
|
|
read-oriented SQL query returns a result set object. These objects can then be |
|
437
|
|
|
|
|
|
|
used to access the data contained within the database query's results. |
|
438
|
|
|
|
|
|
|
|
|
439
|
|
|
|
|
|
|
=over |
|
440
|
|
|
|
|
|
|
|
|
441
|
|
|
|
|
|
|
=item Hash and Array accessors |
|
442
|
|
|
|
|
|
|
|
|
443
|
|
|
|
|
|
|
Each time you retrieve a record (aka "result row") from a query's result set, |
|
444
|
|
|
|
|
|
|
the values for each column in that record can be transparently accessed through |
|
445
|
|
|
|
|
|
|
hash keys (where the keys are the column names as defined by the original query) |
|
446
|
|
|
|
|
|
|
or array indices (in the order the columns were defined by the query). Both |
|
447
|
|
|
|
|
|
|
methods of accessing the record's values are available at all times (unlike the |
|
448
|
|
|
|
|
|
|
standard DBI methods where you have to choose up front between using |
|
449
|
|
|
|
|
|
|
C<fetchrow_hashref> or C<fetchrow_arrayref>). Thus, something like the |
|
450
|
|
|
|
|
|
|
following is perfectly acceptable: |
|
451
|
|
|
|
|
|
|
|
|
452
|
|
|
|
|
|
|
my $result = $db->do(q{ |
|
453
|
|
|
|
|
|
|
select id, name from users order by name asc |
|
454
|
|
|
|
|
|
|
}); |
|
455
|
|
|
|
|
|
|
while ($result->next) { |
|
456
|
|
|
|
|
|
|
print sprintf("ID %d: %s\n", |
|
457
|
|
|
|
|
|
|
$result->[0], |
|
458
|
|
|
|
|
|
|
$result->{'name'} |
|
459
|
|
|
|
|
|
|
); |
|
460
|
|
|
|
|
|
|
} |
|
461
|
|
|
|
|
|
|
|
|
462
|
|
|
|
|
|
|
=item next() |
|
463
|
|
|
|
|
|
|
|
|
464
|
|
|
|
|
|
|
Retrieves the next row of results from the result set. The row's data is then |
|
465
|
|
|
|
|
|
|
directly accessible through the result set object itself (see L</"Hash and Array |
|
466
|
|
|
|
|
|
|
accessors">). This method also returns a reference to the result set object, |
|
467
|
|
|
|
|
|
|
making the following two snippets of code effectively identical (though the |
|
468
|
|
|
|
|
|
|
second is unnecessarily verbose): |
|
469
|
|
|
|
|
|
|
|
|
470
|
|
|
|
|
|
|
while ($result->next) { |
|
471
|
|
|
|
|
|
|
print $result->{'some_col_name'}; |
|
472
|
|
|
|
|
|
|
} |
|
473
|
|
|
|
|
|
|
|
|
474
|
|
|
|
|
|
|
or |
|
475
|
|
|
|
|
|
|
|
|
476
|
|
|
|
|
|
|
while (my $row = $result->next) { |
|
477
|
|
|
|
|
|
|
print $row->{'some_col_name'}; |
|
478
|
|
|
|
|
|
|
} |
|
479
|
|
|
|
|
|
|
|
|
480
|
|
|
|
|
|
|
The return value will be undef when there are no more rows to retrieve from the |
|
481
|
|
|
|
|
|
|
database. |
|
482
|
|
|
|
|
|
|
|
|
483
|
|
|
|
|
|
|
=item next_hashref() |
|
484
|
|
|
|
|
|
|
|
|
485
|
|
|
|
|
|
|
Similar to a next() call, in that it moves to the next row in the result set |
|
486
|
|
|
|
|
|
|
(or returns an undefined value when all rows have been read already). However, |
|
487
|
|
|
|
|
|
|
this method returns a stand-alone hash reference containing as keys the column |
|
488
|
|
|
|
|
|
|
names from the query, and as values the contents of the current row of the |
|
489
|
|
|
|
|
|
|
result set. |
|
490
|
|
|
|
|
|
|
|
|
491
|
|
|
|
|
|
|
=item hashref() |
|
492
|
|
|
|
|
|
|
|
|
493
|
|
|
|
|
|
|
Returns the exact same data structure as next_hashref(), except that it does |
|
494
|
|
|
|
|
|
|
not move to the next row in the result set first. You get a hash representation |
|
495
|
|
|
|
|
|
|
of the current row from the results, not the next row. |
|
496
|
|
|
|
|
|
|
|
|
497
|
|
|
|
|
|
|
=item all() |
|
498
|
|
|
|
|
|
|
|
|
499
|
|
|
|
|
|
|
This method retrieves all rows from the database at once and returns a list of |
|
500
|
|
|
|
|
|
|
result set row objects, each one containing a single row from the result set. |
|
501
|
|
|
|
|
|
|
It is functionally equivalent to the following: |
|
502
|
|
|
|
|
|
|
|
|
503
|
|
|
|
|
|
|
my (@rows); |
|
504
|
|
|
|
|
|
|
while (my $row = $result->next) { |
|
505
|
|
|
|
|
|
|
push(@rows, $row); |
|
506
|
|
|
|
|
|
|
} |
|
507
|
|
|
|
|
|
|
|
|
508
|
|
|
|
|
|
|
Please keep in mind that, internally, that is effectively what this method does |
|
509
|
|
|
|
|
|
|
itself (though slightly more efficiently). So C<all> won't actually return |
|
510
|
|
|
|
|
|
|
everything if you've already called C<next> one or more times on your |
|
511
|
|
|
|
|
|
|
result set. You will only get the rows you have not yet retrieved via other |
|
512
|
|
|
|
|
|
|
means -- which also means that only the first call to this method will actually |
|
513
|
|
|
|
|
|
|
do anything. |
|
514
|
|
|
|
|
|
|
|
|
515
|
|
|
|
|
|
|
It is recommended you don't use this method if you will actually be operating on |
|
516
|
|
|
|
|
|
|
each row's data in your Perl code. You're better off using C<next> and doing |
|
517
|
|
|
|
|
|
|
whatever processing you need to on each row. However, this method is useful if |
|
518
|
|
|
|
|
|
|
you're passing the results of your SQL query directly into another module like |
|
519
|
|
|
|
|
|
|
Template Toolkit and don't actually need to do any processing on the data before |
|
520
|
|
|
|
|
|
|
handing it off. |
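|
For instance, handing an entire result set to a template (Template Toolkit is |
shown purely as an illustration; C<$template> and the template name are |
hypothetical): |
|
    $template->process('product_list.tt', { products => [ $results->all ] }) |
        or die $template->error; |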
|
521
|
|
|
|
|
|
|
|
|
522
|
|
|
|
|
|
|
=item columns() |
|
523
|
|
|
|
|
|
|
|
|
524
|
|
|
|
|
|
|
This method returns a list of the column names from your SQL query, in the same |
|
525
|
|
|
|
|
|
|
order they were returned by the database. |
|
526
|
|
|
|
|
|
|
|
|
527
|
|
|
|
|
|
|
=item count() |
|
528
|
|
|
|
|
|
|
|
|
529
|
|
|
|
|
|
|
This method will return the number of rows that were either returned by the |
|
530
|
|
|
|
|
|
|
database (in the case of read-oriented queries) or the number of the rows that |
|
531
|
|
|
|
|
|
|
were affected by your query (in the case of updates, inserts, etc.). |
|
532
|
|
|
|
|
|
|
|
|
533
|
|
|
|
|
|
|
If you used pagination in your call to the C<do> method, the number returned |
|
534
|
|
|
|
|
|
|
by this method is the number of rows your select query would have returned |
|
535
|
|
|
|
|
|
|
B<without> pagination. This makes it very simple to do basic pagination of |
|
536
|
|
|
|
|
|
|
your query results, but still be able to display to the end user of your |
|
537
|
|
|
|
|
|
|
application how many total results there are. |
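|
For example: |
|
    my $results = $db->do({ page => 2, per_page => 10 }, q{ |
        select * from products |
    }); |
    printf("Page 2 of %d total matching rows\n", $results->count); |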
|
538
|
|
|
|
|
|
|
|
|
539
|
|
|
|
|
|
|
There is a catch to this feature, though. Your database server B<must> support |
|
540
|
|
|
|
|
|
|
subqueries against derived tables for this method to succeed. If you aren't |
|
541
|
|
|
|
|
|
|
sure whether your database supports this feature (most of them which support |
|
542
|
|
|
|
|
|
|
subqueries do), you can try to run the following SQL query (change "some_table" |
|
543
|
|
|
|
|
|
|
to a table name that actually exists first): |
|
544
|
|
|
|
|
|
|
|
|
545
|
|
|
|
|
|
|
select count(*) from (select * from some_table) derived |
|
546
|
|
|
|
|
|
|
|
|
547
|
|
|
|
|
|
|
Some database servers, such as Oracle, don't allow you to give a name to a |
|
548
|
|
|
|
|
|
|
derived table in a SQL query like the one above. But if you're running Oracle, |
|
549
|
|
|
|
|
|
|
this method is properly supported anyway (for what it's worth, the only change |
|
550
|
|
|
|
|
|
|
to that query above to have it work on Oracle is to omit the word "derived" at |
|
551
|
|
|
|
|
|
|
the end). |
|
552
|
|
|
|
|
|
|
|
|
553
|
|
|
|
|
|
|
=item pager() |
|
554
|
|
|
|
|
|
|
|
|
555
|
|
|
|
|
|
|
This method will return a L<Data::Page> object which you can use for easier |
|
556
|
|
|
|
|
|
|
paginating of your database query results. You cannot call this method on a |
|
557
|
|
|
|
|
|
|
result set object which was created from a call to C<do> that lacked the |
|
558
|
|
|
|
|
|
|
optional pagination options. Attempting to do so will generate a fatal error. |
|
559
|
|
|
|
|
|
|
|
|
560
|
|
|
|
|
|
|
=back |
|
561
|
|
|
|
|
|
|
|
|
562
|
|
|
|
|
|
|
=head1 PLACEHOLDERS |
|
563
|
|
|
|
|
|
|
|
|
564
|
|
|
|
|
|
|
In addition to the standard scalar-value placeholder available through standard |
|
565
|
|
|
|
|
|
|
DBI calls, this module adds another type of placeholder which can be used for |
|
566
|
|
|
|
|
|
|
inserting entire arrays or hashes into your queries. There are limitations on |
|
567
|
|
|
|
|
|
|
how and where they can be used, though. Both types of placeholders are written |
|
568
|
|
|
|
|
|
|
as a series of three question marks. |
|
569
|
|
|
|
|
|
|
|
|
570
|
|
|
|
|
|
|
These additional forms for placeholders are B<optional> in your queries. You |
|
571
|
|
|
|
|
|
|
are not forced to always use array placeholders for your IN clauses, nor are you |
|
572
|
|
|
|
|
|
|
forced to use the hash placeholders for your UPDATEs and INSERTs. You are more |
|
573
|
|
|
|
|
|
|
than welcome to use the regular style placeholders if you really prefer them (or |
|
574
|
|
|
|
|
|
|
have some other reason to not use the more convenient forms added by this |
|
575
|
|
|
|
|
|
|
module). You can also mix and match the styles within any single query as well, |
|
576
|
|
|
|
|
|
|
having one IN specified with single-value placeholders and another IN with an |
|
577
|
|
|
|
|
|
|
array placeholder in a single query. |
|
578
|
|
|
|
|
|
|
|
|
579
|
|
|
|
|
|
|
=over |
|
580
|
|
|
|
|
|
|
|
|
581
|
|
|
|
|
|
|
=item * |
|
582
|
|
|
|
|
|
|
|
|
583
|
|
|
|
|
|
|
Array placeholders |
|
584
|
|
|
|
|
|
|
|
|
585
|
|
|
|
|
|
|
These allow you to pass in a list of values to be used in an IN (...) clause, |
|
586
|
|
|
|
|
|
|
without you having to know or care how many elements are in the array. They |
|
587
|
|
|
|
|
|
|
cannot be used anywhere else but as part of an IN (although placing the |
|
588
|
|
|
|
|
|
|
parentheses around the placeholder is optional -- it will be added if you didn't |
|
589
|
|
|
|
|
|
|
include it). |
|
590
|
|
|
|
|
|
|
|
|
591
|
|
|
|
|
|
|
Example: |
|
592
|
|
|
|
|
|
|
|
|
593
|
|
|
|
|
|
|
my $result = $db->do(q{ |
|
594
|
|
|
|
|
|
|
select * from users where id in (???) |
|
595
|
|
|
|
|
|
|
}, \@userid_list); |
|
596
|
|
|
|
|
|
|
|
|
597
|
|
|
|
|
|
|
If your list of user IDs contained the values "1", "2" and "3" this would have |
|
598
|
|
|
|
|
|
|
achieved the exact same effect as writing out the C<do> call more |
|
599
|
|
|
|
|
|
|
verbosely as: |
|
600
|
|
|
|
|
|
|
|
|
601
|
|
|
|
|
|
|
my $result = $db->do(q{ |
|
602
|
|
|
|
|
|
|
select * from users where id in (?,?,?) |
|
603
|
|
|
|
|
|
|
}, 1, 2, 3); |
|
604
|
|
|
|
|
|
|
|
|
605
|
|
|
|
|
|
|
But then, you would have needed to know exactly how many elements were going to |
|
606
|
|
|
|
|
|
|
be in the list of IDs you wanted to match against and would have had to write |
|
607
|
|
|
|
|
|
|
that many single placeholders in the query yourself. Because the values in your |
|
608
|
|
|
|
|
|
|
list remain as real placeholders under the hood of this module, you can still |
|
609
|
|
|
|
|
|
|
take advantage of statement caching and the like from the underlying DBI methods |
|
610
|
|
|
|
|
|
|
(assuming you have that turned on), and the values you pass into the query will |
|
611
|
|
|
|
|
|
|
be safely escaped as usual. |
|
612
|
|
|
|
|
|
|
|
|
613
|
|
|
|
|
|
|
=item * |
|
614
|
|
|
|
|
|
|
|
|
615
|
|
|
|
|
|
|
Hash placeholders for UPDATE |
|
616
|
|
|
|
|
|
|
|
|
617
|
|
|
|
|
|
|
These can be used to fill in the SET portion of your UPDATE statement. Each |
|
618
|
|
|
|
|
|
|
key/value pair will be turned into "column = ?" with the values of your hash |
|
619
|
|
|
|
|
|
|
remaining as placeholders so the same advantages of array placeholders apply |
|
620
|
|
|
|
|
|
|
here as well. |
|
621
|
|
|
|
|
|
|
|
|
622
|
|
|
|
|
|
|
Example: |
|
623
|
|
|
|
|
|
|
|
|
624
|
|
|
|
|
|
|
my $num_rows_updated = $db->do(q{ |
|
625
|
|
|
|
|
|
|
update users set ??? where id = ? |
|
626
|
|
|
|
|
|
|
}, { name => $new_name }, $user_id); |
|
627
|
|
|
|
|
|
|
|
|
628
|
|
|
|
|
|
|
Writing out the normal placeholder(s) yourself would work too, but would get |
|
629
|
|
|
|
|
|
|
pretty annoying if you're updating many columns at once. |
|
630
|
|
|
|
|
|
|
|
|
631
|
|
|
|
|
|
|
=item * |
|
632
|
|
|
|
|
|
|
|
|
633
|
|
|
|
|
|
|
Hash placeholders for INSERT |
|
634
|
|
|
|
|
|
|
|
|
635
|
|
|
|
|
|
|
The second place in which hash placeholders can be used is for INSERT |
|
636
|
|
|
|
|
|
|
statements. Multiple record inserts are also supported, just put all the hash |
|
637
|
|
|
|
|
|
|
references containing each record's data into an array reference. |
|
638
|
|
|
|
|
|
|
|
|
639
|
|
|
|
|
|
|
There is one slight catch with using hash placeholders in INSERT statements. |
|
640
|
|
|
|
|
|
|
You cannot specify the column name list inside your SQL query. The C<do> |
|
641
|
|
|
|
|
|
|
method will fill that in for you when it processes the placeholder. The |
|
642
|
|
|
|
|
|
|
I<values> keyword in your query is optional, but if present, the placeholder |
|
643
|
|
|
|
|
|
|
must come after it, not before. |
|
644
|
|
|
|
|
|
|
|
|
645
|
|
|
|
|
|
|
Example: |
|
646
|
|
|
|
|
|
|
|
|
647
|
|
|
|
|
|
|
my $num_rows_inserted = $db->do(q{ |
|
648
|
|
|
|
|
|
|
insert into users values ??? |
|
649
|
|
|
|
|
|
|
}, { name => $name, email => $email, ... }); |
|
650
|
|
|
|
|
|
|
|
|
651
|
|
|
|
|
|
|
=back |
|
652
|
|
|
|
|
|
|
|
|
653
|
|
|
|
|
|
|
=head1 EXCEPTIONS |
|
654
|
|
|
|
|
|
|
|
|
655
|
|
|
|
|
|
|
=head2 Exceptions to selection of secondary servers for read-only statements |
|
656
|
|
|
|
|
|
|
|
|
657
|
|
|
|
|
|
|
If indicated by the "reader" argument to new(), a secondary server may be used |
|
658
|
|
|
|
|
|
|
for distributing the load of read-only statements. However, no statements |
|
659
|
|
|
|
|
|
|
within a transaction will be issued to any server other than the primary, |
|
660
|
|
|
|
|
|
|
regardless of the "reader" setting. Also, if only a single server (the primary) |
|
661
|
|
|
|
|
|
|
is defined in the config, but "__random" is indicated in new()'s arguments, it |
|
662
|
|
|
|
|
|
|
will have no effect (all statements will be issued through the primary) and no |
|
663
|
|
|
|
|
|
|
errors will be reported. Thus, it is safe to use "__random" even if you have no |
|
664
|
|
|
|
|
|
|
secondary databases (and can save you time updating your code later if you add |
|
665
|
|
|
|
|
|
|
more database servers down the road). |
|
666
|
|
|
|
|
|
|
|
|
667
|
|
|
|
|
|
|
=head1 CONFIGURATION |
|
668
|
|
|
|
|
|
|
|
|
669
|
|
|
|
|
|
|
Database server configuration may currently be done through either a YAML file or |
|
670
|
|
|
|
|
|
|
by passing in an equivalent data structure to your C<new> call. Other |
|
671
|
|
|
|
|
|
|
file formats would be possible with an appropriate config loader, |
|
672
|
|
|
|
|
|
|
but YAML is the only one currently supported. |
|
673
|
|
|
|
|
|
|
|
|
674
|
|
|
|
|
|
|
You can override where the configuration file is located when calling the new() |
|
675
|
|
|
|
|
|
|
method, or you can pass in a scalar containing raw, unprocessed YAML, or even |
|
676
|
|
|
|
|
|
|
pass in a hash reference which contains a data structure identical to what YAML |
|
677
|
|
|
|
|
|
|
would have returned itself if you need to bypass the YAML parsing for any |
|
678
|
|
|
|
|
|
|
reason. The latter is particularly useful if your application already has its |
|
679
|
|
|
|
|
|
|
own configuration files and you wish to embed the DBIx::DataStore config data |
|
680
|
|
|
|
|
|
|
within them. |
|
681
|
|
|
|
|
|
|
|
|
682
|
|
|
|
|
|
|
Your configuration must contain at least one "primary" server definition, and |
|
683
|
|
|
|
|
|
|
may contain any number of "reader" server definitions (or none at all if you |
|
684
|
|
|
|
|
|
|
only have a single server). Each server definition in the config must contain |
|
685
|
|
|
|
|
|
|
the following: DBD driver name, host address, database name, username and |
|
686
|
|
|
|
|
|
|
password. You may optionally include a list of key/value pairs for each server |
|
687
|
|
|
|
|
|
|
under the heading "dbd_opts" which will be passed directly through to any |
|
688
|
|
|
|
|
|
|
connection setup to that server. For details on what settings are available, |
|
689
|
|
|
|
|
|
|
check the documentation for DBI and for any relevant DBD::* modules you will be |
|
690
|
|
|
|
|
|
|
using. |
|
691
|
|
|
|
|
|
|
|
|
692
|
|
|
|
|
|
|
For some database servers, depending on your configuration, you may also need to |
|
693
|
|
|
|
|
|
|
specify a list of schemas for your connection's search path. This is currently |
|
694
|
|
|
|
|
|
|
only handled for PostgreSQL connections, and is only necessary if you will be |
|
695
|
|
|
|
|
|
|
accessing tables, functions, etc. that exist outside the default "public" |
|
696
|
|
|
|
|
|
|
schema. If unspecified, you will only be able to access objects in the default |
|
697
|
|
|
|
|
|
|
schema, unless you provide fully qualified identifiers (and assuming you have |
|
698
|
|
|
|
|
|
|
appropriate permissions to do so). If specified, you must list all schemas for |
|
699
|
|
|
|
|
|
|
which you want in your search path, including the "public" schema. Any number |
|
700
|
|
|
|
|
|
|
of schemas may be listed, and they will be added to your search path in the |
|
701
|
|
|
|
|
|
|
same order you specify in the configuration. For all non-PostgreSQL servers, |
|
702
|
|
|
|
|
|
|
the schemas option will be ignored if specified. |
|
703
|
|
|
|
|
|
|
|
|
704
|
|
|
|
|
|
|
In addition to the definition of individual servers, there are a few top-level |
|
705
|
|
|
|
|
|
|
configuration settings. |
|
706
|
|
|
|
|
|
|
|
|
707
|
|
|
|
|
|
|
=over |
|
708
|
|
|
|
|
|
|
|
|
709
|
|
|
|
|
|
|
=item * |
|
710
|
|
|
|
|
|
|
|
|
711
|
|
|
|
|
|
|
default_reader |
|
712
|
|
|
|
|
|
|
|
|
713
|
|
|
|
|
|
|
Defines which reader database to use. Valid values are: the name of one of your |
|
714
|
|
|
|
|
|
|
defined reader databases, "__random" (which will, as the name implies, choose a |
|
715
|
|
|
|
|
|
|
random reader database from the list), "primary" and "none". The last two have |
|
716
|
|
|
|
|
|
|
the same effect as not defining a default reader at all. Without a specific |
|
717
|
|
|
|
|
|
|
reader named, or "__random" to choose one randomly, no reader database will be |
|
718
|
|
|
|
|
|
|
used and all queries will be issued to the primary server. |
|
719
|
|
|
|
|
|
|
|
|
720
|
|
|
|
|
|
|
=item * |
|
721
|
|
|
|
|
|
|
|
|
722
|
|
|
|
|
|
|
reader_failover |
|
723
|
|
|
|
|
|
|
|
|
724
|
|
|
|
|
|
|
Valid values are one or zero (defaults to zero). With this option turned on, |
|
725
|
|
|
|
|
|
|
every query issued to a reader database will be preceded by a C<ping> call. |
|
726
|
|
|
|
|
|
|
If the ping fails, then a new reader database will be selected (for the current |
|
727
|
|
|
|
|
|
|
and all future queries issued to the reader). A warning will be printed each |
|
728
|
|
|
|
|
|
|
time a new reader database needs to be selected due to a failed ping, but no |
|
729
|
|
|
|
|
|
|
error will be issued unless a new reader cannot be selected. |
|
730
|
|
|
|
|
|
|
|
|
731
|
|
|
|
|
|
|
Please note that each time a new reader database needs to be selected, B<all> |
|
732
|
|
|
|
|
|
|
readers will be considered, even if they had failed before. This is done in the |
|
733
|
|
|
|
|
|
|
event that a previously unresponsive reader becomes available again. You can |
|
734
|
|
|
|
|
|
|
turn this off (and only consider readers that have not failed before) by turning |
|
735
|
|
|
|
|
|
|
on the I<flag_bad_readers> option. |
|
736
|
|
|
|
|
|
|
|
|
737
|
|
|
|
|
|
|
=item * |
|
738
|
|
|
|
|
|
|
|
|
739
|
|
|
|
|
|
|
flag_bad_readers |
|
740
|
|
|
|
|
|
|
|
|
741
|
|
|
|
|
|
|
If both this option and I<reader_failover> are turned on, then a reader database |
|
742
|
|
|
|
|
|
|
will be taken out of consideration for all future reconnection attempts if it |
|
743
|
|
|
|
|
|
|
has failed a ping attempt at any point within the current process. |
|
744
|
|
|
|
|
|
|
|
|
745
|
|
|
|
|
|
|
=item * |
|
746
|
|
|
|
|
|
|
|
|
747
|
|
|
|
|
|
|
cache_connections |
|
748
|
|
|
|
|
|
|
|
|
749
|
|
|
|
|
|
|
With this option turned on, new database connections will be created through |
|
750
|
|
|
|
|
|
|
DBI's C<connect_cached> method instead of the normal C<connect> method. This |
|
751
|
|
|
|
|
|
|
allows for basic connection pooling. For the full details, check DBI's |
|
752
|
|
|
|
|
|
|
documentation. Basically what happens is if you make multiple calls to |
|
753
|
|
|
|
|
|
|
C<connect_cached> with the exact same arguments (including the extra |
|
754
|
|
|
|
|
|
|
connection parameters and attributes -- not just the |
|
755
|
|
|
|
|
|
|
DSN, username and password) you will get back the same database connection |
|
756
|
|
|
|
|
|
|
handle each time, instead of brand new and untainted handles. The exception is |
|
757
|
|
|
|
|
|
|
if an existing, and cached, database handle still has an active statement handle |
|
758
|
|
|
|
|
|
|
on it, it will not be returned. Instead it will be removed from the cache and a |
|
759
|
|
|
|
|
|
|
fully-new connection to the database will be established, cached and returned. |
|
760
|
|
|
|
|
|
|
|
|
761
|
|
|
|
|
|
|
=item * |
|
762
|
|
|
|
|
|
|
|
|
763
|
|
|
|
|
|
|
cache_statements |
|
764
|
|
|
|
|
|
|
|
|
765
|
|
|
|
|
|
|
When this option is turned on, statement preparation in DBI will use |
|
766
|
|
|
|
|
|
|
C<prepare_cached> instead of C<prepare>. For some databases this can |
|
767
|
|
|
|
|
|
|
provide measurable performance improvements if you issue the same query (this |
|
768
|
|
|
|
|
|
|
includes the placeholders, but not the values being used within those |
|
769
|
|
|
|
|
|
|
placeholders) repeatedly. Not all databases' DBD modules show much or any |
|
770
|
|
|
|
|
|
|
difference in performance between C<prepare> and C<prepare_cached>, but |
|
771
|
|
|
|
|
|
|
preparation caching is generally very safe. |
|
772
|
|
|
|
|
|
|
|
|
773
|
|
|
|
|
|
|
=back |
|
774
|
|
|
|
|
|
|
|
|
775
|
|
|
|
|
|
|
=head2 YAML Configuration Example |
|
776
|
|
|
|
|
|
|
|
|
777
|
|
|
|
|
|
|
default_reader: __random |
|
778
|
|
|
|
|
|
|
reader_failover: 1 |
|
779
|
|
|
|
|
|
|
flag_bad_readers: 0 |
|
780
|
|
|
|
|
|
|
cache_connections: 0 |
|
781
|
|
|
|
|
|
|
cache_statements: 1 |
|
782
|
|
|
|
|
|
|
primary: |
|
783
|
|
|
|
|
|
|
driver: Pg |
|
784
|
|
|
|
|
|
|
db: mydatabase |
|
785
|
|
|
|
|
|
|
host: db-1 |
|
786
|
|
|
|
|
|
|
user: username |
|
787
|
|
|
|
|
|
|
pass: password |
|
788
|
|
|
|
|
|
|
schemas: |
|
789
|
|
|
|
|
|
|
- myschema |
|
790
|
|
|
|
|
|
|
- public |
|
791
|
|
|
|
|
|
|
dbd_opts: |
|
792
|
|
|
|
|
|
|
AutoCommit: 0 |
|
793
|
|
|
|
|
|
|
readers: |
|
794
|
|
|
|
|
|
|
reader1: |
|
795
|
|
|
|
|
|
|
driver: Pg |
|
796
|
|
|
|
|
|
|
db: mydatabase |
|
797
|
|
|
|
|
|
|
host: db-2 |
|
798
|
|
|
|
|
|
|
user: username |
|
799
|
|
|
|
|
|
|
schemas: |
|
800
|
|
|
|
|
|
|
- myschema |
|
801
|
|
|
|
|
|
|
- public |
|
802
|
|
|
|
|
|
|
secondreader: |
|
803
|
|
|
|
|
|
|
driver: Pg |
|
804
|
|
|
|
|
|
|
db: mydatabase |
|
805
|
|
|
|
|
|
|
host: 10.1.2.3 |
|
806
|
|
|
|
|
|
|
port: 8306 |
|
807
|
|
|
|
|
|
|
user: username |
|
808
|
|
|
|
|
|
|
schemas: |
|
809
|
|
|
|
|
|
|
- myschema |
|
810
|
|
|
|
|
|
|
- public |
|
811
|
|
|
|
|
|
|
|
|
812
|
|
|
|
|
|
|
=head2 Explicit Hashref Configuration Example |
|
813
|
|
|
|
|
|
|
|
|
814
|
|
|
|
|
|
|
my $config = { |
|
815
|
|
|
|
|
|
|
default_reader => '__random', |
|
816
|
|
|
|
|
|
|
reader_failover => 1, |
|
817
|
|
|
|
|
|
|
flag_bad_readers => 0, |
|
818
|
|
|
|
|
|
|
cache_connections => 0, |
|
819
|
|
|
|
|
|
|
cache_statements => 1, |
|
820
|
|
|
|
|
|
|
primary => { |
|
821
|
|
|
|
|
|
|
driver => 'Pg', |
|
822
|
|
|
|
|
|
|
db => 'mydatabase', |
|
823
|
|
|
|
|
|
|
host => 'db-1', |
|
824
|
|
|
|
|
|
|
user => 'username', |
|
825
|
|
|
|
|
|
|
pass => 'password', |
|
826
|
|
|
|
|
|
|
schemas => ['myschema','public'], |
|
827
|
|
|
|
|
|
|
dbd_opts => { |
|
828
|
|
|
|
|
|
|
AutoCommit => 0, |
|
829
|
|
|
|
|
|
|
} |
|
830
|
|
|
|
|
|
|
}, |
|
831
|
|
|
|
|
|
|
readers => { |
|
832
|
|
|
|
|
|
|
reader1 => { |
|
833
|
|
|
|
|
|
|
driver => 'Pg', |
|
834
|
|
|
|
|
|
|
db => 'mydatabase', |
|
835
|
|
|
|
|
|
|
host => 'db-2', |
|
836
|
|
|
|
|
|
|
user => 'username', |
|
837
|
|
|
|
|
|
|
schemas => ['myschema','public'] |
|
838
|
|
|
|
|
|
|
}, |
|
839
|
|
|
|
|
|
|
reader2 => { |
|
840
|
|
|
|
|
|
|
driver => 'Pg', |
|
841
|
|
|
|
|
|
|
db => 'mydatabase', |
|
842
|
|
|
|
|
|
|
host => '10.1.2.3', |
|
843
|
|
|
|
|
|
|
port => 8306, |
|
844
|
|
|
|
|
|
|
user => 'username', |
|
845
|
|
|
|
|
|
|
schemas => ['myschema','public'] |
|
846
|
|
|
|
|
|
|
} |
|
847
|
|
|
|
|
|
|
} |
|
848
|
|
|
|
|
|
|
}; |
|
849
|
|
|
|
|
|
|
my $db = DBIx::DataStore->new({ config => $config }); |
|
850
|
|
|
|
|
|
|
|
|
851
|
|
|
|
|
|
|
=head2 Configuring Database Passwords |
|
852
|
|
|
|
|
|
|
|
|
853
|
|
|
|
|
|
|
Because DBIx::DataStore uses the normal DBI/DBD layers underneath, all the |
|
854
|
|
|
|
|
|
|
usual methods of locating and presenting database credentials to the |
|
855
|
|
|
|
|
|
|
appropriate database server are available. This includes methods such as the |
|
856
|
|
|
|
|
|
|
C<.pgpass> file for PostgreSQL and equivalents for other RDBMSes. If your |
|
857
|
|
|
|
|
|
|
DBIx::DataStore configuration does not include a C<pass> attribute for a given |
|
858
|
|
|
|
|
|
|
database host, these alternate methods will be used as long as they are |
|
859
|
|
|
|
|
|
|
properly configured. |
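|
For example, with PostgreSQL you might omit C<pass> from the configuration |
and instead keep the credentials in C<~/.pgpass> (standard PostgreSQL |
behavior, not something specific to this module): |
|
    # ~/.pgpass -- hostname:port:database:username:password |
    db-1:5432:mydatabase:username:s3cret |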
|
860
|
|
|
|
|
|
|
|
|
861
|
|
|
|
|
|
|
=head1 SEE ALSO |
|
862
|
|
|
|
|
|
|
|
|
863
|
|
|
|
|
|
|
L<DBI>, L<Data::Page>, L<YAML::Syck> |
|
864
|
|
|
|
|
|
|
|
|
865
|
|
|
|
|
|
|
=head1 AUTHORS |
|
866
|
|
|
|
|
|
|
|
|
867
|
|
|
|
|
|
|
Jon Sime E<lt>jonsime@gmail.comE<gt>, |
|
868
|
|
|
|
|
|
|
Buddy Burden E<lt>buddy@barefoot.netE<gt> |
|
869
|
|
|
|
|
|
|
|
|
870
|
|
|
|
|
|
|
=head1 LICENSE |
|
871
|
|
|
|
|
|
|
|
|
872
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or modify it |
|
873
|
|
|
|
|
|
|
under the same terms as Perl itself. |
|
874
|
|
|
|
|
|
|
|
|
875
|
|
|
|
|
|
|
=cut |
|
876
|
|
|
|
|
|
|
|
|
877
|
|
|
|
|
|
|
my $HASH_PH = qr/\?\?\?/; |
|
878
|
|
|
|
|
|
|
my $ARRAY_PH = $HASH_PH; |
|
879
|
|
|
|
|
|
|
|
|
880
|
|
|
|
|
|
|
my $USE_PAGER = 1; |
|
881
|
|
|
|
|
|
|
|
|
882
|
|
|
|
|
|
|
# some "true" values/strings reused in a few places (mostly submodules) |
|
883
|
|
|
|
|
|
|
our %TV = map { $_ => 1 } qw( 1 yes true on enable enabled ); |
|
884
|
|
|
|
|
|
|
# same thing, but for "false" |
|
885
|
|
|
|
|
|
|
our %FV = map { $_ => 1 } qw( 0 no false off disable disabled ); |
|
886
|
|
|
|
|
|
|
|
|
887
|
|
|
|
|
|
|
sub import { |
|
888
|
2
|
|
|
2
|
|
23
|
my ($pkg, %t) = @_; |
|
889
|
|
|
|
|
|
|
|
|
890
|
2
|
|
|
|
|
156
|
foreach (keys %t) { |
|
891
|
0
|
|
|
|
|
0
|
$t{lc($_)} = lc($t{$_}); |
|
892
|
0
|
0
|
|
|
|
0
|
delete $t{$_} unless lc($_) eq $_; |
|
893
|
|
|
|
|
|
|
} |
|
894
|
|
|
|
|
|
|
|
|
895
|
|
|
|
|
|
|
# set up debugging and logger |
|
896
|
|
|
|
|
|
|
$t{'debug'} = $ENV{'DATASTORE_DEBUG'} if (!defined $t{'debug'} || $t{'debug'} !~ /^\d+$/o) |
|
897
|
2
|
50
|
33
|
|
|
30
|
&& defined $ENV{'DATASTORE_DEBUG'} && $ENV{'DATASTORE_DEBUG'} =~ /^\d+$/o; |
|
|
|
|
33
|
|
|
|
|
|
|
|
|
33
|
|
|
|
|
|
898
|
2
|
50
|
33
|
|
|
9
|
$t{'debug'} = 0 unless defined $t{'debug'} && $t{'debug'} =~ /^\d+$/o; |
|
899
|
2
|
|
|
2
|
|
1016
|
eval("use DBIx::DataStore::Debug ($t{'debug'});"); |
|
|
2
|
|
|
|
|
4
|
|
|
|
2
|
|
|
|
|
21
|
|
|
|
2
|
|
|
|
|
186
|
|
|
900
|
|
|
|
|
|
|
|
|
901
|
2
|
50
|
|
|
|
8
|
if (defined $t{'paging'}) { |
|
902
|
0
|
0
|
|
|
|
0
|
if (exists $TV{lc($t{'paging'})}) { #load Data::Page now |
|
|
|
0
|
|
|
|
|
|
|
903
|
0
|
|
|
|
|
0
|
$USE_PAGER = 1; |
|
904
|
0
|
|
|
|
|
0
|
eval("use Data::Page"); |
|
905
|
|
|
|
|
|
|
} elsif (exists $FV{lc($t{'paging'})}) { #don't ever load Data::Page |
|
906
|
0
|
|
|
|
|
0
|
$USE_PAGER = 0; |
|
907
|
|
|
|
|
|
|
} else { # auto-loading of Data::Page on first use |
|
908
|
0
|
|
|
|
|
0
|
$USE_PAGER = -1; |
|
909
|
|
|
|
|
|
|
} |
|
910
|
|
|
|
|
|
|
} |
|
911
|
|
|
|
|
|
|
|
|
912
|
|
|
|
|
|
|
# call the config loader submodule |
|
913
|
2
|
50
|
33
|
|
|
9
|
$t{'use_home'} = 0 if !defined $t{'use_home'} || $t{'use_home'} !~ /^\d+$/o; |
|
914
|
|
|
|
|
|
|
eval("use DBIx::DataStore::Config ('$t{'config'}', $t{'use_home'});") |
|
915
|
2
|
50
|
33
|
|
|
17
|
if defined $t{'config'} && length($t{'config'}) > 0; |
|
916
|
|
|
|
|
|
|
|
|
917
|
|
|
|
|
|
|
# we do these mandatory loads here instead of the normal area because we need to |
|
918
|
|
|
|
|
|
|
# delay their loading until after we've done things like define DEBUG and such |
|
919
|
2
|
|
|
2
|
|
916
|
eval("use DBIx::DataStore::ResultRow"); |
|
|
2
|
|
|
|
|
4
|
|
|
|
2
|
|
|
|
|
50
|
|
|
|
2
|
|
|
|
|
99
|
|
|
920
|
2
|
|
|
2
|
|
867
|
eval("use DBIx::DataStore::ResultSet"); |
|
|
2
|
|
|
|
|
6
|
|
|
|
2
|
|
|
|
|
47
|
|
|
|
2
|
|
|
|
|
106
|
|
|
921
|
|
|
|
|
|
|
} |
|
922
|
|
|
|
|
|
|
|
|
923
|
|
|
|
|
|
|
sub new { |
|
924
|
1
|
|
|
1
|
1
|
10064
|
my $class = shift; |
|
925
|
|
|
|
|
|
|
|
|
926
|
1
|
|
|
|
|
3
|
my $self = { error => '' }; |
|
927
|
1
|
|
|
|
|
2
|
my $opts = {}; |
|
928
|
1
|
|
|
|
|
7
|
my @configs = (); |
|
929
|
|
|
|
|
|
|
|
|
930
|
|
|
|
|
|
|
# check for options hashref to override config file path and/or secondary DB selection |
|
931
|
1
|
50
|
33
|
|
|
9
|
if (scalar(@_) > 0 && ref($_[0]) eq 'HASH') { |
|
932
|
1
|
|
|
|
|
2
|
$opts = shift; |
|
933
|
|
|
|
|
|
|
} |
|
934
|
|
|
|
|
|
|
|
|
935
|
1
|
|
|
|
|
2
|
my @args = @_; |
|
936
|
|
|
|
|
|
|
|
|
937
|
|
|
|
|
|
|
# check first to see if a config option was passed in, and if so whether it was a |
|
938
|
|
|
|
|
|
|
# hashref containing the already-parsed config data, a scalar with raw YAML markup |
|
939
|
|
|
|
|
|
|
# in it that still needs to be parsed, or a filesystem path to a YAML file... |
|
940
|
|
|
|
|
|
|
# alternatively, if no config option was passed in, fall back on the default paths |
|
941
|
1
|
|
|
|
|
1
|
my $found_config = 0; |
|
942
|
1
|
50
|
|
|
|
4
|
if (exists $opts->{'config'}) { |
|
943
|
1
|
50
|
|
|
|
31
|
dslog(q{Deprecated config-in-hashref constructor syntax used. This feature won't exist someday!}) if DEBUG(); |
|
944
|
1
|
50
|
0
|
|
|
4
|
if (ref($opts->{'config'}) eq 'HASH') { |
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
945
|
|
|
|
|
|
|
# blindly assume hashref contains a valid config structure for now... if it |
|
946
|
|
|
|
|
|
|
# doesn't, that will be caught soon enough |
|
947
|
1
|
|
|
|
|
1
|
$self->{'config'} = { %{$opts->{'config'}} }; |
|
|
1
|
|
|
|
|
4
|
|
|
948
|
1
|
|
|
|
|
2
|
$found_config = 1; |
|
949
|
|
|
|
|
|
|
} elsif ($opts->{'config'} =~ /^\// && -r $opts->{'config'}) { |
|
950
|
|
|
|
|
|
|
# scalar contained what appeared to be a path, and lo and behold it pointed to |
|
951
|
|
|
|
|
|
|
# a file we're able to read... we don't set found_config here, though... just add |
|
952
|
|
|
|
|
|
|
# it to the list of configs to check further down to make sure it's actually |
|
953
|
|
|
|
|
|
|
# valid |
|
954
|
0
|
|
|
|
|
0
|
@configs = ($opts->{'config'}); |
|
955
|
|
|
|
|
|
|
} elsif (length($opts->{'config'}) > 0) { |
|
956
|
|
|
|
|
|
|
# fall back on assuming it must be raw YAML that needs to be parsed, so |
|
957
|
|
|
|
|
|
|
# give that a shot now |
|
958
|
0
|
|
|
|
|
0
|
eval("use YAML::Syck qw()"); |
|
959
|
0
|
0
|
|
|
|
0
|
if ($self->{'config'} = YAML::Syck::Load($opts->{'config'})) { |
|
960
|
0
|
|
|
|
|
0
|
$found_config = 1; |
|
961
|
|
|
|
|
|
|
} |
|
962
|
|
|
|
|
|
|
} |
|
963
|
|
|
|
|
|
|
|
|
964
|
1
|
50
|
|
|
|
3
|
if (scalar(@configs) > 0) { |
|
965
|
0
|
|
|
|
|
0
|
eval("use YAML::Syck qw()"); |
|
966
|
|
|
|
|
|
|
|
|
967
|
0
|
|
|
|
|
0
|
foreach my $path (@configs) { |
|
968
|
0
|
0
|
|
|
|
0
|
next unless -r $path; |
|
969
|
0
|
0
|
|
|
|
0
|
if ($self->{'config'} = YAML::Syck::LoadFile($path)) { |
|
970
|
0
|
|
|
|
|
0
|
$found_config = 1; |
|
971
|
0
|
|
|
|
|
0
|
last; |
|
972
|
|
|
|
|
|
|
} else { |
|
973
|
0
|
0
|
|
|
|
0
|
dslog(qq{Configuration file "$path" could not be loaded. Skipping.}) if DEBUG(); |
|
974
|
|
|
|
|
|
|
} |
|
975
|
|
|
|
|
|
|
} |
|
976
|
|
|
|
|
|
|
} |
|
977
|
|
|
|
|
|
|
|
|
978
|
1
|
50
|
|
|
|
3
|
die dslog(q{Instance config variable present, but no valid config found.}) unless $found_config; |
|
979
|
|
|
|
|
|
|
|
|
980
|
|
|
|
|
|
|
# Check whether connection caching should be enabled |
|
981
|
1
|
50
|
|
|
|
5
|
if (defined $opts->{'cache_connections'}) { |
|
|
|
50
|
|
|
|
|
|
|
982
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'cache_connections'} = $opts->{'cache_connections'}; |
|
983
|
|
|
|
|
|
|
} elsif (!defined $self->{'config'}->{'cache_connections'}) { |
|
984
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'cache_connections'} = 0; |
|
985
|
|
|
|
|
|
|
} |
|
986
|
|
|
|
|
|
|
|
|
987
|
|
|
|
|
|
|
# Check whether statement handler caching should be used |
|
988
|
1
|
50
|
|
|
|
5
|
if (defined $opts->{'cache_statements'}) { |
|
|
|
50
|
|
|
|
|
|
|
989
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'cache_statements'} = $opts->{'cache_statements'}; |
|
990
|
|
|
|
|
|
|
} elsif (!defined $self->{'config'}->{'cache_statements'}) { |
|
991
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'cache_statements'} = 0; |
|
992
|
|
|
|
|
|
|
} |
|
993
|
|
|
|
|
|
|
} |
|
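For the deprecated config-in-hashref path handled above, the 'config' key may be an already-parsed structure, an absolute path to a readable YAML file, or a string of raw YAML handed to YAML::Syck::Load. A hedged sketch of the three forms (the file path and config contents are made up for illustration):

    # 1. already-parsed structure (used as-is; problems surface later)
    my $db_from_hash = DBIx::DataStore->new({
        config => { primary => { driver => 'Pg', db => 'commerce' } },
    });

    # 2. absolute path to a readable YAML file
    my $db_from_file = DBIx::DataStore->new({ config => '/etc/myapp/datastore.yml' });

    # 3. raw YAML markup, parsed on the spot
    my $db_from_yaml = DBIx::DataStore->new({
        config => "---\nprimary:\n  driver: Pg\n  db: commerce\n",
    });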
994
|
|
|
|
|
|
|
|
|
995
|
|
|
|
|
|
|
# now for the new DBIx::DataStore syntax (the previous block was to support the |
|
996
|
|
|
|
|
|
|
# old SQL::Wrapper syntax -- it will likely be dropped some day down the road |
|
997
|
1
|
0
|
33
|
|
|
3
|
if ($found_config != 1 && defined $opts->{'store'} && length($opts->{'store'}) > 0) { |
|
|
|
|
33
|
|
|
|
|
|
998
|
0
|
|
|
|
|
0
|
eval(q|$self->{'config'} = DBIx::DataStore::Config::get_store($opts->{'store'})|); |
|
999
|
0
|
0
|
0
|
|
|
0
|
if ($@ || !defined $self->{'config'}) { |
|
1000
|
0
|
|
|
|
|
0
|
die dslog(q{Error getting configuration for datastore:}, $opts->{'store'}, q{[}, $@, q{]}); |
|
1001
|
|
|
|
|
|
|
} else { |
|
1002
|
0
|
|
|
|
|
0
|
$found_config = 1; |
|
1003
|
|
|
|
|
|
|
} |
|
1004
|
|
|
|
|
|
|
} |
|
1005
|
|
|
|
|
|
|
|
|
1006
|
|
|
|
|
|
|
# check for the alternate-new syntax of "new($store, @schemas)" (where @schemas is optional) |
|
1007
|
1
|
50
|
33
|
|
|
6
|
if ($found_config != 1 && scalar(@args) > 0) { |
|
1008
|
0
|
|
|
|
|
0
|
eval(q|$self->{'config'} = DBIx::DataStore::Config::get_store($args[0])|); |
|
1009
|
0
|
0
|
0
|
|
|
0
|
if ($@ || !defined $self->{'config'}) { |
|
1010
|
0
|
0
|
|
|
|
0
|
dslog(q{Non-hashref args passed in, but first one is not a valid datastore config name.}) if DEBUG(); |
|
1011
|
|
|
|
|
|
|
} else { |
|
1012
|
0
|
0
|
|
|
|
0
|
dslog(q{Alternate constructor syntax [new($datastore, @schemas)] used.}) if DEBUG() >= 3; |
|
1013
|
0
|
|
|
|
|
0
|
$found_config = 1; |
|
1014
|
0
|
|
|
|
|
0
|
shift(@args); # remove datastore name from remaining args |
|
1015
|
0
|
|
|
|
|
0
|
my @manual_schemas = grep { $_ =~ /^\w+$/o } @args; |
|
|
0
|
|
|
|
|
0
|
|
|
1016
|
0
|
0
|
|
|
|
0
|
if (scalar(@manual_schemas) > 0) { |
|
1017
|
0
|
0
|
|
|
|
0
|
dslog(q{Overriding configuration's schemas with custom list:}, join(', ', @manual_schemas)) if DEBUG() >= 2; |
|
1018
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'primary'}->{'schemas'} = [@manual_schemas]; |
|
1019
|
0
|
0
|
0
|
|
|
0
|
if (defined $self->{'config'}->{'readers'} && ref($self->{'config'}->{'readers'}) eq 'ARRAY') { |
|
1020
|
0
|
|
|
|
|
0
|
foreach my $reader (@{$self->{'config'}->{'readers'}}) { |
|
|
0
|
|
|
|
|
0
|
|
|
1021
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'readers'}->{$reader}->{'schemas'} = [@manual_schemas]; |
|
1022
|
|
|
|
|
|
|
} |
|
1023
|
|
|
|
|
|
|
} |
|
1024
|
|
|
|
|
|
|
} |
|
1025
|
|
|
|
|
|
|
} |
|
1026
|
|
|
|
|
|
|
} |
|
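The alternate constructor syntax handled just above takes a datastore name followed by an optional list of schema names, which then overrides the configured schemas on the primary and on every reader. A brief hedged example (the schema names are made up):

    # connect to the "commerce" datastore, overriding its schema search path
    my $db = DBIx::DataStore->new('commerce', 'public', 'audit');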
1027
|
|
|
|
|
|
|
|
|
1028
|
1
|
|
|
|
|
1
|
my ($i); |
|
1029
|
|
|
|
|
|
|
|
|
1030
|
|
|
|
|
|
|
# if we still don't have a config, use the package-matching option |
|
1031
|
1
|
50
|
|
|
|
2
|
if ($found_config != 1) { |
|
1032
|
0
|
|
|
|
|
0
|
my @packages; |
|
1033
|
0
|
|
|
|
|
0
|
for ($i = 0; my @p = caller($i); $i++) { |
|
1034
|
0
|
0
|
|
|
|
0
|
push(@packages, $p[0]) unless $p[0] eq 'main'; |
|
1035
|
|
|
|
|
|
|
} |
|
1036
|
0
|
0
|
|
|
|
0
|
if (scalar(@packages) > 0) { |
|
1037
|
0
|
|
|
|
|
0
|
eval(q|$self->{'config'} = DBIx::DataStore::Config::match_store(\@packages)|); |
|
1038
|
0
|
0
|
0
|
|
|
0
|
if ($@ || !defined $self->{'config'}) { |
|
1039
|
0
|
0
|
|
|
|
0
|
dslog(q{Locating configuration based on packages in stack failed.}) if DEBUG(); |
|
1040
|
|
|
|
|
|
|
} else { |
|
1041
|
0
|
|
|
|
|
0
|
$found_config = 1; |
|
1042
|
|
|
|
|
|
|
} |
|
1043
|
|
|
|
|
|
|
} |
|
1044
|
|
|
|
|
|
|
} |
|
1045
|
|
|
|
|
|
|
|
|
1046
|
|
|
|
|
|
|
# if that still doesn't work, return the default-marked configuration |
|
1047
|
1
|
50
|
|
|
|
6
|
if ($found_config != 1) { |
|
1048
|
0
|
|
|
|
|
0
|
eval(q|$self->{'config'} = DBIx::DataStore::Config::get_default()|); |
|
1049
|
0
|
0
|
0
|
|
|
0
|
if ($@ || !defined $self->{'config'}) { |
|
1050
|
0
|
|
|
|
|
0
|
die dslog(q{No configuration could be located and used for this connection!}); |
|
1051
|
|
|
|
|
|
|
} |
|
1052
|
|
|
|
|
|
|
} |
|
1053
|
|
|
|
|
|
|
|
|
1054
|
|
|
|
|
|
|
# Validate connection to primary database |
|
1055
|
1
|
|
|
|
|
1
|
$self->{'handles'} = {}; |
|
1056
|
1
|
50
|
|
|
|
2
|
unless ($self->{'handles'}->{'primary'} = _db_connect( |
|
1057
|
|
|
|
|
|
|
cache => $self->{'config'}->{'cache_connections'}, |
|
1058
|
1
|
|
|
|
|
5
|
%{$self->{'config'}->{'primary'}}) |
|
1059
|
|
|
|
|
|
|
) { |
|
1060
|
0
|
|
|
|
|
0
|
die dslog("Validation of connection to primary database failed!"); |
|
1061
|
|
|
|
|
|
|
} |
|
1062
|
|
|
|
|
|
|
$self->{'handles'}->{'primary'} = _set_schema_searchpath( |
|
1063
|
|
|
|
|
|
|
$self->{'handles'}->{'primary'}, |
|
1064
|
|
|
|
|
|
|
$self->{'config'}->{'primary'}->{'driver'}, |
|
1065
|
1
|
|
50
|
|
|
6
|
$self->{'config'}->{'primary'}->{'schemas'} |
|
1066
|
|
|
|
|
|
|
) || die dslog(q{Error setting schema search path.}); |
|
1067
|
|
|
|
|
|
|
|
|
1068
|
|
|
|
|
|
|
# Select the default reader DB |
|
1069
|
|
|
|
|
|
|
my $num_readers = defined $self->{'config'}->{'readers'} && ref($self->{'config'}->{'readers'}) eq 'HASH' |
|
1070
|
1
|
50
|
33
|
|
|
5
|
? scalar keys %{$self->{'config'}->{'readers'}} : 0; |
|
|
0
|
|
|
|
|
0
|
|
|
1071
|
|
|
|
|
|
|
|
|
1072
|
1
|
50
|
|
|
|
2
|
$self->{'config'}->{'default_reader'} = $opts->{'reader'} if defined $opts->{'reader'}; |
|
1073
|
|
|
|
|
|
|
$self->{'config'}->{'default_reader'} = 'primary' |
|
1074
|
|
|
|
|
|
|
if !defined $self->{'config'}->{'default_reader'} |
|
1075
|
|
|
|
|
|
|
|| lc($self->{'config'}->{'default_reader'}) eq 'none' |
|
1076
|
1
|
0
|
33
|
|
|
4
|
|| length($self->{'config'}->{'default_reader'}) < 1 |
|
|
|
|
33
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
1077
|
|
|
|
|
|
|
|| $num_readers < 1; |
|
1078
|
|
|
|
|
|
|
|
|
1079
|
1
|
|
|
|
|
2
|
my @reader_list = (); |
|
1080
|
|
|
|
|
|
|
|
|
1081
|
|
|
|
|
|
|
# if a non-primary reader was selected (either in the YAML config or the opts hashref), populate |
|
1082
|
|
|
|
|
|
|
# the reader_list array with candidates (__random will fill the list with all defined readers in |
|
1083
|
|
|
|
|
|
|
# a randomized order; if a specific server was selected the list will contain only that entry) |
|
1084
|
1
|
50
|
33
|
|
|
7
|
if (defined $self->{'config'}->{'default_reader'} && $self->{'config'}->{'default_reader'} ne 'primary') { |
|
1085
|
0
|
0
|
|
|
|
0
|
if ($self->{'config'}->{'default_reader'} eq '__random') { |
|
1086
|
0
|
0
|
|
|
|
0
|
if ($num_readers > 0) { |
|
1087
|
0
|
|
|
|
|
0
|
@reader_list = keys %{$self->{'config'}->{'readers'}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1088
|
0
|
|
|
|
|
0
|
$i = $#reader_list; |
|
1089
|
0
|
|
|
|
|
0
|
while ($i--) { |
|
1090
|
0
|
|
|
|
|
0
|
my $j = int rand ($i+1); |
|
1091
|
0
|
|
|
|
|
0
|
@reader_list[$i,$j] = @reader_list[$j,$i]; |
|
1092
|
|
|
|
|
|
|
} |
|
1093
|
|
|
|
|
|
|
} |
|
1094
|
|
|
|
|
|
|
} else { |
|
1095
|
|
|
|
|
|
|
die dslog("Non-existent reader database ($self->{'config'}->{'default_reader'}) selected!") |
|
1096
|
0
|
0
|
|
|
|
0
|
unless exists $self->{'config'}->{'readers'}->{ $self->{'config'}->{'default_reader'} }; |
|
1097
|
0
|
|
|
|
|
0
|
@reader_list = ($self->{'config'}->{'default_reader'}); |
|
1098
|
|
|
|
|
|
|
} |
|
1099
|
|
|
|
|
|
|
} |
|
1100
|
|
|
|
|
|
|
|
|
1101
|
1
|
50
|
|
|
|
3
|
if (scalar(@reader_list) < 1) { |
|
1102
|
|
|
|
|
|
|
# if there is no selection for a reader, copy the objref of the primary DB |
|
1103
|
1
|
|
|
|
|
9
|
$self->{'handles'}->{'reader'} = $self->{'handles'}->{'primary'}; |
|
1104
|
1
|
|
|
|
|
2
|
$self->{'config'}->{'default_reader'} = 'primary'; |
|
1105
|
1
|
|
|
|
|
3
|
$self->{'config'}->{'readers'} = { primary => $self->{'config'}->{'primary'} }; |
|
1106
|
1
|
|
|
|
|
2
|
$self->{'reader'} = 'primary'; |
|
1107
|
|
|
|
|
|
|
} else { |
|
1108
|
0
|
|
|
|
|
0
|
my $found_reader = 0; |
|
1109
|
0
|
|
|
|
|
0
|
foreach my $reader (@reader_list) { |
|
1110
|
0
|
|
|
|
|
0
|
my ($dbh); |
|
1111
|
0
|
0
|
|
|
|
0
|
if ($dbh = _db_connect( |
|
1112
|
|
|
|
|
|
|
cache => $self->{'config'}->{'cache_connections'}, |
|
1113
|
0
|
|
|
|
|
0
|
%{$self->{'config'}->{'readers'}->{$reader}}) |
|
1114
|
|
|
|
|
|
|
) { |
|
1115
|
|
|
|
|
|
|
$self->{'handles'}->{'reader'} = _set_schema_searchpath( |
|
1116
|
|
|
|
|
|
|
$dbh, |
|
1117
|
|
|
|
|
|
|
$self->{'config'}->{'readers'}->{$reader}->{'driver'}, |
|
1118
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'readers'}->{$reader}->{'schemas'} |
|
1119
|
|
|
|
|
|
|
); |
|
1120
|
0
|
|
|
|
|
0
|
$self->{'reader'} = $reader; |
|
1121
|
0
|
|
|
|
|
0
|
$found_reader = 1; |
|
1122
|
0
|
|
|
|
|
0
|
last; |
|
1123
|
|
|
|
|
|
|
} |
|
1124
|
|
|
|
|
|
|
} |
|
1125
|
|
|
|
|
|
|
|
|
1126
|
0
|
0
|
|
|
|
0
|
if ($found_reader != 1) { |
|
1127
|
0
|
|
|
|
|
0
|
die dslog("No valid connection could be made to a reader database!"); |
|
1128
|
|
|
|
|
|
|
} |
|
1129
|
|
|
|
|
|
|
} |
|
1130
|
|
|
|
|
|
|
|
|
1131
|
|
|
|
|
|
|
# quick reference flag for whether AutoCommit was turned off on the primary DB |
|
1132
|
|
|
|
|
|
|
$self->{'autocommit'} = |
|
1133
|
|
|
|
|
|
|
defined $self->{'config'}->{'primary'}->{'dbd_opts'} |
|
1134
|
|
|
|
|
|
|
&& ref($self->{'config'}->{'primary'}->{'dbd_opts'}) eq 'HASH' |
|
1135
|
|
|
|
|
|
|
&& defined $self->{'config'}->{'primary'}->{'dbd_opts'}->{'AutoCommit'} |
|
1136
|
1
|
50
|
0
|
|
|
5
|
&& $self->{'config'}->{'primary'}->{'dbd_opts'}->{'AutoCommit'} == 0 |
|
1137
|
|
|
|
|
|
|
? 0 : 1; |
|
1138
|
|
|
|
|
|
|
|
|
1139
|
|
|
|
|
|
|
# Init the "in transaction" flag |
|
1140
|
1
|
|
|
|
|
5
|
$self->{'in_tx'} = 0; |
|
1141
|
|
|
|
|
|
|
|
|
1142
|
|
|
|
|
|
|
# Init the statement counter. This counter is used to check if there have |
|
1143
|
|
|
|
|
|
|
# been any non-select statements issued to the primary DB since the last |
|
1144
|
|
|
|
|
|
|
# commit() or rollback(). While technically it is incremented for each |
|
1145
|
|
|
|
|
|
|
# non-select statement, it cannot be relied on for an exact count of the |
|
1146
|
|
|
|
|
|
|
# statements since the last rollback()/commit(), as selective rollbacks |
|
1147
|
|
|
|
|
|
|
# of savepoints or nested transactions will not reset this counter (it is |
|
1148
|
|
|
|
|
|
|
# only reset by the rollback() method if that rollback has the side effect |
|
1149
|
|
|
|
|
|
|
# of dropping the in_tx flag to 0. Why bother if it's not an exact counter? I'm |
|
1150
|
|
|
|
|
|
|
# glad you asked! When AutoCommit is turned off for the primary DB, this |
|
1151
|
|
|
|
|
|
|
# counter is used to determine whether to silence warnings/errors on the |
|
1152
|
|
|
|
|
|
|
# extraneous calls to transaction methods, particularly in DESTROY. For |
|
1153
|
|
|
|
|
|
|
# the exact details of when this silencing will occur, check out the |
|
1154
|
|
|
|
|
|
|
# code in the various transaction methods. |
|
1155
|
1
|
|
|
|
|
1
|
$self->{'st_count'} = 0; |
|
1156
|
|
|
|
|
|
|
|
|
1157
|
|
|
|
|
|
|
# if AutoCommit is set to 0 for the primary server, we automatically start out inside a |
|
1158
|
|
|
|
|
|
|
# transaction |
|
1159
|
1
|
50
|
|
|
|
3
|
if ($self->{'autocommit'} == 0) { |
|
1160
|
0
|
|
|
|
|
0
|
$self->{'in_tx'} = 1; |
|
1161
|
|
|
|
|
|
|
} |
|
1162
|
|
|
|
|
|
|
|
|
1163
|
|
|
|
|
|
|
# set up arrayref to hold any error strings (usually DBI errors) |
|
1164
|
1
|
|
|
|
|
2
|
$self->{'errors'} = []; |
|
1165
|
|
|
|
|
|
|
|
|
1166
|
1
|
|
|
|
|
5
|
return bless($self, $class); |
|
1167
|
|
|
|
|
|
|
} |
|
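Gathering up the configuration keys that new() actually reads (primary, readers, default_reader, schemas, driver, dbd_opts/AutoCommit, cache_connections, cache_statements, plus the reader_failover and flag_bad_readers flags consulted later in do()), a parsed datastore configuration might look roughly like the sketch below. Anything beyond those keys, including whatever connection details _db_connect() expects, is an assumption and is either omitted or marked as such.

    # hedged sketch: only keys referenced by the code above are shown
    my $config = {
        default_reader    => '__random',  # or a reader name, 'primary', or 'none'
        reader_failover   => 1,           # consulted in do() when a reader fails
        flag_bad_readers  => 1,           # remember readers that failed a ping test
        cache_connections => 0,
        cache_statements  => 0,
        primary => {
            driver   => 'Pg',
            db       => 'commerce',
            schemas  => [qw( public )],
            dbd_opts => { AutoCommit => 1 },
            # host/user/password details omitted -- their exact keys are
            # whatever _db_connect() expects and are not shown in this listing
        },
        readers => {
            reader1 => { driver => 'Pg', db => 'commerce', schemas => [qw( public )] },
        },
    };

    my $db = DBIx::DataStore->new({ config => $config });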
1168
|
|
|
|
|
|
|
|
|
1169
|
|
|
|
|
|
|
sub base_tables { |
|
1170
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1171
|
|
|
|
|
|
|
|
|
1172
|
0
|
|
|
|
|
0
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1173
|
0
|
|
|
|
|
0
|
my $schema = $self->{'handles'}->{'primary'}->quote($self->{'config'}->{'primary'}->{'db'}); |
|
1174
|
|
|
|
|
|
|
|
|
1175
|
0
|
|
|
|
|
0
|
my ($sql); |
|
1176
|
|
|
|
|
|
|
|
|
1177
|
0
|
0
|
|
|
|
0
|
if ($driver eq 'mysql') { |
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
1178
|
0
|
|
|
|
|
0
|
$sql = qq{ |
|
1179
|
|
|
|
|
|
|
select table_name |
|
1180
|
|
|
|
|
|
|
from information_schema.tables |
|
1181
|
|
|
|
|
|
|
where table_schema in ($schema) |
|
1182
|
|
|
|
|
|
|
and table_type = 'BASE TABLE' |
|
1183
|
|
|
|
|
|
|
order by table_name asc |
|
1184
|
|
|
|
|
|
|
}; |
|
1185
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1186
|
|
|
|
|
|
|
# make sure we only list the relations visible in the current search_path |
|
1187
|
0
|
0
|
0
|
|
|
0
|
if (defined $self->{'config'}->{'primary'}->{'schemas'} |
|
1188
|
|
|
|
|
|
|
&& ref($self->{'config'}->{'primary'}->{'schemas'}) eq 'ARRAY') { |
|
1189
|
0
|
|
|
|
|
0
|
$schema = join(',', @{$self->{'config'}->{'primary'}->{'schemas'}}); |
|
|
0
|
|
|
|
|
0
|
|
|
1190
|
|
|
|
|
|
|
} else { |
|
1191
|
0
|
|
|
|
|
0
|
$schema = q{'public'}; |
|
1192
|
|
|
|
|
|
|
} |
|
1193
|
|
|
|
|
|
|
|
|
1194
|
0
|
|
|
|
|
0
|
$sql = qq{ |
|
1195
|
|
|
|
|
|
|
select c.relname |
|
1196
|
|
|
|
|
|
|
from pg_catalog.pg_class c |
|
1197
|
|
|
|
|
|
|
join pg_catalog.pg_roles r on (r.oid = c.relowner) |
|
1198
|
|
|
|
|
|
|
left join pg_catalog.pg_namespace n on (n.oid = c.relnamespace) |
|
1199
|
|
|
|
|
|
|
where c.relkind in ('r') |
|
1200
|
|
|
|
|
|
|
and n.nspname in ($schema) |
|
1201
|
|
|
|
|
|
|
and pg_catalog.pg_table_is_visible(c.oid) |
|
1202
|
|
|
|
|
|
|
order by relname asc |
|
1203
|
|
|
|
|
|
|
}; |
|
1204
|
|
|
|
|
|
|
} elsif ($driver eq 'oracle') { |
|
1205
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1206
|
|
|
|
|
|
|
select object_name |
|
1207
|
|
|
|
|
|
|
from user_objects |
|
1208
|
|
|
|
|
|
|
where object_type in ('TABLE') |
|
1209
|
|
|
|
|
|
|
order by object_name asc |
|
1210
|
|
|
|
|
|
|
}; |
|
1211
|
|
|
|
|
|
|
} elsif ($driver eq 'db2') { |
|
1212
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1213
|
|
|
|
|
|
|
select tabname |
|
1214
|
|
|
|
|
|
|
from syscat.tables |
|
1215
|
|
|
|
|
|
|
where tabschema not like 'SYS%' and type in ('T') |
|
1216
|
|
|
|
|
|
|
order by tabname asc |
|
1217
|
|
|
|
|
|
|
}; |
|
1218
|
|
|
|
|
|
|
} else { |
|
1219
|
0
|
|
|
|
|
0
|
die dslog("This method is not yet implemented for your database server ($driver)."); |
|
1220
|
|
|
|
|
|
|
} |
|
1221
|
|
|
|
|
|
|
|
|
1222
|
0
|
|
|
|
|
0
|
my $res = $self->do($sql); |
|
1223
|
|
|
|
|
|
|
|
|
1224
|
0
|
0
|
|
|
|
0
|
if ($res) { |
|
1225
|
0
|
|
|
|
|
0
|
my @tables = (); |
|
1226
|
|
|
|
|
|
|
|
|
1227
|
0
|
|
|
|
|
0
|
while ($res->next) { |
|
1228
|
0
|
|
|
|
|
0
|
push(@tables, $res->[0]); |
|
1229
|
|
|
|
|
|
|
} |
|
1230
|
|
|
|
|
|
|
|
|
1231
|
0
|
|
|
|
|
0
|
return @tables; |
|
1232
|
|
|
|
|
|
|
} else { |
|
1233
|
0
|
|
|
|
|
0
|
die dslog("Error encountered when retrieving list of tables: $DBI::errstr"); |
|
1234
|
|
|
|
|
|
|
} |
|
1235
|
|
|
|
|
|
|
} |
|
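A quick usage sketch for base_tables(): it picks the driver-specific catalog query above and returns the plain table names.

    my @tables = $db->base_tables;
    print "table: $_\n" for @tables;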
1236
|
|
|
|
|
|
|
|
|
1237
|
|
|
|
|
|
|
sub begin { |
|
1238
|
2
|
|
|
2
|
1
|
665
|
my ($self) = shift; |
|
1239
|
|
|
|
|
|
|
|
|
1240
|
|
|
|
|
|
|
die dslog("Somehow there appears to be no driver defined for the primary database!") |
|
1241
|
2
|
50
|
|
|
|
25
|
unless defined $self->{'config'}->{'primary'}->{'driver'}; |
|
1242
|
|
|
|
|
|
|
|
|
1243
|
2
|
|
|
|
|
6
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1244
|
|
|
|
|
|
|
|
|
1245
|
|
|
|
|
|
|
# If AutoCommit is turned off, new transactions are automatically started on |
|
1246
|
|
|
|
|
|
|
# connect and immediately after any existing transactions are closed (either |
|
1247
|
|
|
|
|
|
|
# through commit or rollback). This has the side-effect of causing begin() |
|
1248
|
|
|
|
|
|
|
# to trigger a fatal error from DBI's begin_work() every time it is called |
|
1249
|
|
|
|
|
|
|
# (in the "DBI Way of Doing Things" you never actually call begin_work() |
|
1250
|
|
|
|
|
|
|
# yourself if you turn off AutoCommit). Personally, I find this annoying |
|
1251
|
|
|
|
|
|
|
# and a bit counter-intuitive, so DBIx::DataStore will let you call begin() |
|
1252
|
|
|
|
|
|
|
# if you are currently in one of the implicitly created transactions and |
|
1253
|
|
|
|
|
|
|
# you have AutoCommit turned off *and* you have issued NO non-select |
|
1254
|
|
|
|
|
|
|
# statements to the database since either a) connecting or b) closing the |
|
1255
|
|
|
|
|
|
|
# last transaction. |
|
1256
|
2
|
0
|
33
|
|
|
26
|
if ($self->{'st_count'} == 0 && $self->{'in_tx'} == 1 && $self->{'autocommit'} == 0) { |
|
|
|
|
33
|
|
|
|
|
|
1257
|
0
|
|
|
|
|
0
|
return 1; |
|
1258
|
|
|
|
|
|
|
} |
|
1259
|
|
|
|
|
|
|
|
|
1260
|
|
|
|
|
|
|
# We need to make sure the primary database server supports transactions, |
|
1261
|
|
|
|
|
|
|
# and further that it supports nested transactions if we're already inside |
|
1262
|
|
|
|
|
|
|
# one when ->begin() is called. |
|
1263
|
2
|
50
|
|
|
|
15
|
if ($driver eq 'mysql') { |
|
|
|
50
|
|
|
|
|
|
|
|
|
50
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
1264
|
0
|
0
|
|
|
|
0
|
if ($self->{'in_tx'} > 0) { |
|
1265
|
0
|
|
|
|
|
0
|
die dslog("MySQL does not support nested transactions!"); |
|
1266
|
|
|
|
|
|
|
} |
|
1267
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1268
|
0
|
0
|
|
|
|
0
|
if ($self->{'in_tx'} > 0) { |
|
1269
|
0
|
|
|
|
|
0
|
die dslog("PostgreSQL does not support nested transactions (use savepoints instead)!"); |
|
1270
|
|
|
|
|
|
|
} |
|
1271
|
|
|
|
|
|
|
} elsif ($driver eq 'sqlite') { |
|
1272
|
2
|
50
|
|
|
|
13
|
if ($self->{'in_tx'} > 0) { |
|
1273
|
0
|
|
|
|
|
0
|
die dslog("SQLite does not support nested transactions!"); |
|
1274
|
|
|
|
|
|
|
} |
|
1275
|
|
|
|
|
|
|
} elsif ($driver eq 'db2') { |
|
1276
|
0
|
0
|
|
|
|
0
|
if ($self->{'in_tx'} > 0) { |
|
1277
|
0
|
|
|
|
|
0
|
die dslog("DB2 does not support nested transactions (use savepoints instead)!"); |
|
1278
|
|
|
|
|
|
|
} |
|
1279
|
|
|
|
|
|
|
} |
|
1280
|
|
|
|
|
|
|
|
|
1281
|
2
|
50
|
|
|
|
23
|
$self->{'handles'}->{'primary'}->begin_work |
|
1282
|
|
|
|
|
|
|
|| die dslog("Error encountered during attempt to begin transaction: $DBI::errstr"); |
|
1283
|
|
|
|
|
|
|
|
|
1284
|
2
|
|
|
|
|
74
|
$self->{'in_tx'}++; |
|
1285
|
2
|
|
|
|
|
15
|
return 1; |
|
1286
|
|
|
|
|
|
|
} |
|
1287
|
|
|
|
|
|
|
|
|
1288
|
|
|
|
|
|
|
sub commit { |
|
1289
|
1
|
|
|
1
|
1
|
2
|
my ($self) = @_; |
|
1290
|
|
|
|
|
|
|
|
|
1291
|
1
|
50
|
|
|
|
7
|
die dslog("Commit attempted without any open transactions!") unless $self->{'in_tx'} > 0; |
|
1292
|
|
|
|
|
|
|
|
|
1293
|
1
|
50
|
|
|
|
23
|
$self->{'handles'}->{'primary'}->commit |
|
1294
|
|
|
|
|
|
|
|| die dslog("Error encountered during attempt to commit transaction: $DBI::errstr"); |
|
1295
|
|
|
|
|
|
|
|
|
1296
|
1
|
|
|
|
|
2
|
$self->{'in_tx'}--; |
|
1297
|
1
|
|
|
|
|
2
|
$self->{'st_count'} = 0; |
|
1298
|
|
|
|
|
|
|
|
|
1299
|
|
|
|
|
|
|
# if AutoCommit is turned off on the primary DB, then the closing of a transaction |
|
1300
|
|
|
|
|
|
|
# (either through a rollback or commit) automatically begins a new transaction, in |
|
1301
|
|
|
|
|
|
|
# which case we need to re-increment the in_tx count |
|
1302
|
1
|
50
|
|
|
|
4
|
if ($self->{'autocommit'} == 0) { |
|
1303
|
0
|
|
|
|
|
0
|
$self->{'in_tx'}++; |
|
1304
|
|
|
|
|
|
|
} |
|
1305
|
|
|
|
|
|
|
|
|
1306
|
|
|
|
|
|
|
# reset the search path when AutoCommit is turned off (since everything, including |
|
1307
|
|
|
|
|
|
|
# the initial setting of this on connect happens within transactions) |
|
1308
|
1
|
50
|
33
|
|
|
5
|
if ($self->{'autocommit'} == 0 && defined $self->{'config'}->{'primary'}->{'schemas'}) { |
|
1309
|
|
|
|
|
|
|
_set_schema_searchpath($self->{'handles'}->{'primary'}, $self->{'config'}->{'primary'}->{'driver'}, |
|
1310
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'primary'}->{'schemas'}); |
|
1311
|
|
|
|
|
|
|
} |
|
1312
|
|
|
|
|
|
|
|
|
1313
|
1
|
|
|
|
|
5
|
return 1; |
|
1314
|
|
|
|
|
|
|
} |
|
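Taken together with begin() above, commit() completes the explicit transaction flow; when AutoCommit is off, the counters maintained here are what allow the redundant begin() described earlier to be tolerated. A hedged sketch (the table and column names are illustrative):

    $db->begin;

    $db->do(q{ update products set price = price * 1.10 where category_id = ? }, 3);

    if ($db->in_transaction) {
        $db->commit;    # or $db->rollback if something went wrong
    }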
1315
|
|
|
|
|
|
|
|
|
1316
|
|
|
|
|
|
|
sub databases { |
|
1317
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1318
|
|
|
|
|
|
|
|
|
1319
|
0
|
|
|
|
|
0
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1320
|
|
|
|
|
|
|
|
|
1321
|
0
|
|
|
|
|
0
|
my ($sql); |
|
1322
|
|
|
|
|
|
|
|
|
1323
|
0
|
0
|
|
|
|
0
|
if ($driver eq 'mysql') { |
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
1324
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1325
|
|
|
|
|
|
|
select schema_name |
|
1326
|
|
|
|
|
|
|
from information_schema.schemata |
|
1327
|
|
|
|
|
|
|
where schema_name not in ('information_schema','cluster') |
|
1328
|
|
|
|
|
|
|
order by schema_name asc |
|
1329
|
|
|
|
|
|
|
}; |
|
1330
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1331
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1332
|
|
|
|
|
|
|
select d.datname |
|
1333
|
|
|
|
|
|
|
from pg_catalog.pg_database d |
|
1334
|
|
|
|
|
|
|
join pg_catalog.pg_roles r on (d.datdba = r.oid) |
|
1335
|
|
|
|
|
|
|
where datname not in ('postgres') |
|
1336
|
|
|
|
|
|
|
and datname not like 'template%' |
|
1337
|
|
|
|
|
|
|
order by datname asc |
|
1338
|
|
|
|
|
|
|
}; |
|
1339
|
|
|
|
|
|
|
} elsif ($driver eq 'db2') { |
|
1340
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1341
|
|
|
|
|
|
|
select schemaname |
|
1342
|
|
|
|
|
|
|
from syscat.schemata |
|
1343
|
|
|
|
|
|
|
where schemaname not like 'SYS%' |
|
1344
|
|
|
|
|
|
|
order by schemaname asc |
|
1345
|
|
|
|
|
|
|
}; |
|
1346
|
|
|
|
|
|
|
} else { |
|
1347
|
0
|
|
|
|
|
0
|
die dslog("This method is not yet implemented for your database server ($driver)."); |
|
1348
|
|
|
|
|
|
|
} |
|
1349
|
|
|
|
|
|
|
|
|
1350
|
0
|
|
|
|
|
0
|
my $res = $self->do($sql); |
|
1351
|
|
|
|
|
|
|
|
|
1352
|
0
|
0
|
|
|
|
0
|
if ($res) { |
|
1353
|
0
|
|
|
|
|
0
|
my @schemas = (); |
|
1354
|
|
|
|
|
|
|
|
|
1355
|
0
|
|
|
|
|
0
|
while ($res->next) { |
|
1356
|
0
|
|
|
|
|
0
|
push(@schemas, $res->[0]); |
|
1357
|
|
|
|
|
|
|
} |
|
1358
|
|
|
|
|
|
|
|
|
1359
|
0
|
|
|
|
|
0
|
return @schemas; |
|
1360
|
|
|
|
|
|
|
} else { |
|
1361
|
0
|
|
|
|
|
0
|
die dslog("Error encountered when retrieving list of database schemas: $DBI::errstr"); |
|
1362
|
|
|
|
|
|
|
} |
|
1363
|
|
|
|
|
|
|
} |
|
1364
|
|
|
|
|
|
|
|
|
1365
|
|
|
|
|
|
|
sub db_primary { |
|
1366
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1367
|
|
|
|
|
|
|
|
|
1368
|
0
|
|
|
|
|
0
|
my %config = %{$self->{'config'}->{'primary'}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1369
|
0
|
|
|
|
|
0
|
$config{'name'} = 'primary'; |
|
1370
|
|
|
|
|
|
|
|
|
1371
|
|
|
|
|
|
|
# remove "sensitive" fields from config |
|
1372
|
0
|
0
|
|
|
|
0
|
delete $config{'pass'} if defined $config{'pass'}; |
|
1373
|
|
|
|
|
|
|
|
|
1374
|
0
|
|
|
|
|
0
|
return { %config }; |
|
1375
|
|
|
|
|
|
|
} |
|
1376
|
|
|
|
|
|
|
|
|
1377
|
|
|
|
|
|
|
sub db_reader { |
|
1378
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1379
|
|
|
|
|
|
|
|
|
1380
|
|
|
|
|
|
|
return unless defined $self->{'config'}->{'default_reader'} |
|
1381
|
0
|
0
|
0
|
|
|
0
|
&& exists $self->{'config'}->{'readers'}->{$self->{'config'}->{'default_reader'}}; |
|
1382
|
|
|
|
|
|
|
|
|
1383
|
0
|
|
|
|
|
0
|
my %config = %{$self->{'config'}->{'readers'}->{$self->{'config'}->{'default_reader'}}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1384
|
|
|
|
|
|
|
|
|
1385
|
0
|
0
|
|
|
|
0
|
delete $config{'pass'} if defined $config{'pass'}; |
|
1386
|
|
|
|
|
|
|
|
|
1387
|
0
|
|
|
|
|
0
|
return { %config }; |
|
1388
|
|
|
|
|
|
|
} |
|
1389
|
|
|
|
|
|
|
|
|
1390
|
|
|
|
|
|
|
sub do { |
|
1391
|
19
|
|
|
19
|
1
|
9198
|
my ($self, @args) = @_; |
|
1392
|
|
|
|
|
|
|
|
|
1393
|
19
|
|
|
|
|
40
|
my $opts = {}; |
|
1394
|
|
|
|
|
|
|
|
|
1395
|
|
|
|
|
|
|
# check first argument to see if options hashref was passed in before a SQL statement |
|
1396
|
19
|
100
|
100
|
|
|
115
|
if (scalar(@args) > 1 && ref($args[0]) eq 'HASH') { |
|
1397
|
1
|
|
|
|
|
5
|
$opts = shift @args; |
|
1398
|
|
|
|
|
|
|
} |
|
1399
|
|
|
|
|
|
|
|
|
1400
|
|
|
|
|
|
|
# Default the pager settings unless caller passed in their own values. A |
|
1401
|
|
|
|
|
|
|
# "page" value of <=0 denotes no paging to be used (IOW the query will not |
|
1402
|
|
|
|
|
|
|
# be limited (at least by us -- the caller might have their own LIMIT in |
|
1403
|
|
|
|
|
|
|
# there) and all possible results from the database will be available. |
|
1404
|
19
|
100
|
66
|
|
|
108
|
$opts->{'per_page'} = 25 unless defined $opts->{'per_page'} && $opts->{'per_page'} =~ /^\d+$/o; |
|
1405
|
19
|
100
|
66
|
|
|
87
|
$opts->{'page'} = -1 unless defined $opts->{'page'} && $opts->{'page'} =~ /^\d+$/o; |
|
1406
|
|
|
|
|
|
|
|
|
1407
|
|
|
|
|
|
|
# allow for overriding of statement preparation & caching on a per-query basis |
|
1408
|
|
|
|
|
|
|
$opts->{'prepare'} = $self->{'config'}->{'prepare_statements'} |
|
1409
|
19
|
50
|
33
|
|
|
92
|
unless defined $opts->{'prepare'} && $opts->{'prepare'} =~ /^\d+$/o; |
|
1410
|
|
|
|
|
|
|
|
|
1411
|
|
|
|
|
|
|
# pass the rest straight through for conversion from convenient-bindings to normal-bindings |
|
1412
|
19
|
|
|
|
|
64
|
my ($st_type, $sql, @binds) = _transform_bindings(@args); |
|
1413
|
|
|
|
|
|
|
|
|
1414
|
|
|
|
|
|
|
# Figure out which DB handler we'll be using. If we're inside of a transaction, |
|
1415
|
|
|
|
|
|
|
# it has to be the primary DB. But if we're not, then we check whether |
|
1416
|
|
|
|
|
|
|
# reader_failover is turned on; if it is not, we just blindly set the currently |
|
1417
|
|
|
|
|
|
|
# selected reader's handle, otherwise we go through the ping tests (keeping in |
|
1418
|
|
|
|
|
|
|
# mind the flag_bad_readers setting as well) to find a new reader DB if the |
|
1419
|
|
|
|
|
|
|
# current one doesn't succeed |
|
1420
|
19
|
|
|
|
|
28
|
my $dbh; |
|
1421
|
19
|
100
|
100
|
|
|
124
|
if ($self->{'in_tx'} > 0 || $st_type ne 'select') { |
|
1422
|
11
|
|
|
|
|
32
|
$dbh = $self->{'handles'}->{'primary'}; |
|
1423
|
|
|
|
|
|
|
} else { |
|
1424
|
8
|
50
|
33
|
|
|
36
|
if (exists $self->{'config'}->{'reader_failover'} && $self->{'config'}->{'reader_failover'} == 1) { |
|
1425
|
0
|
0
|
0
|
|
|
0
|
my $flag_bad = exists $self->{'config'}->{'flag_bad_readers'} && $self->{'config'}->{'flag_bad_readers'} == 1 |
|
1426
|
|
|
|
|
|
|
? 1 : 0; |
|
1427
|
0
|
0
|
|
|
|
0
|
if (!$self->{'handles'}->{'reader'}->do("select 1")) { |
|
1428
|
0
|
0
|
|
|
|
0
|
if ($self->{'config'}->{'default_reader'} eq 'primary') { |
|
1429
|
|
|
|
|
|
|
# current reader was the primary DB... we're in trouble now |
|
1430
|
0
|
|
|
|
|
0
|
die dslog("Primary database server failed connectivity test."); |
|
1431
|
|
|
|
|
|
|
} |
|
1432
|
|
|
|
|
|
|
|
|
1433
|
0
|
|
|
|
|
0
|
my $reader_found = 0; |
|
1434
|
|
|
|
|
|
|
# if flag_bad_readers is not turned on, we need to have a quasi-reasonable limit to the number |
|
1435
|
|
|
|
|
|
|
# of attempts we'll make to find a new reader, since the @new_readers list will never exhaust |
|
1436
|
|
|
|
|
|
|
# itself (all readers will end up in it every single time, just in a random order) |
|
1437
|
0
|
|
|
|
|
0
|
my $check_limit = scalar(keys(%{$self->{'config'}->{'readers'}})) * 2; |
|
|
0
|
|
|
|
|
0
|
|
|
1438
|
|
|
|
|
|
|
|
|
1439
|
|
|
|
|
|
|
CHECK_READER: |
|
1440
|
0
|
|
|
|
|
0
|
while (!$reader_found) { |
|
1441
|
0
|
0
|
|
|
|
0
|
if ($check_limit < 1) { |
|
1442
|
0
|
0
|
|
|
|
0
|
dslog("Exhausted connection attempts to new reader databases. Giving up.") if DEBUG(); |
|
1443
|
0
|
|
|
|
|
0
|
last CHECK_READER; |
|
1444
|
|
|
|
|
|
|
} |
|
1445
|
0
|
0
|
|
|
|
0
|
dslog("Current reader $self->{'config'}->{'default_reader'} failed ping test. Choosing new reader.") |
|
1446
|
|
|
|
|
|
|
if DEBUG(); |
|
1447
|
|
|
|
|
|
|
|
|
1448
|
0
|
|
|
|
|
0
|
my (@new_readers); |
|
1449
|
0
|
0
|
|
|
|
0
|
if ($flag_bad) { |
|
1450
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'readers'}->{ $self->{'config'}->{'default_reader'} }->{'failure'} = time(); |
|
1451
|
0
|
|
|
|
|
0
|
@new_readers = grep { !exists $self->{'config'}->{'readers'}->{$_}->{'failure'} } |
|
1452
|
0
|
|
|
|
|
0
|
keys %{$self->{'config'}->{'readers'}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1453
|
|
|
|
|
|
|
} else { |
|
1454
|
0
|
|
|
|
|
0
|
@new_readers = keys %{$self->{'config'}->{'readers'}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1455
|
|
|
|
|
|
|
} |
|
1456
|
|
|
|
|
|
|
|
|
1457
|
0
|
0
|
|
|
|
0
|
last CHECK_READER if scalar(@new_readers) < 1; |
|
1458
|
|
|
|
|
|
|
|
|
1459
|
|
|
|
|
|
|
# randomly sort the new reader DB list |
|
1460
|
0
|
|
|
|
|
0
|
my $i = $#new_readers; |
|
1461
|
0
|
|
|
|
|
0
|
while ($i--) { |
|
1462
|
0
|
|
|
|
|
0
|
my $j = int rand ($i+1); |
|
1463
|
0
|
|
|
|
|
0
|
@new_readers[$i,$j] = @new_readers[$j,$i]; |
|
1464
|
|
|
|
|
|
|
} |
|
1465
|
|
|
|
|
|
|
|
|
1466
|
0
|
|
|
|
|
0
|
my $reader = $new_readers[0]; |
|
1467
|
|
|
|
|
|
|
|
|
1468
|
0
|
0
|
|
|
|
0
|
if ($dbh = _db_connect( |
|
1469
|
|
|
|
|
|
|
cache => $self->{'config'}->{'cache_connections'}, |
|
1470
|
0
|
|
|
|
|
0
|
%{$self->{'config'}->{'readers'}->{$reader}})) |
|
1471
|
|
|
|
|
|
|
{ |
|
1472
|
|
|
|
|
|
|
# touch of extra paranoia... make sure we really did connect properly (since |
|
1473
|
|
|
|
|
|
|
# there is an ever-so-slight chance that connection caching, if turned on, |
|
1474
|
|
|
|
|
|
|
# might be deceiving us) |
|
1475
|
0
|
0
|
|
|
|
0
|
if ($dbh->do("select 1")) { |
|
1476
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'default_reader'} = $reader; |
|
1477
|
0
|
|
|
|
|
0
|
$self->{'handles'}->{'reader'} = $dbh; |
|
1478
|
0
|
|
|
|
|
0
|
$reader_found = 1; |
|
1479
|
|
|
|
|
|
|
} |
|
1480
|
|
|
|
|
|
|
} |
|
1481
|
0
|
|
|
|
|
0
|
$check_limit--; |
|
1482
|
|
|
|
|
|
|
} |
|
1483
|
|
|
|
|
|
|
|
|
1484
|
|
|
|
|
|
|
# if a new reader wasn't found, make one last attempt by pinging the primary and using it |
|
1485
|
0
|
0
|
|
|
|
0
|
if (!$reader_found) { |
|
1486
|
0
|
0
|
|
|
|
0
|
if ($self->{'handles'}->{'primary'}->do("select 1")) { |
|
1487
|
0
|
|
|
|
|
0
|
$self->{'handles'}->{'reader'} = $self->{'handles'}->{'primary'}; |
|
1488
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'default_reader'} = 'primary'; |
|
1489
|
0
|
|
|
|
|
0
|
$dbh = $self->{'handles'}->{'primary'}; |
|
1490
|
|
|
|
|
|
|
} else { |
|
1491
|
0
|
|
|
|
|
0
|
die dslog("Failure attempting to fall back on primary database for reads after all readers failed."); |
|
1492
|
|
|
|
|
|
|
} |
|
1493
|
|
|
|
|
|
|
} |
|
1494
|
|
|
|
|
|
|
} else { |
|
1495
|
0
|
|
|
|
|
0
|
$dbh = $self->{'handles'}->{'reader'}; |
|
1496
|
|
|
|
|
|
|
} |
|
1497
|
|
|
|
|
|
|
} else { |
|
1498
|
8
|
|
|
|
|
22
|
$dbh = $self->{'handles'}->{'reader'}; |
|
1499
|
|
|
|
|
|
|
} |
|
1500
|
|
|
|
|
|
|
} |
|
1501
|
|
|
|
|
|
|
|
|
1502
|
|
|
|
|
|
|
# if we intend to bypass normal statement preparation with placeholders, we need to |
|
1503
|
|
|
|
|
|
|
# now replace all of them with their actual values (properly quoted, of course) so |
|
1504
|
|
|
|
|
|
|
# the underlying DBD::* driver doesn't spend time on this |
|
1505
|
19
|
50
|
|
|
|
52
|
if (!$opts->{'prepare'}) { |
|
1506
|
19
|
50
|
|
|
|
779
|
dslog("Manually replacing placeholders prior to statement execution.") if DEBUG(); |
|
1507
|
19
|
|
|
|
|
88
|
$sql =~ s{(\s+|,|\(|\=)\?(\s*)}{$1 . $dbh->quote(shift(@binds)) . $2}egsix; |
|
|
15
|
|
|
|
|
138
|
|
|
1508
|
|
|
|
|
|
|
} |
|
1509
|
|
|
|
|
|
|
|
|
1510
|
19
|
|
|
|
|
83
|
my $unpaged_sql = $sql; |
|
1511
|
|
|
|
|
|
|
|
|
1512
|
19
|
100
|
|
|
|
63
|
if ($opts->{'page'} > 0) { |
|
1513
|
|
|
|
|
|
|
# Caller wants auto-paging, so validate that the original query doesn't end with a |
|
1514
|
|
|
|
|
|
|
# LIMIT clause and add our own |
|
1515
|
|
|
|
|
|
|
my $driver = $self->{'in_tx'} > 0 |
|
1516
|
|
|
|
|
|
|
? lc($self->{'config'}->{'primary'}->{'driver'}) |
|
1517
|
1
|
50
|
|
|
|
14
|
: lc($self->{'config'}->{'readers'}->{$self->{'config'}->{'default_reader'}}->{'driver'}); |
|
1518
|
|
|
|
|
|
|
|
|
1519
|
|
|
|
|
|
|
# Warn if it appears there is already a limiting clause in the original query (but in |
|
1520
|
|
|
|
|
|
|
# the event we misidentify something else as a limiting clause, give it a chance to run |
|
1521
|
|
|
|
|
|
|
# and let the database server reject it if it really is invalid). |
|
1522
|
1
|
50
|
33
|
|
|
19
|
if ($sql =~ /limit\s+\d+(\s+offset\s+\d+|\s*,\s*\d+)\s*$/ois |
|
1523
|
|
|
|
|
|
|
|| $sql =~ /rows\s+\d+(\s+to\s+\d+)\s*$/ois |
|
1524
|
|
|
|
|
|
|
) { |
|
1525
|
0
|
0
|
|
|
|
0
|
dslog("Paging requested on a query that appears to already have a limiting clause. Attempting anyway.") |
|
1526
|
|
|
|
|
|
|
if DEBUG(); |
|
1527
|
|
|
|
|
|
|
} |
|
1528
|
|
|
|
|
|
|
|
|
1529
|
1
|
|
|
|
|
6
|
my $limit_offset = ($opts->{'page'} - 1) * $opts->{'per_page'}; |
|
1530
|
1
|
50
|
|
|
|
20
|
$limit_offset = 0 unless $limit_offset > 0; |
|
1531
|
1
|
|
|
|
|
3
|
my $limit_last = $limit_offset + $opts->{'per_page'} - 1; |
|
1532
|
|
|
|
|
|
|
|
|
1533
|
|
|
|
|
|
|
# Add appropriate limiting clause syntax based on current database server |
|
1534
|
1
|
50
|
|
|
|
5
|
if (exists { map { $_ => '' } qw( mysql pg sqlite ) }->{$driver}) { |
|
|
3
|
0
|
|
|
|
15
|
|
|
1535
|
1
|
|
|
|
|
9
|
$sql .= qq{ limit $opts->{'per_page'} offset $limit_offset }; |
|
1536
|
0
|
|
|
|
|
0
|
} elsif (exists { map { $_ => '' } qw( interbase firebird ) }->{$driver}) { |
|
1537
|
0
|
|
|
|
|
0
|
$sql .= qq{ rows $limit_offset to $limit_last }; |
|
1538
|
|
|
|
|
|
|
} else { |
|
1539
|
|
|
|
|
|
|
# TODO: Possibly use SQL::Abstract::Limit to handle other databases (which all pretty |
|
1540
|
|
|
|
|
|
|
# much support much more complicated ways of achieving the same effect). |
|
1541
|
0
|
|
|
|
|
0
|
die dslog("Automated result set paging is not currently supported for this database server ($driver). Sorry."); |
|
1542
|
|
|
|
|
|
|
} |
|
1543
|
|
|
|
|
|
|
} |
|
1544
|
|
|
|
|
|
|
|
|
1545
|
|
|
|
|
|
|
# prepare & execute the query |
|
1546
|
19
|
|
|
|
|
23
|
my ($dbi_res, $sth); |
|
1547
|
|
|
|
|
|
|
|
|
1548
|
19
|
|
|
|
|
28
|
my $error = 0; |
|
1549
|
|
|
|
|
|
|
|
|
1550
|
19
|
50
|
|
|
|
40
|
if ($opts->{'prepare'}) { |
|
1551
|
0
|
0
|
0
|
|
|
0
|
if (defined $self->{'config'}->{'cache_statements'} && $self->{'config'}->{'cache_statements'} == 1) { |
|
1552
|
|
|
|
|
|
|
# DBI prepare_cached's "if_active" argument (the third one) is passed in as 3 since |
|
1553
|
|
|
|
|
|
|
# that is supposed to be the safest method (if there's a cached version of the same |
|
1554
|
|
|
|
|
|
|
# query, but it's currently active, it's removed from the cache and a new statement |
|
1555
|
|
|
|
|
|
|
# is created -- thus the currently-active handle that was in the cache is not |
|
1556
|
|
|
|
|
|
|
# affected in any way by what may be done with the new handle) |
|
1557
|
0
|
0
|
|
|
|
0
|
unless ($sth = $dbh->prepare_cached($sql, $opts, 3)) { |
|
1558
|
0
|
0
|
|
|
|
0
|
dslog("Error encountered when preparing cached SQL statement: $DBI::errstr") if DEBUG(); |
|
1559
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1560
|
|
|
|
|
|
|
} |
|
1561
|
|
|
|
|
|
|
} else { |
|
1562
|
0
|
0
|
|
|
|
0
|
unless ($sth = $dbh->prepare($sql, $opts)) { |
|
1563
|
0
|
0
|
|
|
|
0
|
dslog("Error encountered when preparing SQL statement: $DBI::errstr") if DEBUG(); |
|
1564
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1565
|
|
|
|
|
|
|
} |
|
1566
|
|
|
|
|
|
|
} |
|
1567
|
|
|
|
|
|
|
|
|
1568
|
0
|
0
|
|
|
|
0
|
if ($error == 0) { |
|
1569
|
0
|
0
|
|
|
|
0
|
unless ($dbi_res = $sth->execute(@binds)) { |
|
1570
|
0
|
0
|
|
|
|
0
|
dslog("Error encountered when executing SQL statement: $DBI::errstr") if DEBUG(); |
|
1571
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1572
|
|
|
|
|
|
|
} |
|
1573
|
|
|
|
|
|
|
} |
|
1574
|
|
|
|
|
|
|
} else { |
|
1575
|
|
|
|
|
|
|
# query will run without prior preparation (this can be desired with some databases on |
|
1576
|
|
|
|
|
|
|
# various queries where coming up with the best (or even a reasonable) plan requires |
|
1577
|
|
|
|
|
|
|
# the database's planner to know the actual values instead of having placeholders)... |
|
1578
|
|
|
|
|
|
|
# if we're in here, it also means that _transform_bindings() removed all the placeholders |
|
1579
|
|
|
|
|
|
|
# and put in the actual values, so we don't need to pass @binds into execute() |
|
1580
|
19
|
100
|
66
|
|
|
113
|
if ($st_type ne 'select' && $sql !~ /\s+returning\s+\w+(\s*,\s*\w+)*\s*/ois) { |
|
1581
|
|
|
|
|
|
|
# no intermediary statement handler necessary, since we're apparently issuing |
|
1582
|
|
|
|
|
|
|
# non-SELECT DML that does not end with a RETURNING clause |
|
1583
|
8
|
|
|
|
|
34
|
$sth = { NAME => [], NAME_hash => [] }; # dummy these up so we don't die below |
|
1584
|
|
|
|
|
|
|
|
|
1585
|
8
|
50
|
|
|
|
63
|
unless ($dbi_res = $dbh->do($sql)) { |
|
1586
|
0
|
0
|
|
|
|
0
|
dslog("Error calling DBI do() method on pre-bound, unprepared SQL statement: $DBI::errstr") if DEBUG(); |
|
1587
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1588
|
|
|
|
|
|
|
} |
|
1589
|
|
|
|
|
|
|
} else { |
|
1590
|
11
|
50
|
|
|
|
120
|
if ($sth = $dbh->prepare($sql, $opts)) { |
|
1591
|
11
|
50
|
|
|
|
2304
|
unless ($dbi_res = $sth->execute()) { |
|
1592
|
0
|
0
|
|
|
|
0
|
dslog("Error executing pre-bound SQL statement: $DBI::errstr") if DEBUG(); |
|
1593
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1594
|
|
|
|
|
|
|
} |
|
1595
|
|
|
|
|
|
|
} else { |
|
1596
|
0
|
0
|
|
|
|
0
|
dslog("Error encountered preparing pre-bound SQL statement: $DBI::errstr") if DEBUG(); |
|
1597
|
0
|
|
|
|
|
0
|
$error = 1; |
|
1598
|
|
|
|
|
|
|
} |
|
1599
|
|
|
|
|
|
|
} |
|
1600
|
|
|
|
|
|
|
} |
|
1601
|
|
|
|
|
|
|
|
|
1602
|
19
|
100
|
|
|
|
1483
|
$self->{'st_count'}++ unless $st_type eq 'select'; |
|
1603
|
|
|
|
|
|
|
|
|
1604
|
|
|
|
|
|
|
# Set up skeleton for a DBIx::DataStore::ResultSet object |
|
1605
|
19
|
|
|
|
|
133
|
my $results = DBIx::DataStore::ResultRow->new({},{},[]); |
|
1606
|
19
|
|
|
|
|
42
|
bless($results, 'DBIx::DataStore::ResultSet'); |
|
1607
|
|
|
|
|
|
|
|
|
1608
|
19
|
50
|
|
|
|
46
|
$$results->{'error'} = $DBI::errstr if $error; |
|
1609
|
|
|
|
|
|
|
|
|
1610
|
19
|
|
|
|
|
89
|
$$results->{'_st_type'} = $st_type; |
|
1611
|
19
|
|
|
|
|
31
|
$$results->{'_sql'} = $unpaged_sql; |
|
1612
|
19
|
|
|
|
|
48
|
$$results->{'_binds'} = [ @binds ]; |
|
1613
|
|
|
|
|
|
|
|
|
1614
|
19
|
|
|
|
|
34
|
$$results->{'_rows'} = $dbi_res; |
|
1615
|
19
|
|
|
|
|
32
|
$$results->{'_dbh'} = $dbh; |
|
1616
|
19
|
|
|
|
|
55
|
$$results->{'_sth'} = $sth; |
|
1617
|
|
|
|
|
|
|
|
|
1618
|
19
|
|
|
|
|
61
|
$$results->{'_page_num'} = $opts->{'page'}; |
|
1619
|
19
|
|
|
|
|
34
|
$$results->{'_page_per'} = $opts->{'per_page'}; |
|
1620
|
|
|
|
|
|
|
|
|
1621
|
19
|
|
|
|
|
191
|
$$results->{'impl'}->[DBIx::DataStore::ResultRow::KEYS()] = $sth->{'NAME'}; |
|
1622
|
19
|
|
|
|
|
253
|
$$results->{'impl'}->[DBIx::DataStore::ResultRow::INDEX()] = $sth->{'NAME_hash'}; |
|
1623
|
|
|
|
|
|
|
|
|
1624
|
19
|
|
|
|
|
162
|
return $results; |
|
1625
|
|
|
|
|
|
|
} |
|
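Beyond the paging options shown in the synopsis, the options hashref handled at the top of do() also allows a per-query prepare override; with prepare set to 0, the placeholder-replacement branch above interpolates quoted values directly into the SQL so the server's planner sees real values. A hedged example:

    my $results = $db->do({ prepare => 0 }, q{
        select p.id, p.name
          from products p
         where p.price between ? and ?
    }, 10, 20);

    while ($results->next) {
        printf("%d: %s\n", @{$results}{qw( id name )});
    }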
1626
|
|
|
|
|
|
|
|
|
1627
|
|
|
|
|
|
|
sub in_transaction { |
|
1628
|
1
|
|
|
1
|
1
|
4
|
my ($self) = @_; |
|
1629
|
|
|
|
|
|
|
|
|
1630
|
1
|
50
|
33
|
|
|
18
|
return 1 if defined $self->{'in_tx'} && $self->{'in_tx'} > 0; |
|
1631
|
0
|
|
|
|
|
0
|
return; |
|
1632
|
|
|
|
|
|
|
} |
|
1633
|
|
|
|
|
|
|
|
|
1634
|
|
|
|
|
|
|
sub last_insert_id { |
|
1635
|
2
|
|
|
2
|
1
|
748
|
my ($self, @args) = @_; |
|
1636
|
|
|
|
|
|
|
|
|
1637
|
2
|
|
|
|
|
5
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1638
|
|
|
|
|
|
|
|
|
1639
|
|
|
|
|
|
|
# fill in pass-through args with dummy values, since MySQL & Informix don't actually |
|
1640
|
|
|
|
|
|
|
# support the full possibilities of this DBI function |
|
1641
|
2
|
50
|
|
|
|
4
|
if (exists { map { $_ => '' } qw( mysql informix ) }->{$driver}) { |
|
|
4
|
|
|
|
|
16
|
|
|
1642
|
0
|
|
|
|
|
0
|
$args[$_] = 'X' for (1..3); |
|
1643
|
|
|
|
|
|
|
} |
|
1644
|
|
|
|
|
|
|
|
|
1645
|
2
|
50
|
|
|
|
17
|
if (my $id = $self->{'handles'}->{'primary'}->last_insert_id(@args)) { |
|
1646
|
2
|
|
|
|
|
13
|
return $id; |
|
1647
|
|
|
|
|
|
|
} else { |
|
1648
|
0
|
|
|
|
|
0
|
die dslog("Error obtaining the Last Insert ID: $DBI::errstr"); |
|
1649
|
|
|
|
|
|
|
} |
|
1650
|
|
|
|
|
|
|
} |
|
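last_insert_id() simply proxies the DBI method of the same name on the primary handle, dummying up the middle arguments for MySQL and Informix; for other drivers the usual DBI catalog/schema/table/field arguments pass straight through. A hedged example (the argument values are illustrative):

    $db->do(q{ insert into products (name, price) values (?, ?) }, 'Golden Apple', 22.24);

    # DBI-style arguments: catalog, schema, table, field
    my $new_id = $db->last_insert_id(undef, undef, 'products', 'id');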
1651
|
|
|
|
|
|
|
|
|
1652
|
|
|
|
|
|
|
sub ping { |
|
1653
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1654
|
|
|
|
|
|
|
|
|
1655
|
|
|
|
|
|
|
# Make sure we don't double-ping the primary server if it is both primary & reader |
|
1656
|
0
|
0
|
|
|
|
0
|
my @servers = $self->{'config'}->{'default_reader'} eq 'primary' ? qw( primary ) : qw( primary reader ); |
|
1657
|
|
|
|
|
|
|
|
|
1658
|
0
|
|
|
|
|
0
|
foreach my $server (@servers) { |
|
1659
|
0
|
0
|
|
|
|
0
|
if (!$self->{'handles'}->{$server}->do("select 1")) { |
|
1660
|
0
|
0
|
|
|
|
0
|
dslog("Error pinging $server database server: " . $self->{'handles'}->{$server}->errstr) if DEBUG(); |
|
1661
|
0
|
|
|
|
|
0
|
return; |
|
1662
|
|
|
|
|
|
|
} |
|
1663
|
|
|
|
|
|
|
} |
|
1664
|
|
|
|
|
|
|
|
|
1665
|
0
|
|
|
|
|
0
|
return 1; |
|
1666
|
|
|
|
|
|
|
} |
|
1667
|
|
|
|
|
|
|
|
|
1668
|
|
|
|
|
|
|
sub rollback { |
|
1669
|
2
|
|
|
2
|
1
|
7
|
my ($self, $savepoint) = @_; |
|
1670
|
|
|
|
|
|
|
|
|
1671
|
2
|
50
|
|
|
|
10
|
die dslog("Rollback attempted without any open transactions!") unless $self->{'in_tx'} > 0; |
|
1672
|
|
|
|
|
|
|
|
|
1673
|
|
|
|
|
|
|
# If a savepoint name was passed in, we have to issue the rollback statement ourselves, |
|
1674
|
|
|
|
|
|
|
# since DBI doesn't support that syntax through its rollback() method and an informally |
|
1675
|
|
|
|
|
|
|
# proposed rollbackto() method on dbi-users hasn't been accepted by the DBI devs yet. |
|
1676
|
2
|
100
|
|
|
|
8
|
if (defined $savepoint) { |
|
1677
|
1
|
|
|
|
|
6
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1678
|
1
|
|
|
|
|
4
|
my ($sql); |
|
1679
|
|
|
|
|
|
|
|
|
1680
|
1
|
50
|
|
|
|
9
|
if ($driver eq 'sybase') { |
|
|
|
50
|
|
|
|
|
|
|
1681
|
0
|
|
|
|
|
0
|
die dslog("Savepoints are not supported by Sybase!"); |
|
1682
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1683
|
0
|
0
|
|
|
|
0
|
if (!$self->{'handles'}->{'primary'}->pg_rollback_to($savepoint)) { |
|
1684
|
0
|
|
|
|
|
0
|
die dslog("Error rolling back to savepoint '$savepoint':", $self->{'handles'}->{'primary'}->errstr); |
|
1685
|
|
|
|
|
|
|
} |
|
1686
|
|
|
|
|
|
|
} else { |
|
1687
|
|
|
|
|
|
|
$savepoint = $self->{'handles'}->{'primary'}->quote($savepoint) |
|
1688
|
1
|
|
50
|
|
|
10
|
|| die dslog("Error encountered when safe-quoting savepoint name:", $self->{'handles'}->{'primary'}->errstr); |
|
1689
|
|
|
|
|
|
|
|
|
1690
|
1
|
|
|
|
|
22
|
$sql = qq{ rollback to savepoint $savepoint }; |
|
1691
|
|
|
|
|
|
|
|
|
1692
|
1
|
50
|
|
|
|
11
|
if (!$self->{'handles'}->{'primary'}->do($sql)) { |
|
1693
|
0
|
|
|
|
|
0
|
die dslog("Error rolling back to savepoint '$savepoint':", $self->{'handles'}->{'primary'}->errstr); |
|
1694
|
|
|
|
|
|
|
} |
|
1695
|
|
|
|
|
|
|
} |
|
1696
|
|
|
|
|
|
|
|
|
1697
|
|
|
|
|
|
|
# Note that we do not decrement the transaction level counter, since we rolled |
|
1698
|
|
|
|
|
|
|
# back *within* a transaction, we didn't rollback the transaction itself. |
|
1699
|
|
|
|
|
|
|
} else { |
|
1700
|
1
|
50
|
|
|
|
30
|
$self->{'handles'}->{'primary'}->rollback |
|
1701
|
|
|
|
|
|
|
|| die dslog("Error encountered during attempt to roll back transaction: $DBI::errstr"); |
|
1702
|
|
|
|
|
|
|
|
|
1703
|
1
|
|
|
|
|
3
|
$self->{'in_tx'}--; |
|
1704
|
1
|
|
|
|
|
2
|
$self->{'st_count'} = 0; |
|
1705
|
|
|
|
|
|
|
} |
|
1706
|
|
|
|
|
|
|
|
|
1707
|
|
|
|
|
|
|
# if AutoCommit is turned off on the primary DB, then the closing of a transaction |
|
1708
|
|
|
|
|
|
|
# (either through a rollback or commit) automatically begins a new transaction, in |
|
1709
|
|
|
|
|
|
|
# which case we need to re-increment the in_tx count |
|
1710
|
2
|
50
|
|
|
|
67
|
if ($self->{'autocommit'} == 0) { |
|
1711
|
0
|
|
|
|
|
0
|
$self->{'in_tx'}++; |
|
1712
|
|
|
|
|
|
|
} |
|
1713
|
|
|
|
|
|
|
|
|
1714
|
|
|
|
|
|
|
# reset schema search path if AutoCommit is turned off (since the setting of the |
|
1715
|
|
|
|
|
|
|
# search path on connect would have occurred inside a transaction -- how annoying) |
|
1716
|
2
|
50
|
33
|
|
|
10
|
if ($self->{'autocommit'} == 0 && defined $self->{'config'}->{'primary'}->{'schemas'}) { |
|
1717
|
|
|
|
|
|
|
_set_schema_searchpath($self->{'handles'}->{'primary'}, $self->{'config'}->{'primary'}->{'driver'}, |
|
1718
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'primary'}->{'schemas'}); |
|
1719
|
|
|
|
|
|
|
} |
|
1720
|
|
|
|
|
|
|
|
|
1721
|
2
|
|
|
|
|
11
|
return 1; |
|
1722
|
|
|
|
|
|
|
} |
|
1723
|
|
|
|
|
|
|
|
|
1724
|
|
|
|
|
|
|
sub savepoint { |
|
1725
|
1
|
|
|
1
|
1
|
2
|
my ($self, $savepoint) = @_; |
|
1726
|
|
|
|
|
|
|
|
|
1727
|
1
|
50
|
|
|
|
5
|
die dslog("Cannot create a savepoint outside of a transaction context!") unless $self->{'in_tx'} > 0; |
|
1728
|
1
|
50
|
33
|
|
|
45
|
die dslog("No savepoint name was provided!") unless defined $savepoint && $savepoint =~ /\w+/o; |
|
1729
|
|
|
|
|
|
|
|
|
1730
|
1
|
|
|
|
|
6
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1731
|
|
|
|
|
|
|
|
|
1732
|
|
|
|
|
|
|
# DBI does not provide savepoint related methods (yet, at least -- there's been discussion |
|
1733
|
|
|
|
|
|
|
# on the mailing lists about whether or not it should, and if so what they should be), so |
|
1734
|
|
|
|
|
|
|
# we need to just build the statement ourselves and issue it. |
|
1735
|
1
|
|
|
|
|
3
|
my ($sql); |
|
1736
|
|
|
|
|
|
|
|
|
1737
|
1
|
50
|
|
|
|
33
|
if ($driver eq 'sybase') { |
|
|
|
50
|
|
|
|
|
|
|
1738
|
0
|
|
|
|
|
0
|
die dslog("Sybase does not support transaction savepoints!"); |
|
1739
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1740
|
0
|
0
|
|
|
|
0
|
if (!$self->{'handles'}->{'primary'}->pg_savepoint($savepoint)) { |
|
1741
|
0
|
|
|
|
|
0
|
die dslog("Error creating transaction savepoint '$savepoint': " . $self->{'handles'}->{'primary'}->errstr); |
|
1742
|
|
|
|
|
|
|
} |
|
1743
|
|
|
|
|
|
|
} else { |
|
1744
|
|
|
|
|
|
|
$savepoint = $self->{'handles'}->{'primary'}->quote($savepoint) |
|
1745
|
1
|
|
50
|
|
|
11
|
|| die dslog("Error encountered when safe-quoting savepoint name: " . $self->{'handles'}->{'primary'}->errstr); |
|
1746
|
|
|
|
|
|
|
|
|
1747
|
1
|
|
|
|
|
23
|
$sql = qq{ savepoint $savepoint }; |
|
1748
|
|
|
|
|
|
|
|
|
1749
|
1
|
50
|
|
|
|
9
|
if (!$self->{'handles'}->{'primary'}->do($sql)) { |
|
1750
|
0
|
|
|
|
|
0
|
die dslog("Error creating transaction savepoint '$savepoint': " . $self->{'handles'}->{'primary'}->errstr); |
|
1751
|
|
|
|
|
|
|
} |
|
1752
|
|
|
|
|
|
|
} |
|
1753
|
|
|
|
|
|
|
|
|
1754
|
1
|
|
|
|
|
37
|
return 1; |
|
1755
|
|
|
|
|
|
|
} |
|
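savepoint() and the savepoint-aware form of rollback() above are meant to be used together inside an open transaction: the name is safe-quoted for you (and routed through pg_savepoint/pg_rollback_to on PostgreSQL), and rolling back to a savepoint leaves the enclosing transaction open. A hedged sketch:

    $db->begin;

    $db->do(q{ update products set price = 0 where id = ? }, 42);
    $db->savepoint('before_cleanup');

    eval {
        $db->do(q{ delete from products where price = 0 });
    };
    if ($@) {
        # undo only the work done since the savepoint; the transaction stays open
        $db->rollback('before_cleanup');
    }

    $db->commit;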
1756
|
|
|
|
|
|
|
|
|
1757
|
|
|
|
|
|
|
sub schemas { |
|
1758
|
0
|
|
|
0
|
1
|
0
|
my ($self, $schemas) = @_; |
|
1759
|
|
|
|
|
|
|
|
|
1760
|
0
|
0
|
0
|
|
|
0
|
if (defined $schemas && ref($schemas) eq 'ARRAY') { |
|
1761
|
0
|
0
|
|
|
|
0
|
dslog(q{Got request to change schemas on existing connection.}) if DEBUG() >= 2; |
|
1762
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'primary'}->{'schemas'} = [@{$schemas}]; |
|
|
0
|
|
|
|
|
0
|
|
|
1763
|
|
|
|
|
|
|
$self->{'config'}->{'readers'}->{$self->{'reader'}}->{'schemas'} = $self->{'config'}->{'primary'}->{'schemas'} |
|
1764
|
0
|
0
|
|
|
|
0
|
if $self->{'reader'} ne 'primary'; |
|
1765
|
|
|
|
|
|
|
_set_schema_searchpath( |
|
1766
|
|
|
|
|
|
|
$self->{'handles'}->{'primary'}, |
|
1767
|
0
|
|
|
|
|
0
|
$self->{'config'}->{'primary'}->{'driver'}, |
|
1768
|
|
|
|
|
|
|
$schemas |
|
1769
|
|
|
|
|
|
|
); |
|
1770
|
|
|
|
|
|
|
_set_schema_searchpath( |
|
1771
|
|
|
|
|
|
|
$self->{'handles'}->{'reader'}, |
|
1772
|
|
|
|
|
|
|
$self->{'config'}->{'readers'}->{$self->{'reader'}}->{'driver'}, |
|
1773
|
|
|
|
|
|
|
$schemas |
|
1774
|
0
|
0
|
|
|
|
0
|
) if $self->{'reader'} ne 'primary'; |
|
1775
|
|
|
|
|
|
|
} else { |
|
1776
|
0
|
0
|
|
|
|
0
|
dslog(q{Current schema search path requested.}) if DEBUG() >= 4; |
|
1777
|
0
|
|
|
|
|
0
|
return @{$self->{'config'}->{'primary'}->{'schemas'}} |
|
1778
|
|
|
|
|
|
|
if defined $self->{'config'}->{'primary'}->{'schemas'} |
|
1779
|
|
|
|
|
|
|
&& ref($self->{'config'}->{'primary'}->{'schemas'}) eq 'ARRAY' |
|
1780
|
0
|
0
|
0
|
|
|
0
|
&& scalar(@{$self->{'config'}->{'primary'}->{'schemas'}}) > 0; |
|
|
0
|
|
0
|
|
|
0
|
|
|
1781
|
|
|
|
|
|
|
} |
|
1782
|
|
|
|
|
|
|
|
|
1783
|
0
|
|
|
|
|
0
|
return; |
|
1784
|
|
|
|
|
|
|
} |
|
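schemas() doubles as getter and setter: handed an arrayref it rewrites the schema search path on the primary and on the current reader, and called with no arguments it returns the primary's configured list. A hedged example (the schema names are made up):

    # change the search path for the remainder of this connection's life
    $db->schemas([ 'reporting', 'public' ]);

    # read back the currently configured search path
    my @search_path = $db->schemas;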
1785
|
|
|
|
|
|
|
|
|
1786
|
|
|
|
|
|
|
sub servers { |
|
1787
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1788
|
|
|
|
|
|
|
|
|
1789
|
0
|
|
|
|
|
0
|
my @servers = (); |
|
1790
|
|
|
|
|
|
|
|
|
1791
|
0
|
|
|
|
|
0
|
my %config = %{$self->{'config'}->{'primary'}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1792
|
0
|
|
|
|
|
0
|
$config{'name'} = 'primary'; |
|
1793
|
0
|
0
|
|
|
|
0
|
delete $config{'password'} if defined $config{'password'}; |
|
1794
|
|
|
|
|
|
|
|
|
1795
|
0
|
|
|
|
|
0
|
push(@servers, { %config }); |
|
1796
|
|
|
|
|
|
|
|
|
1797
|
0
|
|
|
|
|
0
|
foreach my $reader (sort keys %{$self->{'config'}->{'readers'}}) { |
|
|
0
|
|
|
|
|
0
|
|
|
1798
|
0
|
|
|
|
|
0
|
%config = %{$self->{'config'}->{'readers'}->{$reader}}; |
|
|
0
|
|
|
|
|
0
|
|
|
1799
|
0
|
0
|
|
|
|
0
|
delete $config{'password'} if defined $config{'password'}; |
|
1800
|
0
|
|
|
|
|
0
|
$config{'name'} = $reader; |
|
1801
|
0
|
|
|
|
|
0
|
push(@servers, { %config }); |
|
1802
|
|
|
|
|
|
|
} |
|
1803
|
|
|
|
|
|
|
|
|
1804
|
0
|
|
|
|
|
0
|
return @servers; |
|
1805
|
|
|
|
|
|
|
} |
|
1806
|
|
|
|
|
|
|
|
|
1807
|
|
|
|
|
|
|
sub tables { |
|
1808
|
0
|
|
|
0
|
1
|
0
|
my ($self) = @_; |
|
1809
|
|
|
|
|
|
|
|
|
1810
|
0
|
|
|
|
|
0
|
my $driver = lc($self->{'config'}->{'primary'}->{'driver'}); |
|
1811
|
0
|
|
|
|
|
0
|
my $schema = $self->{'handles'}->{'primary'}->quote($self->{'config'}->{'primary'}->{'db'}); |
|
1812
|
|
|
|
|
|
|
|
|
1813
|
0
|
|
|
|
|
0
|
my ($sql); |
|
1814
|
|
|
|
|
|
|
|
|
1815
|
0
|
0
|
|
|
|
0
|
if ($driver eq 'mysql') { |
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
1816
|
0
|
|
|
|
|
0
|
$sql = qq{ |
|
1817
|
|
|
|
|
|
|
select table_name |
|
1818
|
|
|
|
|
|
|
from information_schema.tables |
|
1819
|
|
|
|
|
|
|
where table_schema in ($schema) |
|
1820
|
|
|
|
|
|
|
order by table_name asc |
|
1821
|
|
|
|
|
|
|
}; |
|
1822
|
|
|
|
|
|
|
} elsif ($driver eq 'pg') { |
|
1823
|
|
|
|
|
|
|
# make sure we only list the relations visible in the current search_path |
|
1824
|
0
|
0
|
0
|
|
|
0
|
if (defined $self->{'config'}->{'primary'}->{'schemas'} |
|
1825
|
|
|
|
|
|
|
&& ref($self->{'config'}->{'primary'}->{'schemas'}) eq 'ARRAY') { |
|
1826
|
0
|
|
|
|
|
0
|
$schema = join(',', @{$self->{'config'}->{'primary'}->{'schemas'}}); |
|
|
0
|
|
|
|
|
0
|
|
|
1827
|
|
|
|
|
|
|
} else { |
|
1828
|
0
|
|
|
|
|
0
|
$schema = q{'public'}; |
|
1829
|
|
|
|
|
|
|
} |
|
1830
|
|
|
|
|
|
|
|
|
1831
|
0
|
|
|
|
|
0
|
$sql = qq{ |
|
1832
|
|
|
|
|
|
|
select c.relname |
|
1833
|
|
|
|
|
|
|
from pg_catalog.pg_class c |
|
1834
|
|
|
|
|
|
|
join pg_catalog.pg_roles r on (r.oid = c.relowner) |
|
1835
|
|
|
|
|
|
|
left join pg_catalog.pg_namespace n on (n.oid = c.relnamespace) |
|
1836
|
|
|
|
|
|
|
where c.relkind in ('r','v') |
|
1837
|
|
|
|
|
|
|
and n.nspname in ($schema) |
|
1838
|
|
|
|
|
|
|
and pg_catalog.pg_table_is_visible(c.oid) |
|
1839
|
|
|
|
|
|
|
order by relname asc |
|
1840
|
|
|
|
|
|
|
}; |
|
1841
|
|
|
|
|
|
|
} elsif ($driver eq 'oracle') { |
|
1842
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1843
|
|
|
|
|
|
|
select object_name |
|
1844
|
|
|
|
|
|
|
from user_objects |
|
1845
|
|
|
|
|
|
|
where object_type in ('TABLE','VIEW') |
|
1846
|
|
|
|
|
|
|
order by object_name asc |
|
1847
|
|
|
|
|
|
|
}; |
|
1848
|
|
|
|
|
|
|
} elsif ($driver eq 'db2') { |
|
1849
|
0
|
|
|
|
|
0
|
$sql = q{ |
|
1850
|
|
|
|
|
|
|
select tabname |
|
1851
|
|
|
|
|
|
|
from syscat.tables |
|
1852
|
|
|
|
|
|
|
where tabschema not like 'SYS%' and type in ('T','V') |
|
1853
|
|
|
|
|
|
|
order by tabname asc |
|
1854
|
|
|
|
|
|
|
}; |
|
1855
|
|
|
|
|
|
|
} else { |
|
1856
|
0
|
|
|
|
|
0
|
die dslog("This method is not yet implemented for your database server ($driver)."); |
|
1857
|
|
|
|
|
|
|
} |
|
1858
|
|
|
|
|
|
|
|
|
1859
|
0
|
|
|
|
|
0
|
my $res = $self->do($sql); |
|
1860
|
|
|
|
|
|
|
|
|
1861
|
0
|
0
|
|
|
|
0
|
if ($res) { |
|
1862
|
0
|
|
|
|
|
0
|
my @tables = (); |
|
1863
|
|
|
|
|
|
|
|
|
1864
|
0
|
|
|
|
|
0
|
while ($res->next) { |
|
1865
|
0
|
|
|
|
|
0
|
push(@tables, $res->[0]); |
|
1866
|
|
|
|
|
|
|
} |
|
1867
|
|
|
|
|
|
|
|
|
1868
|
0
|
|
|
|
|
0
|
return @tables; |
|
1869
|
|
|
|
|
|
|
} else { |
|
1870
|
0
|
|
|
|
|
0
|
die dslog("Error encountered when retrieving list of tables: $DBI::errstr"); |
|
1871
|
|
|
|
|
|
|
} |
|
1872
|
|
|
|
|
|
|
} |
|
1873
|
|
|
|
|
|
|
|
|
sub views {
    my ($self) = @_;

    my $driver = lc($self->{'config'}->{'primary'}->{'driver'});
    my $schema = $self->{'handles'}->{'primary'}->quote($self->{'config'}->{'primary'}->{'db'});

    my ($sql);

    if ($driver eq 'mysql') {
        $sql = qq{
            select table_name
            from information_schema.tables
            where table_schema in ($schema)
                and table_type = 'VIEW'
            order by table_name asc
        };
    } elsif ($driver eq 'pg') {
        # make sure we only list the relations visible in the current search_path
        if (defined $self->{'config'}->{'primary'}->{'schemas'}
                && ref($self->{'config'}->{'primary'}->{'schemas'}) eq 'ARRAY') {
            $schema = join(',', @{$self->{'config'}->{'primary'}->{'schemas'}});
        } else {
            $schema = q{'public'};
        }

        $sql = qq{
            select c.relname
            from pg_catalog.pg_class c
                join pg_catalog.pg_roles r on (r.oid = c.relowner)
                left join pg_catalog.pg_namespace n on (n.oid = c.relnamespace)
            where c.relkind in ('v')
                and n.nspname in ($schema)
                and pg_catalog.pg_table_is_visible(c.oid)
            order by relname asc
        };
    } elsif ($driver eq 'oracle') {
        $sql = q{
            select object_name
            from user_objects
            where object_type = 'VIEW'
            order by object_name asc
        };
    } elsif ($driver eq 'db2') {
        $sql = q{
            select tabname
            from syscat.tables
            where tabschema not like 'SYS%' and type in ('V')
            order by tabname asc
        };
    } else {
        die dslog("This method is not yet implemented for your database server ($driver).");
    }

    my $res = $self->do($sql);

    if ($res) {
        my @views = ();

        while ($res->next) {
            push(@views, $res->[0]);
        }

        return @views;
    } else {
        die dslog("Error encountered when retrieving list of tables: $DBI::errstr");
    }
}
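
# ----------------------------------------------------------------------
# Illustration (not part of the original source): a minimal sketch of how
# the relation-listing methods above might be used. The method name
# tables() is assumed here, since only its body appears in this listing;
# views() is defined immediately above. The datastore name is hypothetical.
#
#   my $db = DBIx::DataStore->new('mydatastore');
#
#   # each method returns a plain list of relation names visible to the
#   # primary handle, built from driver-appropriate catalog queries
#   my @tables = $db->tables;
#   my @views  = $db->views;
#
#   print "table: $_\n" for @tables;
#   print "view:  $_\n" for @views;
# ----------------------------------------------------------------------
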
#######################################################################
# Internal/Private Subroutines

sub DESTROY {
    my ($self) = shift;

    # If primary handle is in a transaction, cluck out a warning and issue a rollback
    # (Note that the while{} is used to support nested-transactions, assuming the
    # underlying DB supports them -- whether nested transactions are supported is
    # actually checked elsewhere, so if it isn't this while loop will only ever
    # have a single iteration).
    while ($self->{'in_tx'} > 0) {
        # Don't issue the warning when AutoCommit is turned off, we're in an implicitly created
        # transaction and no non-select statements have been issued in the current transaction
        unless ($self->{'st_count'} == 0 && $self->{'in_tx'} == 1 && $self->{'autocommit'} == 0) {
            dslog("Database connection killed during a transaction!") if DEBUG();
        }
        $self->{'handles'}->{'primary'}->rollback
            || dslog("Attempted to rollback unclosed transaction but failed: $DBI::errstr");
        $self->{'in_tx'}--;
    }
}
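
# ----------------------------------------------------------------------
# Illustration (not part of the original source): a hedged sketch of the
# safety net DESTROY provides. The begin()/do() calls below assume the
# module's public transaction and query methods; the datastore name and
# table are hypothetical.
#
#   {
#       my $db = DBIx::DataStore->new('mydatastore');
#       $db->begin;
#       $db->do(q{ update widgets set price = price * 1.1 });
#       # no commit before $db goes out of scope: DESTROY logs a warning
#       # (under DEBUG) and rolls the open transaction back, so the
#       # update is discarded rather than left dangling on the handle
#   }
# ----------------------------------------------------------------------
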
sub _db_connect {
    my %args = (
        cache    => 0,
        dbd_opts => {},
        @_,
    );

    my ($dsn);

    # if a custom DSN was present in the configuration, then just use it
    if (defined $args{'dsn'} && length($args{'dsn'}) > 0) {
        # Required DBI connection arguments when manual DSN specified
        foreach (qw( driver )) {
            if (!exists $args{$_}) {
                dslog("DBI connection attempted without providing '$_' argument!") if DEBUG() >= 3;
                return;
            }
        }

        $dsn = $args{'dsn'};
    # otherwise, build the DSN ourselves
    } else {
        # Required DBI connection arguments
        foreach (qw( driver database host )) {
            if (!exists $args{$_}) {
                dslog("DBI connection attempted without providing '$_' argument!") if DEBUG() >= 3;
                return;
            }
        }

        # Technically optional arguments that are almost always actually required for a good connection
        foreach (qw( user password )) {
            if (!exists $args{$_}) {
                dslog("DBI connection arguments do not contain '$_' argument. We'll try connecting anyway.") if DEBUG() >= 3;
                $args{$_} = '';
            }
        }

        $dsn = qq|dbi:$args{'driver'}:database=$args{'database'};host=$args{'host'}|;
        $dsn .= qq|;port=$args{'port'}| if defined $args{'port'} && $args{'port'} =~ /^\d+$/;
    }

    dslog(q{Connecting with DSN}, $dsn) if DEBUG();

    my ($dbh);

    # if DBI connection caching is desired, use connect_cached() method instead
    # also, issue immediate rollback after connecting, just in case we've been
    # returned a stale cached connection that had never closed its transaction
    if ($args{'cache'} && ($dbh = DBI->connect_cached($dsn, $args{'user'}, $args{'password'}, $args{'dbd_opts'}))) {
        $dbh->rollback if $dbh->ping >= 3;
        dslog(q{Returning DB connection from DBI's connect_cached.}) if DEBUG() >= 3;
        return $dbh;
    } elsif (!$args{'cache'} && ($dbh = DBI->connect($dsn, $args{'user'}, $args{'password'}, $args{'dbd_opts'}))) {
        $dbh->rollback if $dbh->ping >= 3;
        dslog(q{Returning DB connection from DBI's connect.}) if DEBUG() >= 3;
        return $dbh;
    } else {
        dslog("DBI connection attempt failed: $DBI::errstr") if DEBUG();
        return;
    }

    return;
}
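
# ----------------------------------------------------------------------
# Illustration (not part of the original source): the DSN _db_connect()
# assembles when no custom 'dsn' is configured. All values below are
# hypothetical; this internal helper is normally called with arguments
# taken from the datastore configuration.
#
#   my $dbh = _db_connect(
#       driver   => 'Pg',
#       database => 'appdb',
#       host     => 'db.example.com',
#       port     => 5432,
#       user     => 'app_user',
#       password => 'secret',
#   );
#   # builds the DSN:  dbi:Pg:database=appdb;host=db.example.com;port=5432
#   # then hands it, with user/password and dbd_opts, to DBI->connect
#   # (or DBI->connect_cached when cache => 1 is passed)
# ----------------------------------------------------------------------
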
sub _set_schema_searchpath {
    my ($dbh, $driver, $schemas) = @_;

    dslog(q{Search path setter entered.}) if DEBUG() >= 4;

    if (!defined $driver) {
        dslog("No driver name supplied during search path configuration") if DEBUG();
        return;
    }

    # schema search path support is only available for PostgreSQL for now
    return $dbh unless lc($driver) eq 'pg';

    if (!defined $schemas || ref($schemas) ne 'ARRAY' || scalar(@{$schemas}) < 1) {
        dslog("No schema names provided for inclusion in search path") if DEBUG();
        return;
    }

    dslog(q{Changing connection's schema search path to}, join(', ', @{$schemas})) if DEBUG() >= 2;

    # quote the schema names for paranoia
    my @s;
    push(@s, $dbh->quote($_)) for @{$schemas};

    my ($sql);

    if (lc($driver) eq 'pg') {
        $sql = 'set search_path to ' . join(', ', @s);
    }

    if (length($sql) > 0) {
        if ($dbh->do($sql)) {
            return $dbh;
        } else {
            dslog(q{Error occurred when setting schema search path:}, $dbh->errstr);
            return;
        }
    } else {
        dslog(q{No SQL to issue for setting schemas.}) if DEBUG() >= 2;
        return $dbh;
    }

    return;
}
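
# ----------------------------------------------------------------------
# Illustration (not part of the original source): for a PostgreSQL handle,
# _set_schema_searchpath() quotes each schema name and issues a single
# "set search_path to ..." statement. Schema names below are hypothetical.
#
#   _set_schema_searchpath($dbh, 'Pg', ['app', 'audit']);
#   # issues:  set search_path to 'app', 'audit'
#   # returns $dbh on success, undef (after logging) on failure; handles
#   # for other drivers are returned unchanged and no SQL is issued
# ----------------------------------------------------------------------
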
sub _transform_bindings {
    my ($sql, @binds) = @_;

    # certain SQL statement types allow different styles of binding (i.e. hashrefs for insert/update
    # but not select, delete, create, etc.)
    $sql =~ s/(^\s+|\s+$)//os;
    my $st_type = lc( ($sql =~ /^(\w+)\s+/os)[0] );
    $st_type = 'select' if $st_type eq 'with'; # ugh (stupid workaround for legacy DataStore - rewrite is/will be much smarter about this and not just take random stabs in the dark)

    # if no bound variables were passed in, we can save a few cycles by returning right here
    return ($st_type, $sql) if !@binds || scalar(@binds) < 1;

    my @final_binds = ();

    # verify that binds passed in are appropriate for the type of statement being used
    if ($st_type eq 'update' && $sql =~ /\s+set\s+$HASH_PH/ois && (scalar(@binds) < 1 || ref($binds[0]) ne 'HASH')) {
        die dslog("First bind on UPDATE statements must be a hash reference when not using an explicit SET clause!");
    } elsif ($st_type eq 'insert' && $sql =~ /^\s*insert\s+into\s+\S+\s+(values\s+)?$HASH_PH/ois
            && (scalar(@binds) < 1 || !(ref($binds[0]) eq 'ARRAY' || ref($binds[0]) eq 'HASH'))) {
        die dslog("First bind on INSERT must be hash reference (or array reference of hash references) when using "
            . "a hash placeholder in the columns-values clause!");
    } elsif ($st_type eq 'select' && scalar(@binds) > 0) {
        foreach (@binds) {
            if (ref($_) eq 'HASH') {
                die dslog("Hash reference binds not permitted for SELECT statements!");
            }
        }
    }

    # for update statements, rework the first placeholder into a "set key = ?, ..." form and take
    # it off the list of binds. this is only done if a hashref placeholder was used, though
    if ($st_type eq 'update' && $sql =~ /$HASH_PH/o) {
        if ($sql =~ s/(set\s+)$HASH_PH(\s*)/ $1 . join(', ', map { "$_ = ?" } sort keys %{$binds[0]}) . $2 /siex) {
            push(@final_binds, $binds[0]->{$_}) for sort keys %{$binds[0]};

            # remove the bind from the list so that the catch-all bind code down below doesn't
            # try to reuse it (since updates can, and almost always will, have additional binds
            # after the hashref in the SET clause)
            shift @binds;
        }
    }

    # -- this only happens when a hash placeholder is used in the col-vals clause of an insert --
    # for insert statements, we need to figure out which columns we're inserting, place those
    # into $sql, then add the actual values list(s)' placeholders... however, we only do
    # any of this if the caller used a '???' placeholder in the query (if the caller passed in
    # something like "insert into tblX (a,b,c) values (?,?,?)" then we won't do anything
    # special here, and the placeholders will just be handled by the catch-all code lower down
    if ($st_type eq 'insert' && $sql =~ /^\s*insert\s+into\s+\S+\s+(values\s+)?$HASH_PH/ois) {
        my @cols = ();
        if (ref($binds[0]) eq 'HASH') {
            @cols = sort keys %{$binds[0]};
        } elsif (ref($binds[0]) eq 'ARRAY') {
            @cols = sort keys %{$binds[0]->[0]};
        }

        die dslog("No columns defined for insert statement!") if scalar(@cols) < 1;

        my $ph_replacement = '(' . join(',', @cols) . ') values ';

        my $rec_count = ref($binds[0]) eq 'ARRAY' ? scalar(@{$binds[0]}) : 1;

        $ph_replacement .= join(', ', ('(' . join(',', ('?') x scalar(@cols)) . ')') x $rec_count);

        $sql =~ s/^(\s*insert\s+into\s+\S+)\s+(?:values\s+)?$HASH_PH\s+(.*)/$1 $ph_replacement $2/si;

        if (ref($binds[0]) eq 'ARRAY') {
            foreach my $rec (@{$binds[0]}) {
                push(@final_binds, $rec->{$_}) for @cols;
            }
        } else {
            push(@final_binds, $binds[0]->{$_}) for @cols;
        }

        # remove the first bind from the list in case there are others (almost exclusively
        # in the case where the insert is getting its values from a select)
        shift @binds;
    }

    # now that the special cases have been handled, we can loop through the remaining
    # binds, handling the arrayref ones (for IN (...) lists) as we run into them
    foreach my $bind (@binds) {
        if (ref($bind) eq 'ARRAY') {
            # arrayref binds can only be used with IN (...) lists or ARRAY[] constructors, so if we don't have
            # one available in $sql to modify, error out
            if ($sql =~ /((in\s+)([(]?\s*$ARRAY_PH\s*[)]?)|(array\s*\[\s*$ARRAY_PH\s*\]))/is) {
                my $ph_block = $1;
                if ($ph_block =~ /^in/is) {
                    if ($sql =~ s/(in\s+)([(]?\s*$ARRAY_PH\s*[)]?)/ $1 . '( ' . join(', ', ('?') x scalar(@{$bind})) . ' )' /siex) {
                        push(@final_binds, @{$bind});
                    }
                } elsif ($ph_block =~ /^array/is) {
                    if ($sql =~ s/\s*array\s*\[\s*$ARRAY_PH\s*\]/ ' array [ ' . join(', ', ('?') x scalar(@{$bind})) . ' ] ' /siex) {
                        push(@final_binds, @{$bind});
                    }
                } else {
                    die dslog("Encountered arrayref placeholder syntax that cannot be understood at this time");
                }
            } else {
                die dslog("Arrayref bind was used without corresponding array placeholder as part of an IN (...) list or an ARRAY[] constructor!");
            }
        } else {
            push(@final_binds, $bind);
        }
    }

    # need to "fix" the statement type value now for queries that use a RETURNING
    # clause at the end of an INSERT, UPDATE or DELETE... for placeholder binding
    # purposes we treat them as their real type, but for everything else in datastore
    # they should be treated as SELECTs since that's effectively what comes back
    # from the database
    if (scalar(grep { $st_type eq $_ } qw( insert update delete )) > 0 && $sql =~ m{\s+returning\s+(\*|\w)}ois) {
        $st_type = 'select';
    }

    dslog(q{Binding transformation completed. SQL is now [[}, $sql, q{]] with bindings [[}, join(', ', @final_binds), q{]]})
        if DEBUG() >= 2;

    return ($st_type, $sql, @final_binds);
}
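
# ----------------------------------------------------------------------
# Illustration (not part of the original source): a hedged sketch of the
# rewriting _transform_bindings() performs. It assumes $HASH_PH and
# $ARRAY_PH both match the '???' placeholder (the array form is the one
# documented for IN (...) lists); table and column names are hypothetical.
#
#   # hashref bind against a hash placeholder in an UPDATE's SET clause
#   my ($type, $sql, @binds) = _transform_bindings(
#       q{update widgets set ??? where id = ?},
#       { name => 'Sprocket', price => 9.99 }, 42,
#   );
#   # $sql   is now: update widgets set name = ?, price = ? where id = ?
#   # @binds is now: ('Sprocket', 9.99, 42), with $type 'update'
#
#   # arrayref bind expands into an IN (...) list of plain placeholders
#   ($type, $sql, @binds) = _transform_bindings(
#       q{select * from widgets where id in ???},
#       [2, 3, 5],
#   );
#   # $sql   is now: select * from widgets where id in ( ?, ?, ? )
#   # @binds is now: (2, 3, 5), with $type 'select'
# ----------------------------------------------------------------------
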
1;