Project

General

Profile

1
# Database access
2

    
3
import copy
4
import re
5
import time
6
import warnings
7

    
8
import exc
9
import dicts
10
import iters
11
import lists
12
import profiling
13
from Proxy import Proxy
14
import rand
15
import sql_gen
16
import strings
17
import util
18

    
19
##### Exceptions
20

    
21
def get_cur_query(cur, input_query=None):
22
    raw_query = None
23
    if hasattr(cur, 'query'): raw_query = cur.query
24
    elif hasattr(cur, '_last_executed'): raw_query = cur._last_executed
25
    
26
    if raw_query != None: return raw_query
27
    else: return '[input] '+strings.ustr(input_query)
28

    
29
def _add_cursor_info(e, *args, **kw_args):
30
    '''For params, see get_cur_query()'''
31
    exc.add_msg(e, 'query: '+strings.ustr(get_cur_query(*args, **kw_args)))
32

    
33
class DbException(exc.ExceptionWithCause):
34
    def __init__(self, msg, cause=None, cur=None):
35
        exc.ExceptionWithCause.__init__(self, msg, cause, cause_newline=True)
36
        if cur != None: _add_cursor_info(self, cur)
37

    
38
class ExceptionWithName(DbException):
39
    def __init__(self, name, cause=None):
40
        DbException.__init__(self, 'for name: '
41
            +strings.as_tt(strings.ustr(name)), cause)
42
        self.name = name
43

    
44
class ExceptionWithValue(DbException):
45
    def __init__(self, value, cause=None):
46
        DbException.__init__(self, 'for value: '
47
            +strings.as_tt(strings.urepr(value)), cause)
48
        self.value = value
49

    
50
class ExceptionWithNameType(DbException):
51
    def __init__(self, type_, name, cause=None):
52
        DbException.__init__(self, 'for type: '+strings.as_tt(strings.ustr(
53
            type_))+'; name: '+strings.as_tt(name), cause)
54
        self.type = type_
55
        self.name = name
56

    
57
class ConstraintException(DbException):
58
    def __init__(self, name, cond, cols, cause=None):
59
        msg = 'Violated '+strings.as_tt(name)+' constraint'
60
        if cond != None: msg += ' with condition '+strings.as_tt(cond)
61
        if cols != []: msg += ' on columns: '+strings.as_tt(', '.join(cols))
62
        DbException.__init__(self, msg, cause)
63
        self.name = name
64
        self.cond = cond
65
        self.cols = cols
66

    
67
class MissingCastException(DbException):
68
    def __init__(self, type_, col=None, cause=None):
69
        msg = 'Missing cast to type '+strings.as_tt(type_)
70
        if col != None: msg += ' on column: '+strings.as_tt(col)
71
        DbException.__init__(self, msg, cause)
72
        self.type = type_
73
        self.col = col
74

    
75
class EncodingException(ExceptionWithName): pass
76

    
77
class DuplicateKeyException(ConstraintException): pass
78

    
79
class NullValueException(ConstraintException): pass
80

    
81
class CheckException(ConstraintException): pass
82

    
83
class InvalidValueException(ExceptionWithValue): pass
84

    
85
class DuplicateException(ExceptionWithNameType): pass
86

    
87
class DoesNotExistException(ExceptionWithNameType): pass
88

    
89
class EmptyRowException(DbException): pass
90

    
91
##### Warnings
92

    
93
class DbWarning(UserWarning): pass
94

    
95
##### Result retrieval
96

    
97
def col_names(cur): return (col[0] for col in cur.description)
98

    
99
def rows(cur): return iter(lambda: cur.fetchone(), None)
100

    
101
def consume_rows(cur):
102
    '''Used to fetch all rows so result will be cached'''
103
    iters.consume_iter(rows(cur))
104

    
105
def next_row(cur): return rows(cur).next()
106

    
107
def row(cur):
108
    row_ = next_row(cur)
109
    consume_rows(cur)
110
    return row_
111

    
112
def next_value(cur): return next_row(cur)[0]
113

    
114
def value(cur): return row(cur)[0]
115

    
116
def values(cur): return iters.func_iter(lambda: next_value(cur))
117

    
118
def value_or_none(cur):
119
    try: return value(cur)
120
    except StopIteration: return None
121

    
122
##### Escaping
123

    
124
def esc_name_by_module(module, name):
125
    if module == 'psycopg2' or module == None: quote = '"'
126
    elif module == 'MySQLdb': quote = '`'
127
    else: raise NotImplementedError("Can't escape name for "+module+' database')
128
    return sql_gen.esc_name(name, quote)
129

    
130
def esc_name_by_engine(engine, name, **kw_args):
131
    return esc_name_by_module(db_engines[engine][0], name, **kw_args)
132

    
133
def esc_name(db, name, **kw_args):
134
    return esc_name_by_module(util.root_module(db.db), name, **kw_args)
135

    
136
def qual_name(db, schema, table):
137
    def esc_name_(name): return esc_name(db, name)
138
    table = esc_name_(table)
139
    if schema != None: return esc_name_(schema)+'.'+table
140
    else: return table
141

    
142
##### Database connections
143

    
144
db_config_names = ['engine', 'host', 'user', 'password', 'database', 'schemas']
145

    
146
db_engines = {
147
    'MySQL': ('MySQLdb', {'password': 'passwd', 'database': 'db'}),
148
    'PostgreSQL': ('psycopg2', {}),
149
}
150

    
151
DatabaseErrors_set = set([DbException])
152
DatabaseErrors = tuple(DatabaseErrors_set)
153

    
154
def _add_module(module):
155
    DatabaseErrors_set.add(module.DatabaseError)
156
    global DatabaseErrors
157
    DatabaseErrors = tuple(DatabaseErrors_set)
158

    
159
def db_config_str(db_config):
160
    return db_config['engine']+' database '+db_config['database']
161

    
162
log_debug_none = lambda msg, level=2: None
163

    
164
class DbConn:
165
    def __init__(self, db_config, autocommit=True, caching=True,
166
        log_debug=log_debug_none, debug_temp=False, src=None):
167
        '''
168
        @param debug_temp Whether temporary objects should instead be permanent.
169
            This assists in debugging the internal objects used by the program.
170
        @param src In autocommit mode, will be included in a comment in every
171
            query, to help identify the data source in pg_stat_activity.
172
        '''
173
        self.db_config = db_config
174
        self.autocommit = autocommit
175
        self.caching = caching
176
        self.log_debug = log_debug
177
        self.debug = log_debug != log_debug_none
178
        self.debug_temp = debug_temp
179
        self.src = src
180
        self.autoanalyze = False
181
        self.autoexplain = False
182
        self.profile_row_ct = None
183
        
184
        self._savepoint = 0
185
        self._reset()
186
    
187
    def __getattr__(self, name):
188
        if name == '__dict__': raise Exception('getting __dict__')
189
        if name == 'db': return self._db()
190
        else: raise AttributeError()
191
    
192
    def __getstate__(self):
193
        state = copy.copy(self.__dict__) # shallow copy
194
        state['log_debug'] = None # don't pickle the debug callback
195
        state['_DbConn__db'] = None # don't pickle the connection
196
        return state
197
    
198
    def clear_cache(self): self.query_results = {}
199
    
200
    def _reset(self):
201
        self.clear_cache()
202
        assert self._savepoint == 0
203
        self._notices_seen = set()
204
        self.__db = None
205
    
206
    def connected(self): return self.__db != None
207
    
208
    def close(self):
209
        if not self.connected(): return
210
        
211
        # Record that the automatic transaction is now closed
212
        self._savepoint -= 1
213
        
214
        self.db.close()
215
        self._reset()
216
    
217
    def reconnect(self):
218
        # Do not do this in test mode as it would roll back everything
219
        if self.autocommit: self.close()
220
        # Connection will be reopened automatically on first query
221
    
222
    def _db(self):
223
        if self.__db == None:
224
            # Process db_config
225
            db_config = self.db_config.copy() # don't modify input!
226
            schemas = db_config.pop('schemas', None)
227
            module_name, mappings = db_engines[db_config.pop('engine')]
228
            module = __import__(module_name)
229
            _add_module(module)
230
            for orig, new in mappings.iteritems():
231
                try: util.rename_key(db_config, orig, new)
232
                except KeyError: pass
233
            
234
            # Connect
235
            self.__db = module.connect(**db_config)
236
            
237
            # Record that a transaction is already open
238
            self._savepoint += 1
239
            
240
            # Configure connection
241
            if hasattr(self.db, 'set_isolation_level'):
242
                import psycopg2.extensions
243
                self.db.set_isolation_level(
244
                    psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
245
            if schemas != None:
246
                search_path = [self.esc_name(s) for s in schemas.split(',')]
247
                search_path.append(value(run_query(self, 'SHOW search_path',
248
                    log_level=4)))
249
                run_query(self, 'SET search_path TO '+(','.join(search_path)),
250
                    log_level=3)
251
        
252
        return self.__db
253
    
254
    class DbCursor(Proxy):
255
        def __init__(self, outer):
256
            Proxy.__init__(self, outer.db.cursor())
257
            self.outer = outer
258
            self.query_results = outer.query_results
259
            self.query_lookup = None
260
            self.result = []
261
        
262
        def execute(self, query):
263
            self._is_insert = query.startswith('INSERT')
264
            self.query_lookup = query
265
            try:
266
                try: cur = self.inner.execute(query)
267
                finally: self.query = get_cur_query(self.inner, query)
268
            except Exception, e:
269
                self.result = e # cache the exception as the result
270
                self._cache_result()
271
                raise
272
            
273
            # Always cache certain queries
274
            query = sql_gen.lstrip(query)
275
            if query.startswith('CREATE') or query.startswith('ALTER'):
276
                # structural changes
277
                # Rest of query must be unique in the face of name collisions,
278
                # so don't cache ADD COLUMN unless it has distinguishing comment
279
                if query.find('ADD COLUMN') < 0 or query.endswith('*/'):
280
                    self._cache_result()
281
            elif self.rowcount == 0 and query.startswith('SELECT'): # empty
282
                consume_rows(self) # fetch all rows so result will be cached
283
            
284
            return cur
285
        
286
        def fetchone(self):
287
            row = self.inner.fetchone()
288
            if row != None: self.result.append(row)
289
            # otherwise, fetched all rows
290
            else: self._cache_result()
291
            return row
292
        
293
        def _cache_result(self):
294
            # For inserts that return a result set, don't cache result set since
295
            # inserts are not idempotent. Other non-SELECT queries don't have
296
            # their result set read, so only exceptions will be cached (an
297
            # invalid query will always be invalid).
298
            if self.query_results != None and (not self._is_insert
299
                or isinstance(self.result, Exception)):
300
                
301
                assert self.query_lookup != None
302
                self.query_results[self.query_lookup] = self.CacheCursor(
303
                    util.dict_subset(dicts.AttrsDictView(self),
304
                    ['query', 'result', 'rowcount', 'description']))
305
        
306
        class CacheCursor:
307
            def __init__(self, cached_result): self.__dict__ = cached_result
308
            
309
            def execute(self, *args, **kw_args):
310
                if isinstance(self.result, Exception): raise self.result
311
                # otherwise, result is a rows list
312
                self.iter = iter(self.result)
313
            
314
            def fetchone(self):
315
                try: return self.iter.next()
316
                except StopIteration: return None
317
    
318
    def esc_value(self, value):
319
        try: str_ = self.mogrify('%s', [value])
320
        except NotImplementedError, e:
321
            module = util.root_module(self.db)
322
            if module == 'MySQLdb':
323
                import _mysql
324
                str_ = _mysql.escape_string(value)
325
            else: raise e
326
        return strings.to_unicode(str_)
327
    
328
    def esc_name(self, name): return esc_name(self, name) # calls global func
329
    
330
    def std_code(self, str_):
331
        '''Standardizes SQL code.
332
        * Ensures that string literals are prefixed by `E`
333
        '''
334
        if str_.startswith("'"): str_ = 'E'+str_
335
        return str_
336
    
337
    def can_mogrify(self):
338
        module = util.root_module(self.db)
339
        return module == 'psycopg2'
340
    
341
    def mogrify(self, query, params=None):
342
        if self.can_mogrify(): return self.db.cursor().mogrify(query, params)
343
        else: raise NotImplementedError("Can't mogrify query")
344
    
345
    def set_encoding(self, encoding):
346
        encoding_str = sql_gen.Literal(encoding)
347
        run_query(self, 'SET NAMES '+encoding_str.to_str(self))
348
    
349
    def print_notices(self):
350
        if hasattr(self.db, 'notices'):
351
            for msg in self.db.notices:
352
                if msg not in self._notices_seen:
353
                    self._notices_seen.add(msg)
354
                    self.log_debug(msg, level=2)
355
    
356
    def run_query(self, query, cacheable=False, log_level=2,
357
        debug_msg_ref=None):
358
        '''
359
        @param log_ignore_excs The log_level will be increased by 2 if the query
360
            throws one of these exceptions.
361
        @param debug_msg_ref If specified, the log message will be returned in
362
            this instead of being output. This allows you to filter log messages
363
            depending on the result of the query.
364
        '''
365
        assert query != None
366
        
367
        if self.autocommit and self.src != None:
368
            query = sql_gen.esc_comment(self.src)+'\t'+query
369
        
370
        if not self.caching: cacheable = False
371
        used_cache = False
372
        
373
        if self.debug:
374
            profiler = profiling.ItersProfiler(start_now=True, iter_text='row')
375
        try:
376
            # Get cursor
377
            if cacheable:
378
                try: cur = self.query_results[query]
379
                except KeyError: cur = self.DbCursor(self)
380
                else: used_cache = True
381
            else: cur = self.db.cursor()
382
            
383
            # Run query
384
            try: cur.execute(query)
385
            except Exception, e:
386
                _add_cursor_info(e, self, query)
387
                raise
388
            else: self.do_autocommit()
389
        finally:
390
            if self.debug:
391
                profiler.stop(self.profile_row_ct)
392
                
393
                ## Log or return query
394
                
395
                query = strings.ustr(get_cur_query(cur, query))
396
                # Put the src comment on a separate line in the log file
397
                query = query.replace('\t', '\n', 1)
398
                
399
                msg = 'DB query: '
400
                
401
                if used_cache: msg += 'cache hit'
402
                elif cacheable: msg += 'cache miss'
403
                else: msg += 'non-cacheable'
404
                
405
                msg += ':\n'+profiler.msg()+'\n'+strings.as_code(query, 'SQL')
406
                
407
                if debug_msg_ref != None: debug_msg_ref[0] = msg
408
                else: self.log_debug(msg, log_level)
409
                
410
                self.print_notices()
411
        
412
        return cur
413
    
414
    def is_cached(self, query): return query in self.query_results
415
    
416
    def with_autocommit(self, func):
417
        import psycopg2.extensions
418
        
419
        prev_isolation_level = self.db.isolation_level
420
        self.db.set_isolation_level(
421
            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
422
        try: return func()
423
        finally: self.db.set_isolation_level(prev_isolation_level)
424
    
425
    def with_savepoint(self, func):
426
        top = self._savepoint == 0
427
        savepoint = 'level_'+str(self._savepoint)
428
        
429
        if self.debug:
430
            self.log_debug('Begin transaction', level=4)
431
            profiler = profiling.ItersProfiler(start_now=True, iter_text='row')
432
        
433
        # Must happen before running queries so they don't get autocommitted
434
        self._savepoint += 1
435
        
436
        if top: query = 'START TRANSACTION ISOLATION LEVEL READ COMMITTED'
437
        else: query = 'SAVEPOINT '+savepoint
438
        self.run_query(query, log_level=4)
439
        try:
440
            return func()
441
            if top: self.run_query('COMMIT', log_level=4)
442
        except:
443
            if top: query = 'ROLLBACK'
444
            else: query = 'ROLLBACK TO SAVEPOINT '+savepoint
445
            self.run_query(query, log_level=4)
446
            
447
            raise
448
        finally:
449
            # Always release savepoint, because after ROLLBACK TO SAVEPOINT,
450
            # "The savepoint remains valid and can be rolled back to again"
451
            # (http://www.postgresql.org/docs/8.3/static/sql-rollback-to.html).
452
            if not top:
453
                self.run_query('RELEASE SAVEPOINT '+savepoint, log_level=4)
454
            
455
            self._savepoint -= 1
456
            assert self._savepoint >= 0
457
            
458
            if self.debug:
459
                profiler.stop(self.profile_row_ct)
460
                self.log_debug('End transaction\n'+profiler.msg(), level=4)
461
            
462
            self.do_autocommit() # OK to do this after ROLLBACK TO SAVEPOINT
463
    
464
    def do_autocommit(self):
465
        '''Autocommits if outside savepoint'''
466
        assert self._savepoint >= 1
467
        if self.autocommit and self._savepoint == 1:
468
            self.log_debug('Autocommitting', level=4)
469
            self.db.commit()
470
    
471
    def col_info(self, col, cacheable=True):
472
        table = sql_gen.Table('columns', 'information_schema')
473
        cols = [sql_gen.Col('data_type'), sql_gen.Col('udt_name'),
474
            'column_default', sql_gen.Cast('boolean',
475
            sql_gen.Col('is_nullable'))]
476
        
477
        conds = [('table_name', col.table.name),
478
            ('column_name', strings.ustr(col.name))]
479
        schema = col.table.schema
480
        if schema != None: conds.append(('table_schema', schema))
481
        
482
        cur = select(self, table, cols, conds, order_by='table_schema', limit=1,
483
            cacheable=cacheable, log_level=4) # TODO: order by search_path order
484
        try: type_, extra_type, default, nullable = row(cur)
485
        except StopIteration: raise sql_gen.NoUnderlyingTableException(col)
486
        default = sql_gen.as_Code(default, self)
487
        if type_ == 'USER-DEFINED': type_ = extra_type
488
        elif type_ == 'ARRAY':
489
            type_ = sql_gen.ArrayType(strings.remove_prefix('_', extra_type,
490
                require=True))
491
        
492
        return sql_gen.TypedCol(col.name, type_, default, nullable)
493
    
494
    def TempFunction(self, name):
495
        if self.debug_temp: schema = None
496
        else: schema = 'pg_temp'
497
        return sql_gen.Function(name, schema)
498

    
499
connect = DbConn
500

    
501
##### Recoverable querying
502

    
503
def parse_exception(db, e, recover=False):
504
    msg = strings.ustr(e.args[0])
505
    msg = re.sub(r'^(?:PL/Python: )?ValueError: ', r'', msg)
506
    
507
    match = re.match(r'^invalid byte sequence for encoding "(.+?)":', msg)
508
    if match:
509
        encoding, = match.groups()
510
        raise EncodingException(encoding, e)
511
    
512
    def make_DuplicateKeyException(constraint, e):
513
        cols = []
514
        cond = None
515
        if recover: # need auto-rollback to run index_cols()
516
            try:
517
                cols = index_cols(db, constraint)
518
                cond = index_cond(db, constraint)
519
            except NotImplementedError: pass
520
        return DuplicateKeyException(constraint, cond, cols, e)
521
    
522
    match = re.match(r'^duplicate key value violates unique constraint "(.+?)"',
523
        msg)
524
    if match:
525
        constraint, = match.groups()
526
        raise make_DuplicateKeyException(constraint, e)
527
    
528
    match = re.match(r'^could not create unique index "(.+?)"\n'
529
        r'DETAIL:  Key .+? is duplicated', msg)
530
    if match:
531
        constraint, = match.groups()
532
        raise make_DuplicateKeyException(constraint, e)
533
    
534
    match = re.match(r'^null value in column "(.+?)" violates not-null'
535
        r' constraint', msg)
536
    if match:
537
        col, = match.groups()
538
        raise NullValueException('NOT NULL', None, [col], e)
539
    
540
    match = re.match(r'^new row for relation "(.+?)" violates check '
541
        r'constraint "(.+?)"', msg)
542
    if match:
543
        table, constraint = match.groups()
544
        constraint = sql_gen.Col(constraint, table)
545
        cond = None
546
        if recover: # need auto-rollback to run constraint_cond()
547
            try: cond = constraint_cond(db, constraint)
548
            except NotImplementedError: pass
549
        raise CheckException(constraint.to_str(db), cond, [], e)
550
    
551
    match = re.match(r'^(?:invalid input (?:syntax|value)\b[^:]*'
552
        r'|.+? out of range)(?:: "(.+?)")?', msg)
553
    if match:
554
        value, = match.groups()
555
        value = util.do_ignore_none(strings.to_unicode, value)
556
        raise InvalidValueException(value, e)
557
    
558
    match = re.match(r'^column "(.+?)" is of type (.+?) but expression '
559
        r'is of type', msg)
560
    if match:
561
        col, type_ = match.groups()
562
        raise MissingCastException(type_, col, e)
563
    
564
    match = re.match(r'^could not determine polymorphic type because '
565
        r'input has type "unknown"', msg)
566
    if match: raise MissingCastException('text', None, e)
567
    
568
    match = re.match(r'^.+? types (.+?) and (.+?) cannot be matched', msg)
569
    if match:
570
        type0, type1 = match.groups()
571
        raise MissingCastException(type0, None, e)
572
    
573
    typed_name_re = r'^(\S+) "?(.+?)"?(?: of relation ".+?")?'
574
    
575
    match = re.match(typed_name_re+r'.*? already exists', msg)
576
    if match:
577
        type_, name = match.groups()
578
        raise DuplicateException(type_, name, e)
579
    
580
    match = re.match(r'more than one (\S+) named ""(.+?)""', msg)
581
    if match:
582
        type_, name = match.groups()
583
        raise DuplicateException(type_, name, e)
584
    
585
    match = re.match(typed_name_re+r' does not exist', msg)
586
    if match:
587
        type_, name = match.groups()
588
        if type_ == 'function':
589
            match = re.match(r'^(.+?)\(.*\)$', name)
590
            if match: # includes params, so is call rather than cast to regproc
591
                function_name, = match.groups()
592
                func = sql_gen.Function(function_name)
593
                if function_exists(db, func) and msg.find('CAST') < 0:
594
                    # not found only because of a missing cast
595
                    type_ = function_param0_type(db, func)
596
                    raise MissingCastException(type_, None, e)
597
        raise DoesNotExistException(type_, name, e)
598
    
599
    raise # no specific exception raised
600

    
601
def with_savepoint(db, func): return db.with_savepoint(func)
602

    
603
def run_query(db, query, recover=None, cacheable=False, log_level=2,
604
    log_ignore_excs=None, **kw_args):
605
    '''For params, see DbConn.run_query()'''
606
    if recover == None: recover = False
607
    if log_ignore_excs == None: log_ignore_excs = ()
608
    log_ignore_excs = tuple(log_ignore_excs)
609
    debug_msg_ref = [None]
610
    
611
    query = with_explain_comment(db, query)
612
    
613
    try:
614
        try:
615
            def run(): return db.run_query(query, cacheable, log_level,
616
                debug_msg_ref, **kw_args)
617
            if recover and not db.is_cached(query):
618
                return with_savepoint(db, run)
619
            else: return run() # don't need savepoint if cached
620
        except Exception, e: parse_exception(db, e, recover)
621
    except log_ignore_excs:
622
        log_level += 2
623
        raise
624
    finally:
625
        if debug_msg_ref[0] != None: db.log_debug(debug_msg_ref[0], log_level)
626

    
627
##### Basic queries
628

    
629
def is_explainable(query):
630
    # See <http://www.postgresql.org/docs/8.3/static/sql-explain.html#AEN57749>
631
    return re.match(r'^(?:SELECT|INSERT|UPDATE|DELETE|VALUES|EXECUTE|DECLARE)\b'
632
        , query)
633

    
634
def explain(db, query, **kw_args):
635
    '''
636
    For params, see run_query().
637
    '''
638
    kw_args.setdefault('log_level', 4)
639
    
640
    return strings.ustr(strings.join_lines(values(run_query(db,
641
        'EXPLAIN '+query, recover=True, cacheable=True, **kw_args))))
642
        # not a higher log_level because it's useful to see what query is being
643
        # run before it's executed, which EXPLAIN effectively provides
644

    
645
def has_comment(query): return query.endswith('*/')
646

    
647
def with_explain_comment(db, query, **kw_args):
648
    if db.autoexplain and not has_comment(query) and is_explainable(query):
649
        query += '\n'+sql_gen.esc_comment(' EXPLAIN:\n'
650
            +explain(db, query, **kw_args))
651
    return query
652

    
653
def next_version(name):
654
    version = 1 # first existing name was version 0
655
    match = re.match(r'^(.*)#(\d+)$', name)
656
    if match:
657
        name, version = match.groups()
658
        version = int(version)+1
659
    return sql_gen.concat(name, '#'+str(version))
660

    
661
def lock_table(db, table, mode):
662
    table = sql_gen.as_Table(table)
663
    run_query(db, 'LOCK TABLE '+table.to_str(db)+' IN '+mode+' MODE')
664

    
665
def run_query_into(db, query, into=None, add_pkey_=False, **kw_args):
666
    '''Outputs a query to a temp table.
667
    For params, see run_query().
668
    '''
669
    if into == None: return run_query(db, query, **kw_args)
670
    
671
    assert isinstance(into, sql_gen.Table)
672
    
673
    into.is_temp = True
674
    # "temporary tables cannot specify a schema name", so remove schema
675
    into.schema = None
676
    
677
    kw_args['recover'] = True
678
    kw_args.setdefault('log_ignore_excs', (DuplicateException,))
679
    
680
    temp = not db.debug_temp # tables are permanent in debug_temp mode
681
    
682
    # Create table
683
    while True:
684
        create_query = 'CREATE'
685
        if temp: create_query += ' TEMP'
686
        create_query += ' TABLE '+into.to_str(db)+' AS\n'+query
687
        
688
        try:
689
            cur = run_query(db, create_query, **kw_args)
690
                # CREATE TABLE AS sets rowcount to # rows in query
691
            break
692
        except DuplicateException, e:
693
            into.name = next_version(into.name)
694
            # try again with next version of name
695
    
696
    if add_pkey_: add_pkey(db, into)
697
    
698
    # According to the PostgreSQL doc, "The autovacuum daemon cannot access and
699
    # therefore cannot vacuum or analyze temporary tables. [...] if a temporary
700
    # table is going to be used in complex queries, it is wise to run ANALYZE on
701
    # the temporary table after it is populated."
702
    # (http://www.postgresql.org/docs/9.1/static/sql-createtable.html)
703
    # If into is not a temp table, ANALYZE is useful but not required.
704
    analyze(db, into)
705
    
706
    return cur
707

    
708
order_by_pkey = object() # tells mk_select() to order by the pkey
709

    
710
distinct_on_all = object() # tells mk_select() to SELECT DISTINCT ON all columns
711

    
712
def mk_select(db, tables=None, fields=None, conds=None, distinct_on=[],
713
    limit=None, start=None, order_by=order_by_pkey, default_table=None,
714
    explain=True):
715
    '''
716
    @param tables The single table to select from, or a list of tables to join
717
        together, with tables after the first being sql_gen.Join objects
718
    @param fields Use None to select all fields in the table
719
    @param conds WHERE conditions: [(compare_left_side, compare_right_side),...]
720
        * container can be any iterable type
721
        * compare_left_side: sql_gen.Code|str (for col name)
722
        * compare_right_side: sql_gen.ValueCond|literal value
723
    @param distinct_on The columns to SELECT DISTINCT ON, or distinct_on_all to
724
        use all columns
725
    @return query
726
    '''
727
    # Parse tables param
728
    tables = lists.mk_seq(tables)
729
    tables = list(tables) # don't modify input! (list() copies input)
730
    table0 = sql_gen.as_Table(tables.pop(0)) # first table is separate
731
    
732
    # Parse other params
733
    if conds == None: conds = []
734
    elif dicts.is_dict(conds): conds = conds.items()
735
    conds = list(conds) # don't modify input! (list() copies input)
736
    assert limit == None or isinstance(limit, (int, long))
737
    assert start == None or isinstance(start, (int, long))
738
    if limit == 0: order_by = None
739
    if order_by is order_by_pkey:
740
        if lists.is_seq(distinct_on) and distinct_on: order_by = distinct_on[0]
741
        elif table0 != None: order_by = table_order_by(db, table0, recover=True)
742
        else: order_by = None
743
    
744
    query = 'SELECT'
745
    
746
    def parse_col(col): return sql_gen.as_Col(col, default_table).to_str(db)
747
    
748
    # DISTINCT ON columns
749
    if distinct_on != []:
750
        query += '\nDISTINCT'
751
        if distinct_on is not distinct_on_all:
752
            query += ' ON ('+(', '.join(map(parse_col, distinct_on)))+')'
753
    
754
    # Columns
755
    if query.find('\n') >= 0: whitespace = '\n'
756
    else: whitespace = ' '
757
    if fields == None: query += whitespace+'*'
758
    else:
759
        assert fields != []
760
        if len(fields) > 1: whitespace = '\n'
761
        query += whitespace+('\n, '.join(map(parse_col, fields)))
762
    
763
    # Main table
764
    if query.find('\n') >= 0 or len(tables) > 0: whitespace = '\n'
765
    else: whitespace = ' '
766
    if table0 != None: query += whitespace+'FROM '+table0.to_str(db)
767
    
768
    # Add joins
769
    left_table = table0
770
    for join_ in tables:
771
        table = join_.table
772
        
773
        # Parse special values
774
        if join_.type_ is sql_gen.filter_out: # filter no match
775
            conds.append((sql_gen.Col(table_not_null_col(db, table), table),
776
                sql_gen.CompareCond(None, '~=')))
777
        
778
        query += '\n'+join_.to_str(db, left_table)
779
        
780
        left_table = table
781
    
782
    missing = True
783
    if conds != []:
784
        if len(conds) == 1: whitespace = ' '
785
        else: whitespace = '\n'
786
        query += '\n'+sql_gen.combine_conds([sql_gen.ColValueCond(l, r)
787
            .to_str(db) for l, r in conds], 'WHERE')
788
    if order_by != None:
789
        query += '\nORDER BY '+sql_gen.as_Col(order_by).to_str(db)
790
    if limit != None: query += '\nLIMIT '+str(limit)
791
    if start != None:
792
        if start != 0: query += '\nOFFSET '+str(start)
793
    
794
    if explain: query = with_explain_comment(db, query)
795
    
796
    return query
797

    
798
def select(db, *args, **kw_args):
799
    '''For params, see mk_select() and run_query()'''
800
    recover = kw_args.pop('recover', None)
801
    cacheable = kw_args.pop('cacheable', True)
802
    log_level = kw_args.pop('log_level', 2)
803
    
804
    return run_query(db, mk_select(db, *args, **kw_args), recover, cacheable,
805
        log_level=log_level)
806

    
807
def mk_insert_select(db, table, cols=None, select_query=None, returning=None,
808
    embeddable=False, ignore=False, src=None):
809
    '''
810
    @param returning str|None An inserted column (such as pkey) to return
811
    @param embeddable Whether the query should be embeddable as a nested SELECT.
812
        Warning: If you set this and cacheable=True when the query is run, the
813
        query will be fully cached, not just if it raises an exception.
814
    @param ignore Whether to ignore duplicate keys.
815
    @param src Will be included in the name of any created function, to help
816
        identify the data source in pg_stat_activity.
817
    '''
818
    table = sql_gen.remove_table_rename(sql_gen.as_Table(table))
819
    if cols == []: cols = None # no cols (all defaults) = unknown col names
820
    if cols != None: cols = [sql_gen.to_name_only_col(c, table) for c in cols]
821
    if select_query == None: select_query = 'DEFAULT VALUES'
822
    if returning != None: returning = sql_gen.as_Col(returning, table)
823
    
824
    first_line = 'INSERT INTO '+table.to_str(db)
825
    
826
    def mk_insert(select_query):
827
        query = first_line
828
        if cols != None:
829
            query += '\n('+(', '.join((c.to_str(db) for c in cols)))+')'
830
        query += '\n'+select_query
831
        
832
        if returning != None:
833
            returning_name_col = sql_gen.to_name_only_col(returning)
834
            query += '\nRETURNING '+returning_name_col.to_str(db)
835
        
836
        return query
837
    
838
    return_type = sql_gen.CustomCode('unknown')
839
    if returning != None: return_type = sql_gen.ColType(returning)
840
    
841
    if ignore:
842
        # Always return something to set the correct rowcount
843
        if returning == None: returning = sql_gen.NamedCol('NULL', None)
844
        
845
        embeddable = True # must use function
846
        
847
        if cols == None: row = [sql_gen.Col(sql_gen.all_cols, 'row')]
848
        else: row = [sql_gen.Col(c.name, 'row') for c in cols]
849
        
850
        query = sql_gen.RowExcIgnore(sql_gen.RowType(table), select_query,
851
            sql_gen.ReturnQuery(mk_insert(sql_gen.Values(row).to_str(db))),
852
            cols)
853
    else: query = mk_insert(select_query)
854
    
855
    if embeddable:
856
        # Create function
857
        function_name = sql_gen.clean_name(first_line)
858
        if src != None: function_name = src+': '+function_name
859
        while True:
860
            try:
861
                func = db.TempFunction(function_name)
862
                def_ = sql_gen.FunctionDef(func, sql_gen.SetOf(return_type),
863
                    query)
864
                
865
                run_query(db, def_.to_str(db), recover=True, cacheable=True,
866
                    log_ignore_excs=(DuplicateException,))
867
                break # this version was successful
868
            except DuplicateException, e:
869
                function_name = next_version(function_name)
870
                # try again with next version of name
871
        
872
        # Return query that uses function
873
        cols = None
874
        if returning != None: cols = [returning]
875
        func_table = sql_gen.NamedTable('f', sql_gen.FunctionCall(func), cols)
876
            # AS clause requires function alias
877
        return mk_select(db, func_table, order_by=None)
878
    
879
    return query
880

    
881
def insert_select(db, table, *args, **kw_args):
882
    '''For params, see mk_insert_select() and run_query_into()
883
    @param into sql_gen.Table with suggested name of temp table to put RETURNING
884
        values in
885
    '''
886
    returning = kw_args.get('returning', None)
887
    ignore = kw_args.get('ignore', False)
888
    
889
    into = kw_args.pop('into', None)
890
    if into != None: kw_args['embeddable'] = True
891
    recover = kw_args.pop('recover', None)
892
    if ignore: recover = True
893
    cacheable = kw_args.pop('cacheable', True)
894
    log_level = kw_args.pop('log_level', 2)
895
    
896
    rowcount_only = ignore and returning == None # keep NULL rows on server
897
    if rowcount_only: into = sql_gen.Table('rowcount')
898
    
899
    cur = run_query_into(db, mk_insert_select(db, table, *args, **kw_args),
900
        into, recover=recover, cacheable=cacheable, log_level=log_level)
901
    if rowcount_only: empty_temp(db, into)
902
    autoanalyze(db, table)
903
    return cur
904

    
905
default = sql_gen.default # tells insert() to use the default value for a column
906

    
907
def insert(db, table, row, *args, **kw_args):
908
    '''For params, see insert_select()'''
909
    ignore = kw_args.pop('ignore', False)
910
    if ignore: kw_args.setdefault('recover', True)
911
    
912
    if lists.is_seq(row): cols = None
913
    else:
914
        cols = row.keys()
915
        row = row.values()
916
    row = list(row) # ensure that "== []" works
917
    
918
    if row == []: query = None
919
    else: query = sql_gen.Values(row).to_str(db)
920
    
921
    try: return insert_select(db, table, cols, query, *args, **kw_args)
922
    except (DuplicateKeyException, NullValueException):
923
        if not ignore: raise
924
        return None
925

    
926
def mk_update(db, table, changes=None, cond=None, in_place=False,
927
    cacheable_=True):
928
    '''
929
    @param changes [(col, new_value),...]
930
        * container can be any iterable type
931
        * col: sql_gen.Code|str (for col name)
932
        * new_value: sql_gen.Code|literal value
933
    @param cond sql_gen.Code WHERE condition. e.g. use sql_gen.*Cond objects.
934
    @param in_place If set, locks the table and updates rows in place.
935
        This avoids creating dead rows in PostgreSQL.
936
        * cond must be None
937
    @param cacheable_ Whether column structure information used to generate the
938
        query can be cached
939
    @return str query
940
    '''
941
    table = sql_gen.as_Table(table)
942
    changes = [(sql_gen.to_name_only_col(c, table), sql_gen.as_Value(v))
943
        for c, v in changes]
944
    
945
    if in_place:
946
        assert cond == None
947
        
948
        def col_type(col):
949
            return sql_gen.canon_type(db.col_info(
950
                sql_gen.with_default_table(c, table), cacheable_).type)
951
        changes = [(c, v, col_type(c)) for c, v in changes]
952
        query = 'ALTER TABLE '+table.to_str(db)+'\n'
953
        query += ',\n'.join(('ALTER COLUMN '+c.to_str(db)+' TYPE '+t+'\nUSING '
954
            +v.to_str(db) for c, v, t in changes))
955
    else:
956
        query = 'UPDATE '+table.to_str(db)+'\nSET\n'
957
        query += ',\n'.join((c.to_str(db)+' = '+v.to_str(db)
958
            for c, v in changes))
959
        if cond != None: query += '\nWHERE\n'+cond.to_str(db)
960
    
961
    query = with_explain_comment(db, query)
962
    
963
    return query
964

    
965
def update(db, table, *args, **kw_args):
966
    '''For params, see mk_update() and run_query()'''
967
    recover = kw_args.pop('recover', None)
968
    cacheable = kw_args.pop('cacheable', False)
969
    log_level = kw_args.pop('log_level', 2)
970
    
971
    cur = run_query(db, mk_update(db, table, *args, **kw_args), recover,
972
        cacheable, log_level=log_level)
973
    autoanalyze(db, table)
974
    return cur
975

    
976
def mk_delete(db, table, cond=None):
977
    '''
978
    @param cond sql_gen.Code WHERE condition. e.g. use sql_gen.*Cond objects.
979
    @return str query
980
    '''
981
    query = 'DELETE FROM '+table.to_str(db)
982
    if cond != None: query += '\nWHERE '+cond.to_str(db)
983
    
984
    query = with_explain_comment(db, query)
985
    
986
    return query
987

    
988
def delete(db, table, *args, **kw_args):
989
    '''For params, see mk_delete() and run_query()'''
990
    recover = kw_args.pop('recover', None)
991
    cacheable = kw_args.pop('cacheable', True)
992
    log_level = kw_args.pop('log_level', 2)
993
    
994
    cur = run_query(db, mk_delete(db, table, *args, **kw_args), recover,
995
        cacheable, log_level=log_level)
996
    autoanalyze(db, table)
997
    return cur
998

    
999
def last_insert_id(db):
1000
    module = util.root_module(db.db)
1001
    if module == 'psycopg2': return value(run_query(db, 'SELECT lastval()'))
1002
    elif module == 'MySQLdb': return db.insert_id()
1003
    else: return None
1004

    
1005
def define_func(db, def_):
1006
    func = def_.function
1007
    while True:
1008
        try:
1009
            run_query(db, def_.to_str(db), recover=True, cacheable=True,
1010
                log_ignore_excs=(DuplicateException,))
1011
            break # successful
1012
        except DuplicateException:
1013
            func.name = next_version(func.name)
1014
            # try again with next version of name
1015

    
1016
def mk_flatten_mapping(db, into, cols, preserve=[], as_items=False):
1017
    '''Creates a mapping from original column names (which may have collisions)
1018
    to names that will be distinct among the columns' tables.
1019
    This is meant to be used for several tables that are being joined together.
1020
    @param cols The columns to combine. Duplicates will be removed.
1021
    @param into The table for the new columns.
1022
    @param preserve [sql_gen.Col...] Columns not to rename. Note that these
1023
        columns will be included in the mapping even if they are not in cols.
1024
        The tables of the provided Col objects will be changed to into, so make
1025
        copies of them if you want to keep the original tables.
1026
    @param as_items Whether to return a list of dict items instead of a dict
1027
    @return dict(orig_col=new_col, ...)
1028
        * orig_col: sql_gen.Col(orig_col_name, orig_table)
1029
        * new_col: sql_gen.Col(orig_col_name, into)
1030
        * All mappings use the into table so its name can easily be
1031
          changed for all columns at once
1032
    '''
1033
    cols = lists.uniqify(cols)
1034
    
1035
    items = []
1036
    for col in preserve:
1037
        orig_col = copy.copy(col)
1038
        col.table = into
1039
        items.append((orig_col, col))
1040
    preserve = set(preserve)
1041
    for col in cols:
1042
        if col not in preserve:
1043
            items.append((col, sql_gen.Col(strings.ustr(col), into, col.srcs)))
1044
    
1045
    if not as_items: items = dict(items)
1046
    return items
1047

    
1048
def flatten(db, into, joins, cols, limit=None, start=None, **kw_args):
1049
    '''For params, see mk_flatten_mapping()
1050
    @return See return value of mk_flatten_mapping()
1051
    '''
1052
    items = mk_flatten_mapping(db, into, cols, as_items=True, **kw_args)
1053
    cols = [sql_gen.NamedCol(new.name, old) for old, new in items]
1054
    run_query_into(db, mk_select(db, joins, cols, limit=limit, start=start),
1055
        into=into, add_pkey_=True)
1056
        # don't cache because the temp table will usually be truncated after use
1057
    return dict(items)
1058

    
1059
##### Database structure introspection
1060

    
1061
#### Tables
1062

    
1063
def tables(db, schema_like='public', table_like='%', exact=False,
1064
    cacheable=True):
1065
    if exact: compare = '='
1066
    else: compare = 'LIKE'
1067
    
1068
    module = util.root_module(db.db)
1069
    if module == 'psycopg2':
1070
        conds = [('schemaname', sql_gen.CompareCond(schema_like, compare)),
1071
            ('tablename', sql_gen.CompareCond(table_like, compare))]
1072
        return values(select(db, 'pg_tables', ['tablename'], conds,
1073
            order_by='tablename', cacheable=cacheable, log_level=4))
1074
    elif module == 'MySQLdb':
1075
        return values(run_query(db, 'SHOW TABLES LIKE '+db.esc_value(table_like)
1076
            , cacheable=True, log_level=4))
1077
    else: raise NotImplementedError("Can't list tables for "+module+' database')
1078

    
1079
def table_exists(db, table, cacheable=True):
1080
    table = sql_gen.as_Table(table)
1081
    return list(tables(db, table.schema, table.name, True, cacheable)) != []
1082

    
1083
def table_row_count(db, table, recover=None):
1084
    return value(run_query(db, mk_select(db, table, [sql_gen.row_count],
1085
        order_by=None), recover=recover, log_level=3))
1086

    
1087
def table_col_names(db, table, recover=None):
1088
    return list(col_names(select(db, table, limit=0, recover=recover,
1089
        log_level=4)))
1090

    
1091
def table_cols(db, table, *args, **kw_args):
1092
    return [sql_gen.as_Col(strings.ustr(c), table)
1093
        for c in table_col_names(db, table, *args, **kw_args)]
1094

    
1095
def table_pkey_index(db, table, recover=None):
1096
    table_str = sql_gen.Literal(table.to_str(db))
1097
    try:
1098
        return sql_gen.Table(value(run_query(db, '''\
1099
SELECT relname
1100
FROM pg_index
1101
JOIN pg_class index ON index.oid = indexrelid
1102
WHERE
1103
indrelid = '''+table_str.to_str(db)+'''::regclass
1104
AND indisprimary
1105
'''
1106
            , recover, cacheable=True, log_level=4)), table.schema)
1107
    except StopIteration: raise DoesNotExistException('primary key', '')
1108

    
1109
def table_pkey_col(db, table, recover=None):
1110
    table = sql_gen.as_Table(table)
1111
    
1112
    join_cols = ['table_schema', 'table_name', 'constraint_schema',
1113
        'constraint_name']
1114
    tables = [sql_gen.Table('key_column_usage', 'information_schema'),
1115
        sql_gen.Join(sql_gen.Table('table_constraints', 'information_schema'),
1116
            dict(((c, sql_gen.join_same_not_null) for c in join_cols)))]
1117
    cols = [sql_gen.Col('column_name')]
1118
    
1119
    conds = [('constraint_type', 'PRIMARY KEY'), ('table_name', table.name)]
1120
    schema = table.schema
1121
    if schema != None: conds.append(('table_schema', schema))
1122
    order_by = 'position_in_unique_constraint'
1123
    
1124
    try: return sql_gen.Col(value(select(db, tables, cols, conds,
1125
        order_by=order_by, limit=1, log_level=4)), table)
1126
    except StopIteration: raise DoesNotExistException('primary key', '')
1127

    
1128
def pkey_name(db, table, recover=None):
1129
    '''If no pkey, returns the first column in the table.'''
1130
    return pkey_col(db, table, recover).name
1131

    
1132
def pkey_col(db, table, recover=None):
1133
    '''If no pkey, returns the first column in the table.'''
1134
    try: return table_pkey_col(db, table, recover)
1135
    except DoesNotExistException: return table_cols(db, table, recover)[0]
1136

    
1137
not_null_col = 'not_null_col'
1138

    
1139
def table_not_null_col(db, table, recover=None):
1140
    '''Name assumed to be the value of not_null_col. If not found, uses pkey.'''
1141
    if not_null_col in table_col_names(db, table, recover): return not_null_col
1142
    else: return pkey_name(db, table, recover)
1143

    
1144
def constraint_cond(db, constraint):
1145
    module = util.root_module(db.db)
1146
    if module == 'psycopg2':
1147
        table_str = sql_gen.Literal(constraint.table.to_str(db))
1148
        name_str = sql_gen.Literal(constraint.name)
1149
        return value(run_query(db, '''\
1150
SELECT consrc
1151
FROM pg_constraint
1152
WHERE
1153
conrelid = '''+table_str.to_str(db)+'''::regclass
1154
AND conname = '''+name_str.to_str(db)+'''
1155
'''
1156
            , cacheable=True, log_level=4))
1157
    else: raise NotImplementedError("Can't get constraint condition for "
1158
        +module+' database')
1159

    
1160
def index_exprs(db, index):
1161
    index = sql_gen.as_Table(index)
1162
    module = util.root_module(db.db)
1163
    if module == 'psycopg2':
1164
        qual_index = sql_gen.Literal(index.to_str(db))
1165
        return list(values(run_query(db, '''\
1166
SELECT pg_get_indexdef(indexrelid, generate_series(1, indnatts), true)
1167
FROM pg_index
1168
WHERE indexrelid = '''+qual_index.to_str(db)+'''::regclass
1169
'''
1170
            , cacheable=True, log_level=4)))
1171
    else: raise NotImplementedError()
1172

    
1173
def index_cols(db, index):
1174
    '''Can also use this for UNIQUE constraints, because a UNIQUE index is
1175
    automatically created. When you don't know whether something is a UNIQUE
1176
    constraint or a UNIQUE index, use this function.'''
1177
    return map(sql_gen.parse_expr_col, index_exprs(db, index))
1178

    
1179
def index_cond(db, index):
1180
    index = sql_gen.as_Table(index)
1181
    module = util.root_module(db.db)
1182
    if module == 'psycopg2':
1183
        qual_index = sql_gen.Literal(index.to_str(db))
1184
        return value(run_query(db, '''\
1185
SELECT pg_get_expr(indpred, indrelid, true)
1186
FROM pg_index
1187
WHERE indexrelid = '''+qual_index.to_str(db)+'''::regclass
1188
'''
1189
            , cacheable=True, log_level=4))
1190
    else: raise NotImplementedError()
1191

    
1192
def index_order_by(db, index):
1193
    return sql_gen.CustomCode(', '.join(index_exprs(db, index)))
1194

    
1195
def table_cluster_on(db, table, recover=None):
1196
    '''
1197
    @return The table's cluster index, or its pkey if none is set
1198
    '''
1199
    table_str = sql_gen.Literal(table.to_str(db))
1200
    try:
1201
        return sql_gen.Table(value(run_query(db, '''\
1202
SELECT relname
1203
FROM pg_index
1204
JOIN pg_class index ON index.oid = indexrelid
1205
WHERE
1206
indrelid = '''+table_str.to_str(db)+'''::regclass
1207
AND indisclustered
1208
'''
1209
            , recover, cacheable=True, log_level=4)), table.schema)
1210
    except StopIteration: return table_pkey_index(db, table, recover)
1211

    
1212
def table_order_by(db, table, recover=None):
1213
    if table.order_by == None:
1214
        try: table.order_by = index_order_by(db, table_cluster_on(db, table,
1215
            recover))
1216
        except DoesNotExistException: pass
1217
    return table.order_by
1218

    
1219
#### Functions
1220

    
1221
def function_exists(db, function):
1222
    qual_function = sql_gen.Literal(function.to_str(db))
1223
    try:
1224
        select(db, fields=[sql_gen.Cast('regproc', qual_function)],
1225
            recover=True, cacheable=True, log_level=4)
1226
    except DoesNotExistException: return False
1227
    except DuplicateException: return True # overloaded function
1228
    else: return True
1229

    
1230
def function_param0_type(db, function):
1231
    qual_function = sql_gen.Literal(function.to_str(db))
1232
    return value(run_query(db, '''\
1233
SELECT proargtypes[0]::regtype
1234
FROM pg_proc
1235
WHERE oid = '''+qual_function.to_str(db)+'''::regproc
1236
'''
1237
        , cacheable=True, log_level=4))
1238

    
1239
##### Structural changes
1240

    
1241
#### Columns
1242

    
1243
def add_col(db, table, col, comment=None, if_not_exists=False, **kw_args):
1244
    '''
1245
    @param col TypedCol Name may be versioned, so be sure to propagate any
1246
        renaming back to any source column for the TypedCol.
1247
    @param comment None|str SQL comment used to distinguish columns of the same
1248
        name from each other when they contain different data, to allow the
1249
        ADD COLUMN query to be cached. If not set, query will not be cached.
1250
    '''
1251
    assert isinstance(col, sql_gen.TypedCol)
1252
    
1253
    while True:
1254
        str_ = 'ALTER TABLE '+table.to_str(db)+' ADD COLUMN '+col.to_str(db)
1255
        if comment != None: str_ += ' '+sql_gen.esc_comment(comment)
1256
        
1257
        try:
1258
            run_query(db, str_, recover=True, cacheable=True, **kw_args)
1259
            break
1260
        except DuplicateException:
1261
            if if_not_exists: raise
1262
            col.name = next_version(col.name)
1263
            # try again with next version of name
1264

    
1265
def add_not_null(db, col):
1266
    table = col.table
1267
    col = sql_gen.to_name_only_col(col)
1268
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ALTER COLUMN '
1269
        +col.to_str(db)+' SET NOT NULL', cacheable=True, log_level=3)
1270

    
1271
def drop_not_null(db, col):
1272
    table = col.table
1273
    col = sql_gen.to_name_only_col(col)
1274
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ALTER COLUMN '
1275
        +col.to_str(db)+' DROP NOT NULL', cacheable=True, log_level=3)
1276

    
1277
row_num_col = '_row_num'
1278

    
1279
row_num_col_def = sql_gen.TypedCol('', 'serial', nullable=False,
1280
    constraints='PRIMARY KEY')
1281

    
1282
def add_row_num(db, table, name=row_num_col):
1283
    '''Adds a row number column to a table. Its definition is in
1284
    row_num_col_def. It will be the primary key.'''
1285
    col_def = copy.copy(row_num_col_def)
1286
    col_def.name = name
1287
    add_col(db, table, col_def, comment='', if_not_exists=True, log_level=3)
1288

    
1289
#### Indexes
1290

    
1291
def add_pkey(db, table, cols=None, recover=None):
1292
    '''Adds a primary key.
1293
    @param cols [sql_gen.Col,...] The columns in the primary key.
1294
        Defaults to the first column in the table.
1295
    @pre The table must not already have a primary key.
1296
    '''
1297
    table = sql_gen.as_Table(table)
1298
    if cols == None: cols = [pkey_name(db, table, recover)]
1299
    col_strs = [sql_gen.to_name_only_col(v).to_str(db) for v in cols]
1300
    
1301
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ADD PRIMARY KEY ('
1302
        +(', '.join(col_strs))+')', recover=True, cacheable=True, log_level=3,
1303
        log_ignore_excs=(DuplicateException,))
1304

    
1305
def add_index(db, exprs, table=None, unique=False, ensure_not_null_=True):
1306
    '''Adds an index on column(s) or expression(s) if it doesn't already exist.
1307
    Currently, only function calls and literal values are supported expressions.
1308
    @param ensure_not_null_ If set, translates NULL values to sentinel values.
1309
        This allows indexes to be used for comparisons where NULLs are equal.
1310
    '''
1311
    exprs = lists.mk_seq(exprs)
1312
    
1313
    # Parse exprs
1314
    old_exprs = exprs[:]
1315
    exprs = []
1316
    cols = []
1317
    for i, expr in enumerate(old_exprs):
1318
        expr = sql_gen.as_Col(expr, table)
1319
        
1320
        # Handle nullable columns
1321
        if ensure_not_null_:
1322
            try: expr = sql_gen.ensure_not_null(db, expr)
1323
            except KeyError: pass # unknown type, so just create plain index
1324
        
1325
        # Extract col
1326
        expr = copy.deepcopy(expr) # don't modify input!
1327
        col = expr
1328
        if isinstance(expr, sql_gen.FunctionCall): col = expr.args[0]
1329
        expr = sql_gen.cast_literal(expr)
1330
        if not isinstance(expr, (sql_gen.Expr, sql_gen.Col)):
1331
            expr = sql_gen.Expr(expr)
1332
            
1333
        
1334
        # Extract table
1335
        if table == None:
1336
            assert sql_gen.is_table_col(col)
1337
            table = col.table
1338
        
1339
        if isinstance(col, sql_gen.Col): col.table = None
1340
        
1341
        exprs.append(expr)
1342
        cols.append(col)
1343
    
1344
    table = sql_gen.as_Table(table)
1345
    
1346
    # Add index
1347
    str_ = 'CREATE'
1348
    if unique: str_ += ' UNIQUE'
1349
    str_ += ' INDEX ON '+table.to_str(db)+' ('+(
1350
        ', '.join((v.to_str(db) for v in exprs)))+')'
1351
    run_query(db, str_, recover=True, cacheable=True, log_level=3)
1352

    
1353
def add_pkey_or_index(db, table, cols=None, recover=None, warn=False):
1354
    try: add_pkey(db, table, cols, recover)
1355
    except DuplicateKeyException, e:
1356
        if warn: warnings.warn(UserWarning(exc.str_(e)))
1357
        add_index(db, pkey_col(db, table), table)
1358

    
1359
already_indexed = object() # tells add_indexes() the pkey has already been added
1360

    
1361
def add_indexes(db, table, has_pkey=True):
1362
    '''Adds an index on all columns in a table.
1363
    @param has_pkey bool|already_indexed Whether a pkey instead of a regular
1364
        index should be added on the first column.
1365
        * If already_indexed, the pkey is assumed to have already been added
1366
    '''
1367
    cols = table_col_names(db, table)
1368
    if has_pkey:
1369
        if has_pkey is not already_indexed: add_pkey(db, table)
1370
        cols = cols[1:]
1371
    for col in cols: add_index(db, col, table)
1372

    
1373
#### Tables
1374

    
1375
### Maintenance
1376

    
1377
def analyze(db, table):
1378
    table = sql_gen.as_Table(table)
1379
    run_query(db, 'ANALYZE '+table.to_str(db), log_level=3)
1380

    
1381
def autoanalyze(db, table):
1382
    if db.autoanalyze: analyze(db, table)
1383

    
1384
def vacuum(db, table):
1385
    table = sql_gen.as_Table(table)
1386
    db.with_autocommit(lambda: run_query(db, 'VACUUM ANALYZE '+table.to_str(db),
1387
        log_level=3))
1388

    
1389
### Lifecycle
1390

    
1391
def drop(db, type_, name):
1392
    name = sql_gen.as_Name(name)
1393
    run_query(db, 'DROP '+type_+' IF EXISTS '+name.to_str(db)+' CASCADE')
1394

    
1395
def drop_table(db, table): drop(db, 'TABLE', table)
1396

    
1397
def create_table(db, table, cols=[], has_pkey=True, col_indexes=True,
1398
    like=None):
1399
    '''Creates a table.
1400
    @param cols [sql_gen.TypedCol,...] The column names and types
1401
    @param has_pkey If set, the first column becomes the primary key.
1402
    @param col_indexes bool|[ref]
1403
        * If True, indexes will be added on all non-pkey columns.
1404
        * If a list reference, [0] will be set to a function to do this.
1405
          This can be used to delay index creation until the table is populated.
1406
    '''
1407
    table = sql_gen.as_Table(table)
1408
    
1409
    if like != None:
1410
        cols = [sql_gen.CustomCode('LIKE '+like.to_str(db)+' INCLUDING ALL')
1411
            ]+cols
1412
        table.order_by = like.order_by
1413
    if has_pkey:
1414
        cols[0] = pkey = copy.copy(cols[0]) # don't modify input!
1415
        pkey.constraints = 'PRIMARY KEY'
1416
    
1417
    temp = table.is_temp and not db.debug_temp
1418
        # temp tables permanent in debug_temp mode
1419
    
1420
    # Create table
1421
    def create():
1422
        str_ = 'CREATE'
1423
        if temp: str_ += ' TEMP'
1424
        str_ += ' TABLE '+table.to_str(db)+' (\n'
1425
        str_ += '\n, '.join(c.to_str(db) for c in cols)
1426
        str_ += '\n);'
1427
        
1428
        run_query(db, str_, recover=True, cacheable=True, log_level=2,
1429
            log_ignore_excs=(DuplicateException,))
1430
    if table.is_temp:
1431
        while True:
1432
            try:
1433
                create()
1434
                break
1435
            except DuplicateException:
1436
                table.name = next_version(table.name)
1437
                # try again with next version of name
1438
    else: create()
1439
    
1440
    # Add indexes
1441
    if has_pkey: has_pkey = already_indexed
1442
    def add_indexes_(): add_indexes(db, table, has_pkey)
1443
    if isinstance(col_indexes, list): col_indexes[0] = add_indexes_ # defer
1444
    elif col_indexes: add_indexes_() # add now
1445

    
1446
def copy_table_struct(db, src, dest):
1447
    '''Creates a structure-only copy of a table. (Does not copy data.)'''
1448
    create_table(db, dest, has_pkey=False, col_indexes=False, like=src)
1449

    
1450
def copy_table(db, src, dest):
1451
    '''Creates a copy of a table, including data'''
1452
    copy_table_struct(db, src, dest)
1453
    insert_select(db, dest, None, mk_select(db, src))
1454

    
1455
### Data
1456

    
1457
def truncate(db, table, schema='public', **kw_args):
1458
    '''For params, see run_query()'''
1459
    table = sql_gen.as_Table(table, schema)
1460
    return run_query(db, 'TRUNCATE '+table.to_str(db)+' CASCADE', **kw_args)
1461

    
1462
def empty_temp(db, tables):
1463
    tables = lists.mk_seq(tables)
1464
    for table in tables: truncate(db, table, log_level=3)
1465

    
1466
def empty_db(db, schema='public', **kw_args):
1467
    '''For kw_args, see tables()'''
1468
    for table in tables(db, schema, **kw_args): truncate(db, table, schema)
1469

    
1470
def distinct_table(db, table, distinct_on):
1471
    '''Creates a copy of a temp table which is distinct on the given columns.
1472
    The old and new tables will both get an index on these columns, to
1473
    facilitate merge joins.
1474
    @param distinct_on If empty, creates a table with one row. This is useful if
1475
        your distinct_on columns are all literal values.
1476
    @return The new table.
1477
    '''
1478
    new_table = sql_gen.suffixed_table(table, '_distinct')
1479
    distinct_on = filter(sql_gen.is_table_col, distinct_on)
1480
    
1481
    copy_table_struct(db, table, new_table)
1482
    
1483
    limit = None
1484
    if distinct_on == []: limit = 1 # one sample row
1485
    else:
1486
        add_index(db, distinct_on, new_table, unique=True)
1487
        add_index(db, distinct_on, table) # for join optimization
1488
    
1489
    insert_select(db, new_table, None, mk_select(db, table, order_by=None,
1490
        limit=limit), ignore=True)
1491
    analyze(db, new_table)
1492
    
1493
    return new_table
(28-28/42)