# Database access

import copy
import re
import time
import warnings

import exc
import dicts
import iters
import lists
import profiling
from Proxy import Proxy
import rand
import sql_gen
import strings
import util

##### Exceptions

def get_cur_query(cur, input_query=None):
    raw_query = None
    if hasattr(cur, 'query'): raw_query = cur.query
    elif hasattr(cur, '_last_executed'): raw_query = cur._last_executed

    if raw_query != None: return raw_query
    else: return '[input] '+strings.ustr(input_query)

def _add_cursor_info(e, *args, **kw_args):
    '''For params, see get_cur_query()'''
    exc.add_msg(e, 'query: '+strings.ustr(get_cur_query(*args, **kw_args)))

class DbException(exc.ExceptionWithCause):
    def __init__(self, msg, cause=None, cur=None):
        exc.ExceptionWithCause.__init__(self, msg, cause, cause_newline=True)
        if cur != None: _add_cursor_info(self, cur)

class ExceptionWithName(DbException):
    def __init__(self, name, cause=None):
        DbException.__init__(self, 'for name: '
            +strings.as_tt(strings.ustr(name)), cause)
        self.name = name

class ExceptionWithValue(DbException):
    def __init__(self, value, cause=None):
        DbException.__init__(self, 'for value: '
            +strings.as_tt(strings.urepr(value)), cause)
        self.value = value

class ExceptionWithNameType(DbException):
    def __init__(self, type_, name, cause=None):
        DbException.__init__(self, 'for type: '+strings.as_tt(strings.ustr(
            type_))+'; name: '+strings.as_tt(name), cause)
        self.type = type_
        self.name = name

class ConstraintException(DbException):
    def __init__(self, name, cond, cols, cause=None):
        msg = 'Violated '+strings.as_tt(name)+' constraint'
        if cond != None: msg += ' with condition '+strings.as_tt(cond)
        if cols != []: msg += ' on columns: '+strings.as_tt(', '.join(cols))
        DbException.__init__(self, msg, cause)
        self.name = name
        self.cond = cond
        self.cols = cols

class MissingCastException(DbException):
    def __init__(self, type_, col=None, cause=None):
        msg = 'Missing cast to type '+strings.as_tt(type_)
        if col != None: msg += ' on column: '+strings.as_tt(col)
        DbException.__init__(self, msg, cause)
        self.type = type_
        self.col = col

class EncodingException(ExceptionWithName): pass

class DuplicateKeyException(ConstraintException): pass

class NullValueException(ConstraintException): pass

class CheckException(ConstraintException): pass

class InvalidValueException(ExceptionWithValue): pass

class DuplicateException(ExceptionWithNameType): pass

class DoesNotExistException(ExceptionWithNameType): pass

class EmptyRowException(DbException): pass

##### Warnings

class DbWarning(UserWarning): pass

##### Result retrieval

def col_names(cur): return (col[0] for col in cur.description)

def rows(cur): return iter(lambda: cur.fetchone(), None)

def consume_rows(cur):
    '''Used to fetch all rows so result will be cached'''
    iters.consume_iter(rows(cur))

def next_row(cur): return rows(cur).next()

def row(cur):
    row_ = next_row(cur)
    consume_rows(cur)
    return row_

def next_value(cur): return next_row(cur)[0]

def value(cur): return row(cur)[0]

def values(cur): return iters.func_iter(lambda: next_value(cur))

def value_or_none(cur):
    try: return value(cur)
    except StopIteration: return None

##### Escaping

def esc_name_by_module(module, name):
    if module == 'psycopg2' or module == None: quote = '"'
    elif module == 'MySQLdb': quote = '`'
    else: raise NotImplementedError("Can't escape name for "+module+' database')
    return sql_gen.esc_name(name, quote)

def esc_name_by_engine(engine, name, **kw_args):
    return esc_name_by_module(db_engines[engine][0], name, **kw_args)

def esc_name(db, name, **kw_args):
    return esc_name_by_module(util.root_module(db.db), name, **kw_args)

def qual_name(db, schema, table):
    def esc_name_(name): return esc_name(db, name)
    table = esc_name_(table)
    if schema != None: return esc_name_(schema)+'.'+table
    else: return table

##### Database connections

db_config_names = ['engine', 'host', 'user', 'password', 'database', 'schemas']

db_engines = {
    'MySQL': ('MySQLdb', {'password': 'passwd', 'database': 'db'}),
    'PostgreSQL': ('psycopg2', {}),
}

DatabaseErrors_set = set([DbException])
DatabaseErrors = tuple(DatabaseErrors_set)

def _add_module(module):
    DatabaseErrors_set.add(module.DatabaseError)
    global DatabaseErrors
    DatabaseErrors = tuple(DatabaseErrors_set)

def db_config_str(db_config):
    return db_config['engine']+' database '+db_config['database']

log_debug_none = lambda msg, level=2: None

class DbConn:
    def __init__(self, db_config, autocommit=True, caching=True,
        log_debug=log_debug_none, debug_temp=False, src=None):
        '''
        @param debug_temp Whether temporary objects should instead be permanent.
            This assists in debugging the internal objects used by the program.
        @param src In autocommit mode, will be included in a comment in every
            query, to help identify the data source in pg_stat_activity.
        '''
        self.db_config = db_config
        self.autocommit = autocommit
        self.caching = caching
        self.log_debug = log_debug
        self.debug = log_debug != log_debug_none
        self.debug_temp = debug_temp
        self.src = src
        self.autoanalyze = False
        self.autoexplain = False
        self.profile_row_ct = None

        self._savepoint = 0
        self._reset()

    def __getattr__(self, name):
        if name == '__dict__': raise Exception('getting __dict__')
        if name == 'db': return self._db()
        else: raise AttributeError()

    def __getstate__(self):
        state = copy.copy(self.__dict__) # shallow copy
        state['log_debug'] = None # don't pickle the debug callback
        state['_DbConn__db'] = None # don't pickle the connection
        return state

    def clear_cache(self): self.query_results = {}

    def _reset(self):
        self.clear_cache()
        assert self._savepoint == 0
        self._notices_seen = set()
        self.__db = None

    def connected(self): return self.__db != None

    def close(self):
        if not self.connected(): return

        # Record that the automatic transaction is now closed
        self._savepoint -= 1

        self.db.close()
        self._reset()

    def reconnect(self):
        # Do not do this in test mode as it would roll back everything
        if self.autocommit: self.close()
        # Connection will be reopened automatically on first query

    def _db(self):
        if self.__db == None:
            # Process db_config
            db_config = self.db_config.copy() # don't modify input!
            schemas = db_config.pop('schemas', None)
            module_name, mappings = db_engines[db_config.pop('engine')]
            module = __import__(module_name)
            _add_module(module)
            for orig, new in mappings.iteritems():
                try: util.rename_key(db_config, orig, new)
                except KeyError: pass

            # Connect
            self.__db = module.connect(**db_config)

            # Record that a transaction is already open
            self._savepoint += 1

            # Configure connection
            if hasattr(self.db, 'set_isolation_level'):
                import psycopg2.extensions
                self.db.set_isolation_level(
                    psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
            if schemas != None:
                search_path = [self.esc_name(s) for s in schemas.split(',')]
                search_path.append(value(run_query(self, 'SHOW search_path',
                    log_level=4)))
                run_query(self, 'SET search_path TO '+(','.join(search_path)),
                    log_level=3)

        return self.__db

    class DbCursor(Proxy):
        def __init__(self, outer):
            Proxy.__init__(self, outer.db.cursor())
            self.outer = outer
            self.query_results = outer.query_results
            self.query_lookup = None
            self.result = []

        def execute(self, query):
            self._is_insert = query.startswith('INSERT')
            self.query_lookup = query
            try:
                try: cur = self.inner.execute(query)
                finally: self.query = get_cur_query(self.inner, query)
            except Exception, e:
                self.result = e # cache the exception as the result
                self._cache_result()
                raise

            # Always cache certain queries
            query = sql_gen.lstrip(query)
            if query.startswith('CREATE') or query.startswith('ALTER'):
                # structural changes
                # Rest of query must be unique in the face of name collisions,
                # so don't cache ADD COLUMN unless it has distinguishing comment
                if query.find('ADD COLUMN') < 0 or query.endswith('*/'):
                    self._cache_result()
            elif self.rowcount == 0 and query.startswith('SELECT'): # empty
                consume_rows(self) # fetch all rows so result will be cached

            return cur

        def fetchone(self):
            row = self.inner.fetchone()
            if row != None: self.result.append(row)
            # otherwise, fetched all rows
            else: self._cache_result()
            return row

        def _cache_result(self):
            # For inserts that return a result set, don't cache result set since
            # inserts are not idempotent. Other non-SELECT queries don't have
            # their result set read, so only exceptions will be cached (an
            # invalid query will always be invalid).
            if self.query_results != None and (not self._is_insert
                or isinstance(self.result, Exception)):

                assert self.query_lookup != None
                self.query_results[self.query_lookup] = self.CacheCursor(
                    util.dict_subset(dicts.AttrsDictView(self),
                    ['query', 'result', 'rowcount', 'description']))

        class CacheCursor:
            def __init__(self, cached_result): self.__dict__ = cached_result

            def execute(self, *args, **kw_args):
                if isinstance(self.result, Exception): raise self.result
                # otherwise, result is a rows list
                self.iter = iter(self.result)

            def fetchone(self):
                try: return self.iter.next()
                except StopIteration: return None

    def esc_value(self, value):
        try: str_ = self.mogrify('%s', [value])
        except NotImplementedError, e:
            module = util.root_module(self.db)
            if module == 'MySQLdb':
                import _mysql
                str_ = _mysql.escape_string(value)
            else: raise e
        return strings.to_unicode(str_)

    def esc_name(self, name): return esc_name(self, name) # calls global func

    def std_code(self, str_):
        '''Standardizes SQL code.
        * Ensures that string literals are prefixed by `E`
        '''
        if str_.startswith("'"): str_ = 'E'+str_
        return str_

    def can_mogrify(self):
        module = util.root_module(self.db)
        return module == 'psycopg2'

    def mogrify(self, query, params=None):
        if self.can_mogrify(): return self.db.cursor().mogrify(query, params)
        else: raise NotImplementedError("Can't mogrify query")

    def set_encoding(self, encoding):
        encoding_str = sql_gen.Literal(encoding)
        run_query(self, 'SET NAMES '+encoding_str.to_str(self))

    def print_notices(self):
        if hasattr(self.db, 'notices'):
            for msg in self.db.notices:
                if msg not in self._notices_seen:
                    self._notices_seen.add(msg)
                    self.log_debug(msg, level=2)

    def run_query(self, query, cacheable=False, log_level=2,
        debug_msg_ref=None):
        '''
        @param log_ignore_excs The log_level will be increased by 2 if the query
            throws one of these exceptions.
        @param debug_msg_ref If specified, the log message will be returned in
            this instead of being output. This allows you to filter log messages
            depending on the result of the query.
        '''
        assert query != None

        if self.autocommit and self.src != None:
            query = sql_gen.esc_comment(self.src)+'\t'+query

        if not self.caching: cacheable = False
        used_cache = False

        if self.debug:
            profiler = profiling.ItersProfiler(start_now=True, iter_text='row')
        try:
            # Get cursor
            if cacheable:
                try: cur = self.query_results[query]
                except KeyError: cur = self.DbCursor(self)
                else: used_cache = True
            else: cur = self.db.cursor()

            # Run query
            try: cur.execute(query)
            except Exception, e:
                _add_cursor_info(e, self, query)
                raise
            else: self.do_autocommit()
        finally:
            if self.debug:
                profiler.stop(self.profile_row_ct)

                ## Log or return query

                query = strings.ustr(get_cur_query(cur, query))
                # Put the src comment on a separate line in the log file
                query = query.replace('\t', '\n', 1)

                msg = 'DB query: '

                if used_cache: msg += 'cache hit'
                elif cacheable: msg += 'cache miss'
                else: msg += 'non-cacheable'

                msg += ':\n'+profiler.msg()+'\n'+strings.as_code(query, 'SQL')

                if debug_msg_ref != None: debug_msg_ref[0] = msg
                else: self.log_debug(msg, log_level)

                self.print_notices()

        return cur

    def is_cached(self, query): return query in self.query_results

    def with_autocommit(self, func):
        import psycopg2.extensions

        prev_isolation_level = self.db.isolation_level
        self.db.set_isolation_level(
            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        try: return func()
        finally: self.db.set_isolation_level(prev_isolation_level)

    def with_savepoint(self, func):
        top = self._savepoint == 0
        savepoint = 'level_'+str(self._savepoint)

        if self.debug:
            self.log_debug('Begin transaction', level=4)
            profiler = profiling.ItersProfiler(start_now=True, iter_text='row')

        # Must happen before running queries so they don't get autocommitted
        self._savepoint += 1

        if top: query = 'START TRANSACTION ISOLATION LEVEL READ COMMITTED'
        else: query = 'SAVEPOINT '+savepoint
        self.run_query(query, log_level=4)
        try:
            return_val = func()

            # COMMIT must happen before returning, or it would never run
            if top: self.run_query('COMMIT', log_level=4)

            return return_val
        except:
            if top: query = 'ROLLBACK'
            else: query = 'ROLLBACK TO SAVEPOINT '+savepoint
            self.run_query(query, log_level=4)

            raise
        finally:
            # Always release savepoint, because after ROLLBACK TO SAVEPOINT,
            # "The savepoint remains valid and can be rolled back to again"
            # (http://www.postgresql.org/docs/8.3/static/sql-rollback-to.html).
            if not top:
                self.run_query('RELEASE SAVEPOINT '+savepoint, log_level=4)

            self._savepoint -= 1
            assert self._savepoint >= 0

            if self.debug:
                profiler.stop(self.profile_row_ct)
                self.log_debug('End transaction\n'+profiler.msg(), level=4)

            self.do_autocommit() # OK to do this after ROLLBACK TO SAVEPOINT

    def do_autocommit(self):
        '''Autocommits if outside savepoint'''
        assert self._savepoint >= 1
        if self.autocommit and self._savepoint == 1:
            self.log_debug('Autocommitting', level=4)
            self.db.commit()

    def col_info(self, col, cacheable=True):
        table = sql_gen.Table('columns', 'information_schema')
        cols = [sql_gen.Col('data_type'), sql_gen.Col('udt_name'),
            'column_default', sql_gen.Cast('boolean',
            sql_gen.Col('is_nullable'))]

        conds = [('table_name', col.table.name),
            ('column_name', strings.ustr(col.name))]
        schema = col.table.schema
        if schema != None: conds.append(('table_schema', schema))

        cur = select(self, table, cols, conds, order_by='table_schema', limit=1,
            cacheable=cacheable, log_level=4) # TODO: order by search_path order
        try: type_, extra_type, default, nullable = row(cur)
        except StopIteration: raise sql_gen.NoUnderlyingTableException(col)
        default = sql_gen.as_Code(default, self)
        if type_ == 'USER-DEFINED': type_ = extra_type
        elif type_ == 'ARRAY':
            type_ = sql_gen.ArrayType(strings.remove_prefix('_', extra_type,
                require=True))

        return sql_gen.TypedCol(col.name, type_, default, nullable)

    def TempFunction(self, name):
        if self.debug_temp: schema = None
        else: schema = 'pg_temp'
        return sql_gen.Function(name, schema)

connect = DbConn
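
# Usage sketch (the config values and table name below are hypothetical, not
# part of this module). The underlying connection is opened lazily on the
# first query:
#
#     db = connect(dict(engine='PostgreSQL', host='localhost', user='bien',
#         password='...', database='vegbien', schemas='public'))
#     count = value(run_query(db, 'SELECT count(*) FROM plot'))
#     db.close()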

##### Recoverable querying

def parse_exception(db, e, recover=False):
    msg = strings.ustr(e.args[0])
    msg = re.sub(r'^(?:PL/Python: )?ValueError: ', r'', msg)

    match = re.match(r'^invalid byte sequence for encoding "(.+?)":', msg)
    if match:
        encoding, = match.groups()
        raise EncodingException(encoding, e)

    match = re.match(r'^duplicate key value violates unique constraint "(.+?)"',
        msg)
    if match:
        constraint, = match.groups()
        cols = []
        cond = None
        if recover: # need auto-rollback to run index_cols()
            try:
                cols = index_cols(db, constraint)
                cond = index_cond(db, constraint)
            except NotImplementedError: pass
        raise DuplicateKeyException(constraint, cond, cols, e)

    match = re.match(r'^null value in column "(.+?)" violates not-null'
        r' constraint', msg)
    if match:
        col, = match.groups()
        raise NullValueException('NOT NULL', None, [col], e)

    match = re.match(r'^new row for relation "(.+?)" violates check '
        r'constraint "(.+?)"', msg)
    if match:
        table, constraint = match.groups()
        constraint = sql_gen.Col(constraint, table)
        cond = None
        if recover: # need auto-rollback to run constraint_cond()
            try: cond = constraint_cond(db, constraint)
            except NotImplementedError: pass
        raise CheckException(constraint.to_str(db), cond, [], e)

    match = re.match(r'^(?:invalid input (?:syntax|value)\b.*?'
        r'|.+? out of range): "(.+?)"', msg)
    if match:
        value, = match.groups()
        raise InvalidValueException(strings.to_unicode(value), e)

    match = re.match(r'^column "(.+?)" is of type (.+?) but expression '
        r'is of type', msg)
    if match:
        col, type_ = match.groups()
        raise MissingCastException(type_, col, e)

    match = re.match(r'^could not determine polymorphic type because '
        r'input has type "unknown"', msg)
    if match: raise MissingCastException('text', None, e)

    match = re.match(r'^.+? types .+? and .+? cannot be matched', msg)
    if match: raise MissingCastException('text', None, e)

    typed_name_re = r'^(\S+) "?(.+?)"?(?: of relation ".+?")?'

    match = re.match(typed_name_re+r'.*? already exists', msg)
    if match:
        type_, name = match.groups()
        raise DuplicateException(type_, name, e)

    match = re.match(r'more than one (\S+) named ""(.+?)""', msg)
    if match:
        type_, name = match.groups()
        raise DuplicateException(type_, name, e)

    match = re.match(typed_name_re+r' does not exist', msg)
    if match:
        type_, name = match.groups()
        if type_ == 'function':
            match = re.match(r'^"?(.+?)"?\(.*\)$', name)
            if match:
                function_name, = match.groups()
                if msg.split('\n')[1].find(function_name) >= 0: # also on line 2
                    # not found only because of a missing cast
                    raise MissingCastException('text', function_name, e)
        raise DoesNotExistException(type_, name, e)

    raise # no specific exception raised

def with_savepoint(db, func): return db.with_savepoint(func)
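
# Sketch of how recoverable querying composes (the callable is hypothetical;
# assumes a live DbConn). Queries run inside func() are rolled back as a unit
# on error, while the enclosing transaction survives:
#
#     def func(): return run_query(db, 'SELECT 1', recover=True)
#     cur = with_savepoint(db, func)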

def run_query(db, query, recover=None, cacheable=False, log_level=2,
    log_ignore_excs=None, **kw_args):
    '''For params, see DbConn.run_query()'''
    if recover == None: recover = False
    if log_ignore_excs == None: log_ignore_excs = ()
    log_ignore_excs = tuple(log_ignore_excs)
    debug_msg_ref = [None]

    query = with_explain_comment(db, query)

    try:
        try:
            def run(): return db.run_query(query, cacheable, log_level,
                debug_msg_ref, **kw_args)
            if recover and not db.is_cached(query):
                return with_savepoint(db, run)
            else: return run() # don't need savepoint if cached
        except Exception, e: parse_exception(db, e, recover)
    except log_ignore_excs:
        log_level += 2
        raise
    finally:
        if debug_msg_ref[0] != None: db.log_debug(debug_msg_ref[0], log_level)

##### Basic queries

def is_explainable(query):
    # See <http://www.postgresql.org/docs/8.3/static/sql-explain.html#AEN57749>
    return re.match(r'^(?:SELECT|INSERT|UPDATE|DELETE|VALUES|EXECUTE|DECLARE)\b'
        , query)

def explain(db, query, **kw_args):
    '''
    For params, see run_query().
    '''
    kw_args.setdefault('log_level', 4)

    return strings.ustr(strings.join_lines(values(run_query(db,
        'EXPLAIN '+query, recover=True, cacheable=True, **kw_args))))
        # not a higher log_level because it's useful to see what query is being
        # run before it's executed, which EXPLAIN effectively provides

def has_comment(query): return query.endswith('*/')

def with_explain_comment(db, query, **kw_args):
    if db.autoexplain and not has_comment(query) and is_explainable(query):
        query += '\n'+sql_gen.esc_comment(' EXPLAIN:\n'
            +explain(db, query, **kw_args))
    return query

def next_version(name):
    version = 1 # first existing name was version 0
    match = re.match(r'^(.*)#(\d+)$', name)
    if match:
        name, version = match.groups()
        version = int(version)+1
    return sql_gen.concat(name, '#'+str(version))
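
# Illustration of the versioning scheme (values shown assume sql_gen.concat()
# simply appends the suffix when the name is short enough):
#     next_version('widget')   -> 'widget#1'
#     next_version('widget#1') -> 'widget#2'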

def lock_table(db, table, mode):
    table = sql_gen.as_Table(table)
    run_query(db, 'LOCK TABLE '+table.to_str(db)+' IN '+mode+' MODE')

def run_query_into(db, query, into=None, add_pkey_=False, **kw_args):
    '''Outputs a query to a temp table.
    For params, see run_query().
    '''
    if into == None: return run_query(db, query, **kw_args)

    assert isinstance(into, sql_gen.Table)

    into.is_temp = True
    # "temporary tables cannot specify a schema name", so remove schema
    into.schema = None

    kw_args['recover'] = True
    kw_args.setdefault('log_ignore_excs', (DuplicateException,))

    temp = not db.debug_temp # tables are permanent in debug_temp mode

    # Create table
    while True:
        create_query = 'CREATE'
        if temp: create_query += ' TEMP'
        create_query += ' TABLE '+into.to_str(db)+' AS\n'+query

        try:
            cur = run_query(db, create_query, **kw_args)
                # CREATE TABLE AS sets rowcount to # rows in query
            break
        except DuplicateException, e:
            into.name = next_version(into.name)
            # try again with next version of name

    if add_pkey_: add_pkey(db, into)

    # According to the PostgreSQL doc, "The autovacuum daemon cannot access and
    # therefore cannot vacuum or analyze temporary tables. [...] if a temporary
    # table is going to be used in complex queries, it is wise to run ANALYZE on
    # the temporary table after it is populated."
    # (http://www.postgresql.org/docs/9.1/static/sql-createtable.html)
    # If into is not a temp table, ANALYZE is useful but not required.
    analyze(db, into)

    return cur

order_by_pkey = object() # tells mk_select() to order by the pkey

distinct_on_all = object() # tells mk_select() to SELECT DISTINCT ON all columns

def mk_select(db, tables=None, fields=None, conds=None, distinct_on=[],
    limit=None, start=None, order_by=order_by_pkey, default_table=None,
    explain=True):
    '''
    @param tables The single table to select from, or a list of tables to join
        together, with tables after the first being sql_gen.Join objects
    @param fields Use None to select all fields in the table
    @param conds WHERE conditions: [(compare_left_side, compare_right_side),...]
        * container can be any iterable type
        * compare_left_side: sql_gen.Code|str (for col name)
        * compare_right_side: sql_gen.ValueCond|literal value
    @param distinct_on The columns to SELECT DISTINCT ON, or distinct_on_all to
        use all columns
    @return query
    '''
    # Parse tables param
    tables = lists.mk_seq(tables)
    tables = list(tables) # don't modify input! (list() copies input)
    table0 = sql_gen.as_Table(tables.pop(0)) # first table is separate

    # Parse other params
    if conds == None: conds = []
    elif dicts.is_dict(conds): conds = conds.items()
    conds = list(conds) # don't modify input! (list() copies input)
    assert limit == None or isinstance(limit, (int, long))
    assert start == None or isinstance(start, (int, long))
    if limit == 0: order_by = None
    if order_by is order_by_pkey:
        if lists.is_seq(distinct_on) and distinct_on: order_by = distinct_on[0]
        elif table0 != None: order_by = table_order_by(db, table0, recover=True)
        else: order_by = None

    query = 'SELECT'

    def parse_col(col): return sql_gen.as_Col(col, default_table).to_str(db)

    # DISTINCT ON columns
    if distinct_on != []:
        query += '\nDISTINCT'
        if distinct_on is not distinct_on_all:
            query += ' ON ('+(', '.join(map(parse_col, distinct_on)))+')'

    # Columns
    if query.find('\n') >= 0: whitespace = '\n'
    else: whitespace = ' '
    if fields == None: query += whitespace+'*'
    else:
        assert fields != []
        if len(fields) > 1: whitespace = '\n'
        query += whitespace+('\n, '.join(map(parse_col, fields)))

    # Main table
    if query.find('\n') >= 0 or len(tables) > 0: whitespace = '\n'
    else: whitespace = ' '
    if table0 != None: query += whitespace+'FROM '+table0.to_str(db)

    # Add joins
    left_table = table0
    for join_ in tables:
        table = join_.table

        # Parse special values
        if join_.type_ is sql_gen.filter_out: # filter no match
            conds.append((sql_gen.Col(table_not_null_col(db, table), table),
                sql_gen.CompareCond(None, '~=')))

        query += '\n'+join_.to_str(db, left_table)

        left_table = table

    missing = True
    if conds != []:
        if len(conds) == 1: whitespace = ' '
        else: whitespace = '\n'
        query += '\n'+sql_gen.combine_conds([sql_gen.ColValueCond(l, r)
            .to_str(db) for l, r in conds], 'WHERE')
    if order_by != None:
        query += '\nORDER BY '+sql_gen.as_Col(order_by).to_str(db)
    if limit != None: query += '\nLIMIT '+str(limit)
    if start != None:
        if start != 0: query += '\nOFFSET '+str(start)

    if explain: query = with_explain_comment(db, query)

    return query

def select(db, *args, **kw_args):
    '''For params, see mk_select() and run_query()'''
    recover = kw_args.pop('recover', None)
    cacheable = kw_args.pop('cacheable', True)
    log_level = kw_args.pop('log_level', 2)

    return run_query(db, mk_select(db, *args, **kw_args), recover, cacheable,
        log_level=log_level)
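
# Example call (hypothetical table and column names; the exact quoting and the
# default ORDER BY are determined by sql_gen and table_order_by()):
#
#     cur = select(db, 'plot', ['plot_id'], {'site_id': 7}, limit=10)
#     # roughly equivalent to:
#     #   SELECT "plot_id" FROM "plot" WHERE "site_id" = 7 LIMIT 10
#     for r in rows(cur): print r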

def mk_insert_select(db, table, cols=None, select_query=None, returning=None,
    embeddable=False, ignore=False, src=None):
    '''
    @param returning str|None An inserted column (such as pkey) to return
    @param embeddable Whether the query should be embeddable as a nested SELECT.
        Warning: If you set this and cacheable=True when the query is run, the
        query will be fully cached, not just if it raises an exception.
    @param ignore Whether to ignore duplicate keys.
    @param src Will be included in the name of any created function, to help
        identify the data source in pg_stat_activity.
    '''
    table = sql_gen.remove_table_rename(sql_gen.as_Table(table))
    if cols == []: cols = None # no cols (all defaults) = unknown col names
    if cols != None: cols = [sql_gen.to_name_only_col(c, table) for c in cols]
    if select_query == None: select_query = 'DEFAULT VALUES'
    if returning != None: returning = sql_gen.as_Col(returning, table)

    first_line = 'INSERT INTO '+table.to_str(db)

    def mk_insert(select_query):
        query = first_line
        if cols != None:
            query += '\n('+(', '.join((c.to_str(db) for c in cols)))+')'
        query += '\n'+select_query

        if returning != None:
            returning_name_col = sql_gen.to_name_only_col(returning)
            query += '\nRETURNING '+returning_name_col.to_str(db)

        return query

    return_type = sql_gen.CustomCode('unknown')
    if returning != None: return_type = sql_gen.ColType(returning)

    if ignore:
        # Always return something to set the correct rowcount
        if returning == None: returning = sql_gen.NamedCol('NULL', None)

        embeddable = True # must use function

        if cols == None: row = [sql_gen.Col(sql_gen.all_cols, 'row')]
        else: row = [sql_gen.Col(c.name, 'row') for c in cols]

        query = sql_gen.RowExcIgnore(sql_gen.RowType(table), select_query,
            sql_gen.ReturnQuery(mk_insert(sql_gen.Values(row).to_str(db))),
            cols)
    else: query = mk_insert(select_query)

    if embeddable:
        # Create function
        function_name = sql_gen.clean_name(first_line)
        if src != None: function_name = src+': '+function_name
        while True:
            try:
                func = db.TempFunction(function_name)
                def_ = sql_gen.FunctionDef(func, sql_gen.SetOf(return_type),
                    query)

                run_query(db, def_.to_str(db), recover=True, cacheable=True,
                    log_ignore_excs=(DuplicateException,))
                break # this version was successful
            except DuplicateException, e:
                function_name = next_version(function_name)
                # try again with next version of name

        # Return query that uses function
        cols = None
        if returning != None: cols = [returning]
        func_table = sql_gen.NamedTable('f', sql_gen.FunctionCall(func), cols)
            # AS clause requires function alias
        return mk_select(db, func_table, order_by=None)

    return query

def insert_select(db, table, *args, **kw_args):
    '''For params, see mk_insert_select() and run_query_into()
    @param into sql_gen.Table with suggested name of temp table to put RETURNING
        values in
    '''
    returning = kw_args.get('returning', None)
    ignore = kw_args.get('ignore', False)

    into = kw_args.pop('into', None)
    if into != None: kw_args['embeddable'] = True
    recover = kw_args.pop('recover', None)
    if ignore: recover = True
    cacheable = kw_args.pop('cacheable', True)
    log_level = kw_args.pop('log_level', 2)

    rowcount_only = ignore and returning == None # keep NULL rows on server
    if rowcount_only: into = sql_gen.Table('rowcount')

    cur = run_query_into(db, mk_insert_select(db, table, *args, **kw_args),
        into, recover=recover, cacheable=cacheable, log_level=log_level)
    if rowcount_only: empty_temp(db, into)
    autoanalyze(db, table)
    return cur

default = sql_gen.default # tells insert() to use the default value for a column

def insert(db, table, row, *args, **kw_args):
    '''For params, see insert_select()'''
    ignore = kw_args.pop('ignore', False)
    if ignore: kw_args.setdefault('recover', True)

    if lists.is_seq(row): cols = None
    else:
        cols = row.keys()
        row = row.values()
    row = list(row) # ensure that "== []" works

    if row == []: query = None
    else: query = sql_gen.Values(row).to_str(db)

    try: return insert_select(db, table, cols, query, *args, **kw_args)
    except (DuplicateKeyException, NullValueException):
        if not ignore: raise
        return None
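
# Usage sketch (hypothetical table and column names). A dict row supplies the
# column names; ignore=True swallows duplicate-key and NULL-value errors:
#
#     insert(db, 'plot', {'plot_id': 1, 'site_id': 7}, ignore=True)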

def mk_update(db, table, changes=None, cond=None, in_place=False,
    cacheable_=True):
    '''
    @param changes [(col, new_value),...]
        * container can be any iterable type
        * col: sql_gen.Code|str (for col name)
        * new_value: sql_gen.Code|literal value
    @param cond sql_gen.Code WHERE condition. e.g. use sql_gen.*Cond objects.
    @param in_place If set, locks the table and updates rows in place.
        This avoids creating dead rows in PostgreSQL.
        * cond must be None
    @param cacheable_ Whether column structure information used to generate the
        query can be cached
    @return str query
    '''
    table = sql_gen.as_Table(table)
    changes = [(sql_gen.to_name_only_col(c, table), sql_gen.as_Value(v))
        for c, v in changes]

    if in_place:
        assert cond == None

        def col_type(col):
            return sql_gen.canon_type(db.col_info(
                sql_gen.with_default_table(c, table), cacheable_).type)
        changes = [(c, v, col_type(c)) for c, v in changes]
        query = 'ALTER TABLE '+table.to_str(db)+'\n'
        query += ',\n'.join(('ALTER COLUMN '+c.to_str(db)+' TYPE '+t+'\nUSING '
            +v.to_str(db) for c, v, t in changes))
    else:
        query = 'UPDATE '+table.to_str(db)+'\nSET\n'
        query += ',\n'.join((c.to_str(db)+' = '+v.to_str(db)
            for c, v in changes))
        if cond != None: query += '\nWHERE\n'+cond.to_str(db)

    query = with_explain_comment(db, query)

    return query

def update(db, table, *args, **kw_args):
    '''For params, see mk_update() and run_query()'''
    recover = kw_args.pop('recover', None)
    cacheable = kw_args.pop('cacheable', False)
    log_level = kw_args.pop('log_level', 2)

    cur = run_query(db, mk_update(db, table, *args, **kw_args), recover,
        cacheable, log_level=log_level)
    autoanalyze(db, table)
    return cur

def mk_delete(db, table, cond=None):
    '''
    @param cond sql_gen.Code WHERE condition. e.g. use sql_gen.*Cond objects.
    @return str query
    '''
    query = 'DELETE FROM '+table.to_str(db)
    if cond != None: query += '\nWHERE '+cond.to_str(db)

    query = with_explain_comment(db, query)

    return query

def delete(db, table, *args, **kw_args):
    '''For params, see mk_delete() and run_query()'''
    recover = kw_args.pop('recover', None)
    cacheable = kw_args.pop('cacheable', True)
    log_level = kw_args.pop('log_level', 2)

    cur = run_query(db, mk_delete(db, table, *args, **kw_args), recover,
        cacheable, log_level=log_level)
    autoanalyze(db, table)
    return cur

def last_insert_id(db):
    module = util.root_module(db.db)
    if module == 'psycopg2': return value(run_query(db, 'SELECT lastval()'))
    elif module == 'MySQLdb': return db.insert_id()
    else: return None

def define_func(db, def_):
    func = def_.function
    while True:
        try:
            run_query(db, def_.to_str(db), recover=True, cacheable=True,
                log_ignore_excs=(DuplicateException,))
            break # successful
        except DuplicateException:
            func.name = next_version(func.name)
            # try again with next version of name

def mk_flatten_mapping(db, into, cols, preserve=[], as_items=False):
    '''Creates a mapping from original column names (which may have collisions)
    to names that will be distinct among the columns' tables.
    This is meant to be used for several tables that are being joined together.
    @param cols The columns to combine. Duplicates will be removed.
    @param into The table for the new columns.
    @param preserve [sql_gen.Col...] Columns not to rename. Note that these
        columns will be included in the mapping even if they are not in cols.
        The tables of the provided Col objects will be changed to into, so make
        copies of them if you want to keep the original tables.
    @param as_items Whether to return a list of dict items instead of a dict
    @return dict(orig_col=new_col, ...)
        * orig_col: sql_gen.Col(orig_col_name, orig_table)
        * new_col: sql_gen.Col(orig_col_name, into)
        * All mappings use the into table so its name can easily be
          changed for all columns at once
    '''
    cols = lists.uniqify(cols)

    items = []
    for col in preserve:
        orig_col = copy.copy(col)
        col.table = into
        items.append((orig_col, col))
    preserve = set(preserve)
    for col in cols:
        if col not in preserve:
            items.append((col, sql_gen.Col(strings.ustr(col), into, col.srcs)))

    if not as_items: items = dict(items)
    return items

def flatten(db, into, joins, cols, limit=None, start=None, **kw_args):
    '''For params, see mk_flatten_mapping()
    @return See return value of mk_flatten_mapping()
    '''
    items = mk_flatten_mapping(db, into, cols, as_items=True, **kw_args)
    cols = [sql_gen.NamedCol(new.name, old) for old, new in items]
    run_query_into(db, mk_select(db, joins, cols, limit=limit, start=start),
        into=into, add_pkey_=True)
        # don't cache because the temp table will usually be truncated after use
    return dict(items)

##### Database structure introspection

#### Tables

def tables(db, schema_like='public', table_like='%', exact=False,
    cacheable=True):
    if exact: compare = '='
    else: compare = 'LIKE'

    module = util.root_module(db.db)
    if module == 'psycopg2':
        conds = [('schemaname', sql_gen.CompareCond(schema_like, compare)),
            ('tablename', sql_gen.CompareCond(table_like, compare))]
        return values(select(db, 'pg_tables', ['tablename'], conds,
            order_by='tablename', cacheable=cacheable, log_level=4))
    elif module == 'MySQLdb':
        return values(run_query(db, 'SHOW TABLES LIKE '+db.esc_value(table_like)
            , cacheable=True, log_level=4))
    else: raise NotImplementedError("Can't list tables for "+module+' database')

def table_exists(db, table, cacheable=True):
    table = sql_gen.as_Table(table)
    return list(tables(db, table.schema, table.name, True, cacheable)) != []

def table_row_count(db, table, recover=None):
    return value(run_query(db, mk_select(db, table, [sql_gen.row_count],
        order_by=None), recover=recover, log_level=3))

def table_col_names(db, table, recover=None):
    return list(col_names(select(db, table, limit=0, recover=recover,
        log_level=4)))

def table_cols(db, table, *args, **kw_args):
    return [sql_gen.as_Col(strings.ustr(c), table)
        for c in table_col_names(db, table, *args, **kw_args)]

def table_pkey_index(db, table, recover=None):
    table_str = sql_gen.Literal(table.to_str(db))
    try:
        return sql_gen.Table(value(run_query(db, '''\
SELECT relname
FROM pg_index
JOIN pg_class index ON index.oid = indexrelid
WHERE
indrelid = '''+table_str.to_str(db)+'''::regclass
AND indisprimary
'''
            , recover, cacheable=True, log_level=4)), table.schema)
    except StopIteration: raise DoesNotExistException('primary key', '')

def table_pkey_col(db, table, recover=None):
    table = sql_gen.as_Table(table)

    join_cols = ['table_schema', 'table_name', 'constraint_schema',
        'constraint_name']
    tables = [sql_gen.Table('key_column_usage', 'information_schema'),
        sql_gen.Join(sql_gen.Table('table_constraints', 'information_schema'),
            dict(((c, sql_gen.join_same_not_null) for c in join_cols)))]
    cols = [sql_gen.Col('column_name')]

    conds = [('constraint_type', 'PRIMARY KEY'), ('table_name', table.name)]
    schema = table.schema
    if schema != None: conds.append(('table_schema', schema))
    order_by = 'position_in_unique_constraint'

    try: return sql_gen.Col(value(select(db, tables, cols, conds,
        order_by=order_by, limit=1, log_level=4)), table)
    except StopIteration: raise DoesNotExistException('primary key', '')

def pkey_name(db, table, recover=None):
    '''If no pkey, returns the first column in the table.'''
    return pkey_col(db, table, recover).name

def pkey_col(db, table, recover=None):
    '''If no pkey, returns the first column in the table.'''
    try: return table_pkey_col(db, table, recover)
    except DoesNotExistException: return table_cols(db, table, recover)[0]

not_null_col = 'not_null_col'

def table_not_null_col(db, table, recover=None):
    '''Name assumed to be the value of not_null_col. If not found, uses pkey.'''
    if not_null_col in table_col_names(db, table, recover): return not_null_col
    else: return pkey_name(db, table, recover)

def constraint_cond(db, constraint):
    module = util.root_module(db.db)
    if module == 'psycopg2':
        table_str = sql_gen.Literal(constraint.table.to_str(db))
        name_str = sql_gen.Literal(constraint.name)
        return value(run_query(db, '''\
SELECT consrc
FROM pg_constraint
WHERE
conrelid = '''+table_str.to_str(db)+'''::regclass
AND conname = '''+name_str.to_str(db)+'''
'''
            , cacheable=True, log_level=4))
    else: raise NotImplementedError("Can't get constraint condition for "
        +module+' database')

def index_exprs(db, index):
    index = sql_gen.as_Table(index)
    module = util.root_module(db.db)
    if module == 'psycopg2':
        qual_index = sql_gen.Literal(index.to_str(db))
        return list(values(run_query(db, '''\
SELECT pg_get_indexdef(indexrelid, generate_series(1, indnatts), true)
FROM pg_index
WHERE indexrelid = '''+qual_index.to_str(db)+'''::regclass
'''
            , cacheable=True, log_level=4)))
    else: raise NotImplementedError()

def index_cols(db, index):
    '''Can also use this for UNIQUE constraints, because a UNIQUE index is
    automatically created. When you don't know whether something is a UNIQUE
    constraint or a UNIQUE index, use this function.'''
    return map(sql_gen.parse_expr_col, index_exprs(db, index))

def index_cond(db, index):
    index = sql_gen.as_Table(index)
    module = util.root_module(db.db)
    if module == 'psycopg2':
        qual_index = sql_gen.Literal(index.to_str(db))
        return value(run_query(db, '''\
SELECT pg_get_expr(indpred, indrelid, true)
FROM pg_index
WHERE indexrelid = '''+qual_index.to_str(db)+'''::regclass
'''
            , cacheable=True, log_level=4))
    else: raise NotImplementedError()

def index_order_by(db, index):
    return sql_gen.CustomCode(', '.join(index_exprs(db, index)))

def table_cluster_on(db, table, recover=None):
    '''
    @return The table's cluster index, or its pkey if none is set
    '''
    table_str = sql_gen.Literal(table.to_str(db))
    try:
        return sql_gen.Table(value(run_query(db, '''\
SELECT relname
FROM pg_index
JOIN pg_class index ON index.oid = indexrelid
WHERE
indrelid = '''+table_str.to_str(db)+'''::regclass
AND indisclustered
'''
            , recover, cacheable=True, log_level=4)), table.schema)
    except StopIteration: return table_pkey_index(db, table, recover)

def table_order_by(db, table, recover=None):
    if table.order_by == None:
        try: table.order_by = index_order_by(db, table_cluster_on(db, table,
            recover))
        except DoesNotExistException: pass
    return table.order_by

#### Functions

def function_exists(db, function):
    qual_function = sql_gen.Literal(function.to_str(db))
    try:
        select(db, fields=[sql_gen.Cast('regproc', qual_function)],
            recover=True, cacheable=True, log_level=4)
    except DoesNotExistException: return False
    except DuplicateException: return True # overloaded function
    else: return True

##### Structural changes

#### Columns

def add_col(db, table, col, comment=None, if_not_exists=False, **kw_args):
    '''
    @param col TypedCol Name may be versioned, so be sure to propagate any
        renaming back to any source column for the TypedCol.
    @param comment None|str SQL comment used to distinguish columns of the same
        name from each other when they contain different data, to allow the
        ADD COLUMN query to be cached. If not set, query will not be cached.
    '''
    assert isinstance(col, sql_gen.TypedCol)

    while True:
        str_ = 'ALTER TABLE '+table.to_str(db)+' ADD COLUMN '+col.to_str(db)
        if comment != None: str_ += ' '+sql_gen.esc_comment(comment)

        try:
            run_query(db, str_, recover=True, cacheable=True, **kw_args)
            break
        except DuplicateException:
            if if_not_exists: raise
            col.name = next_version(col.name)
            # try again with next version of name

def add_not_null(db, col):
    table = col.table
    col = sql_gen.to_name_only_col(col)
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ALTER COLUMN '
        +col.to_str(db)+' SET NOT NULL', cacheable=True, log_level=3)

def drop_not_null(db, col):
    table = col.table
    col = sql_gen.to_name_only_col(col)
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ALTER COLUMN '
        +col.to_str(db)+' DROP NOT NULL', cacheable=True, log_level=3)

row_num_col = '_row_num'

row_num_col_def = sql_gen.TypedCol('', 'serial', nullable=False,
    constraints='PRIMARY KEY')

def add_row_num(db, table, name=row_num_col):
    '''Adds a row number column to a table. Its definition is in
    row_num_col_def. It will be the primary key.'''
    col_def = copy.copy(row_num_col_def)
    col_def.name = name
    add_col(db, table, col_def, comment='', if_not_exists=True, log_level=3)

#### Indexes

def add_pkey(db, table, cols=None, recover=None):
    '''Adds a primary key.
    @param cols [sql_gen.Col,...] The columns in the primary key.
        Defaults to the first column in the table.
    @pre The table must not already have a primary key.
    '''
    table = sql_gen.as_Table(table)
    if cols == None: cols = [pkey_name(db, table, recover)]
    col_strs = [sql_gen.to_name_only_col(v).to_str(db) for v in cols]

    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ADD PRIMARY KEY ('
        +(', '.join(col_strs))+')', recover=True, cacheable=True, log_level=3,
        log_ignore_excs=(DuplicateException,))

def add_index(db, exprs, table=None, unique=False, ensure_not_null_=True):
    '''Adds an index on column(s) or expression(s) if it doesn't already exist.
    Currently, only function calls and literal values are supported expressions.
    @param ensure_not_null_ If set, translates NULL values to sentinel values.
        This allows indexes to be used for comparisons where NULLs are equal.
    '''
    exprs = lists.mk_seq(exprs)

    # Parse exprs
    old_exprs = exprs[:]
    exprs = []
    cols = []
    for i, expr in enumerate(old_exprs):
        expr = sql_gen.as_Col(expr, table)

        # Handle nullable columns
        if ensure_not_null_:
            try: expr = sql_gen.ensure_not_null(db, expr)
            except KeyError: pass # unknown type, so just create plain index

        # Extract col
        expr = copy.deepcopy(expr) # don't modify input!
        col = expr
        if isinstance(expr, sql_gen.FunctionCall): col = expr.args[0]
        expr = sql_gen.cast_literal(expr)
        if not isinstance(expr, (sql_gen.Expr, sql_gen.Col)):
            expr = sql_gen.Expr(expr)

        # Extract table
        if table == None:
            assert sql_gen.is_table_col(col)
            table = col.table

        if isinstance(col, sql_gen.Col): col.table = None

        exprs.append(expr)
        cols.append(col)

    table = sql_gen.as_Table(table)

    # Add index
    str_ = 'CREATE'
    if unique: str_ += ' UNIQUE'
    str_ += ' INDEX ON '+table.to_str(db)+' ('+(
        ', '.join((v.to_str(db) for v in exprs)))+')'
    run_query(db, str_, recover=True, cacheable=True, log_level=3)

already_indexed = object() # tells add_indexes() the pkey has already been added

def add_indexes(db, table, has_pkey=True):
    '''Adds an index on all columns in a table.
    @param has_pkey bool|already_indexed Whether a pkey instead of a regular
        index should be added on the first column.
        * If already_indexed, the pkey is assumed to have already been added
    '''
    cols = table_col_names(db, table)
    if has_pkey:
        if has_pkey is not already_indexed: add_pkey(db, table)
        cols = cols[1:]
    for col in cols: add_index(db, col, table)

#### Tables

### Maintenance

def analyze(db, table):
    table = sql_gen.as_Table(table)
    run_query(db, 'ANALYZE '+table.to_str(db), log_level=3)

def autoanalyze(db, table):
    if db.autoanalyze: analyze(db, table)

def vacuum(db, table):
    table = sql_gen.as_Table(table)
    db.with_autocommit(lambda: run_query(db, 'VACUUM ANALYZE '+table.to_str(db),
        log_level=3))

### Lifecycle

def drop(db, type_, name):
    name = sql_gen.as_Name(name)
    run_query(db, 'DROP '+type_+' IF EXISTS '+name.to_str(db)+' CASCADE')

def drop_table(db, table): drop(db, 'TABLE', table)

def create_table(db, table, cols=[], has_pkey=True, col_indexes=True,
    like=None):
    '''Creates a table.
    @param cols [sql_gen.TypedCol,...] The column names and types
    @param has_pkey If set, the first column becomes the primary key.
    @param col_indexes bool|[ref]
        * If True, indexes will be added on all non-pkey columns.
        * If a list reference, [0] will be set to a function to do this.
          This can be used to delay index creation until the table is populated.
    '''
    table = sql_gen.as_Table(table)

    if like != None:
        cols = [sql_gen.CustomCode('LIKE '+like.to_str(db)+' INCLUDING ALL')
            ]+cols
        table.order_by = like.order_by
    if has_pkey:
        cols[0] = pkey = copy.copy(cols[0]) # don't modify input!
        pkey.constraints = 'PRIMARY KEY'

    temp = table.is_temp and not db.debug_temp
        # temp tables permanent in debug_temp mode

    # Create table
    def create():
        str_ = 'CREATE'
        if temp: str_ += ' TEMP'
        str_ += ' TABLE '+table.to_str(db)+' (\n'
        str_ += '\n, '.join(c.to_str(db) for c in cols)
        str_ += '\n);'

        run_query(db, str_, recover=True, cacheable=True, log_level=2,
            log_ignore_excs=(DuplicateException,))
    if table.is_temp:
        while True:
            try:
                create()
                break
            except DuplicateException:
                table.name = next_version(table.name)
                # try again with next version of name
    else: create()

    # Add indexes
    if has_pkey: has_pkey = already_indexed
    def add_indexes_(): add_indexes(db, table, has_pkey)
    if isinstance(col_indexes, list): col_indexes[0] = add_indexes_ # defer
    elif col_indexes: add_indexes_() # add now

def copy_table_struct(db, src, dest):
    '''Creates a structure-only copy of a table. (Does not copy data.)'''
    create_table(db, dest, has_pkey=False, col_indexes=False, like=src)

def copy_table(db, src, dest):
    '''Creates a copy of a table, including data'''
    copy_table_struct(db, src, dest)
    insert_select(db, dest, None, mk_select(db, src))

### Data

def truncate(db, table, schema='public', **kw_args):
    '''For params, see run_query()'''
    table = sql_gen.as_Table(table, schema)
    return run_query(db, 'TRUNCATE '+table.to_str(db)+' CASCADE', **kw_args)

def empty_temp(db, tables):
    tables = lists.mk_seq(tables)
    for table in tables: truncate(db, table, log_level=3)

def empty_db(db, schema='public', **kw_args):
    '''For kw_args, see tables()'''
    for table in tables(db, schema, **kw_args): truncate(db, table, schema)

def distinct_table(db, table, distinct_on):
    '''Creates a copy of a temp table which is distinct on the given columns.
    The old and new tables will both get an index on these columns, to
    facilitate merge joins.
    @param distinct_on If empty, creates a table with one row. This is useful if
        your distinct_on columns are all literal values.
    @return The new table.
    '''
    new_table = sql_gen.suffixed_table(table, '_distinct')
    distinct_on = filter(sql_gen.is_table_col, distinct_on)

    copy_table_struct(db, table, new_table)

    limit = None
    if distinct_on == []: limit = 1 # one sample row
    else:
        add_index(db, distinct_on, new_table, unique=True)
        add_index(db, distinct_on, table) # for join optimization

    insert_select(db, new_table, None, mk_select(db, table, order_by=None,
        limit=limit), ignore=True)
    analyze(db, new_table)

    return new_table
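
# Usage sketch (hypothetical staging table and column): deduplicate a temp
# table on a natural key before merging it into the main tables:
#
#     dedup = distinct_table(db, staging_table,
#         [sql_gen.Col('accession_no', staging_table)])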