1
# Database access
2

    
3
import copy
4
import re
5
import warnings
6

    
7
import exc
8
import dicts
9
import iters
10
import lists
11
from Proxy import Proxy
12
import rand
13
import sql_gen
14
import strings
15
import util
16

    
17
##### Exceptions
18

    
19
def get_cur_query(cur, input_query=None):
20
    raw_query = None
21
    if hasattr(cur, 'query'): raw_query = cur.query
22
    elif hasattr(cur, '_last_executed'): raw_query = cur._last_executed
23
    
24
    if raw_query != None: return raw_query
25
    else: return '[input] '+strings.ustr(input_query)
26

    
27
def _add_cursor_info(e, *args, **kw_args):
28
    '''For params, see get_cur_query()'''
29
    exc.add_msg(e, 'query: '+strings.ustr(get_cur_query(*args, **kw_args)))
30

    
31
class DbException(exc.ExceptionWithCause):
32
    def __init__(self, msg, cause=None, cur=None):
33
        exc.ExceptionWithCause.__init__(self, msg, cause, cause_newline=True)
34
        if cur != None: _add_cursor_info(self, cur)
35

    
36
class ExceptionWithName(DbException):
37
    def __init__(self, name, cause=None):
38
        DbException.__init__(self, 'for name: '+strings.as_tt(str(name)), cause)
39
        self.name = name
40

    
41
class ExceptionWithValue(DbException):
42
    def __init__(self, value, cause=None):
43
        DbException.__init__(self, 'for value: '+strings.as_tt(repr(value)),
44
            cause)
45
        self.value = value
46

    
47
class ExceptionWithNameType(DbException):
48
    def __init__(self, type_, name, cause=None):
49
        DbException.__init__(self, 'for type: '+strings.as_tt(str(type_))
50
            +'; name: '+strings.as_tt(name), cause)
51
        self.type = type_
52
        self.name = name
53

    
54
class ConstraintException(DbException):
55
    def __init__(self, name, cols, cause=None):
56
        DbException.__init__(self, 'Violated '+strings.as_tt(name)
57
            +' constraint on columns: '+strings.as_tt(', '.join(cols)), cause)
58
        self.name = name
59
        self.cols = cols
60

    
61
class MissingCastException(DbException):
62
    def __init__(self, type_, col, cause=None):
63
        DbException.__init__(self, 'Missing cast to type '+strings.as_tt(type_)
64
            +' on column: '+strings.as_tt(col), cause)
65
        self.type = type_
66
        self.col = col
67

    
68
class NameException(DbException): pass
69

    
70
class DuplicateKeyException(ConstraintException): pass
71

    
72
class NullValueException(ConstraintException): pass
73

    
74
class InvalidValueException(ExceptionWithValue): pass
75

    
76
class DuplicateException(ExceptionWithNameType): pass
77

    
78
class EmptyRowException(DbException): pass
79

    
80
##### Warnings
81

    
82
class DbWarning(UserWarning): pass
83

    
84
##### Result retrieval
85

    
86
def col_names(cur): return (col[0] for col in cur.description)
87

    
88
def rows(cur): return iter(lambda: cur.fetchone(), None)
89

    
90
def consume_rows(cur):
91
    '''Used to fetch all rows so result will be cached'''
92
    iters.consume_iter(rows(cur))
93

    
94
def next_row(cur): return rows(cur).next()
95

    
96
def row(cur):
97
    row_ = next_row(cur)
98
    consume_rows(cur)
99
    return row_
100

    
101
def next_value(cur): return next_row(cur)[0]
102

    
103
def value(cur): return row(cur)[0]
104

    
105
def values(cur): return iters.func_iter(lambda: next_value(cur))
106

    
107
def value_or_none(cur):
108
    try: return value(cur)
109
    except StopIteration: return None
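
# Usage sketch (added for illustration; the queries are hypothetical): how the
# retrieval helpers above are typically combined with run_query(), defined
# later in this module.
def _example_result_retrieval(db):
    # value() returns the first column of the first row and drains the cursor
    # so the result can be cached
    count = value(run_query(db, 'SELECT count(*) FROM pg_tables'))
    # rows() iterates until fetchone() returns None
    names = [r[0] for r in rows(run_query(db, 'SELECT tablename FROM pg_tables'))]
    return count, names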
110

    
111
##### Escaping
112

    
113
def esc_name_by_module(module, name):
114
    if module == 'psycopg2' or module == None: quote = '"'
115
    elif module == 'MySQLdb': quote = '`'
116
    else: raise NotImplementedError("Can't escape name for "+module+' database')
117
    return sql_gen.esc_name(name, quote)
118

    
119
def esc_name_by_engine(engine, name, **kw_args):
120
    return esc_name_by_module(db_engines[engine][0], name, **kw_args)
121

    
122
def esc_name(db, name, **kw_args):
123
    return esc_name_by_module(util.root_module(db.db), name, **kw_args)
124

    
125
def qual_name(db, schema, table):
126
    def esc_name_(name): return esc_name(db, name)
127
    table = esc_name_(table)
128
    if schema != None: return esc_name_(schema)+'.'+table
129
    else: return table
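
# Usage sketch (added for illustration; schema/table/column names are
# hypothetical): escaping depends on the DB-API module behind the connection --
# '"' for psycopg2, '`' for MySQLdb.
def _example_escaping(db):
    col_sql = esc_name(db, 'my column') # e.g. '"my column"' on PostgreSQL
    table_sql = qual_name(db, 'public', 'my table')
    return col_sql, table_sql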
130

    
131
##### Database connections
132

    
133
db_config_names = ['engine', 'host', 'user', 'password', 'database', 'schemas']
134

    
135
db_engines = {
136
    'MySQL': ('MySQLdb', {'password': 'passwd', 'database': 'db'}),
137
    'PostgreSQL': ('psycopg2', {}),
138
}
139

    
140
DatabaseErrors_set = set([DbException])
141
DatabaseErrors = tuple(DatabaseErrors_set)
142

    
143
def _add_module(module):
144
    DatabaseErrors_set.add(module.DatabaseError)
145
    global DatabaseErrors
146
    DatabaseErrors = tuple(DatabaseErrors_set)
147

    
148
def db_config_str(db_config):
149
    return db_config['engine']+' database '+db_config['database']
150

    
151
log_debug_none = lambda msg, level=2: None
152

    
153
class DbConn:
154
    def __init__(self, db_config, autocommit=True, caching=True,
155
        log_debug=log_debug_none, debug_temp=False, src=None):
156
        '''
157
        @param debug_temp Whether temporary objects should instead be permanent.
158
            This assists in debugging the internal objects used by the program.
159
        @param src In autocommit mode, will be included in a comment in every
160
            query, to help identify the data source in pg_stat_activity.
161
        '''
162
        self.db_config = db_config
163
        self.autocommit = autocommit
164
        self.caching = caching
165
        self.log_debug = log_debug
166
        self.debug = log_debug != log_debug_none
167
        self.debug_temp = debug_temp
168
        self.src = src
169
        self.autoanalyze = False
170
        
171
        self._savepoint = 0
172
        self._reset()
173
    
174
    def __getattr__(self, name):
175
        if name == '__dict__': raise Exception('getting __dict__')
176
        if name == 'db': return self._db()
177
        else: raise AttributeError()
178
    
179
    def __getstate__(self):
180
        state = copy.copy(self.__dict__) # shallow copy
181
        state['log_debug'] = None # don't pickle the debug callback
182
        state['_DbConn__db'] = None # don't pickle the connection
183
        return state
184
    
185
    def clear_cache(self): self.query_results = {}
186
    
187
    def _reset(self):
188
        self.clear_cache()
189
        assert self._savepoint == 0
190
        self._notices_seen = set()
191
        self.__db = None
192
    
193
    def connected(self): return self.__db != None
194
    
195
    def close(self):
196
        if not self.connected(): return
197
        
198
        # Record that the automatic transaction is now closed
199
        self._savepoint -= 1
200
        
201
        self.db.close()
202
        self._reset()
203
    
204
    def reconnect(self):
205
        # Do not do this in test mode as it would roll back everything
206
        if self.autocommit: self.close()
207
        # Connection will be reopened automatically on first query
208
    
209
    def _db(self):
210
        if self.__db == None:
211
            # Process db_config
212
            db_config = self.db_config.copy() # don't modify input!
213
            schemas = db_config.pop('schemas', None)
214
            module_name, mappings = db_engines[db_config.pop('engine')]
215
            module = __import__(module_name)
216
            _add_module(module)
217
            for orig, new in mappings.iteritems():
218
                try: util.rename_key(db_config, orig, new)
219
                except KeyError: pass
220
            
221
            # Connect
222
            self.__db = module.connect(**db_config)
223
            
224
            # Record that a transaction is already open
225
            self._savepoint += 1
226
            
227
            # Configure connection
228
            if hasattr(self.db, 'set_isolation_level'):
229
                import psycopg2.extensions
230
                self.db.set_isolation_level(
231
                    psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
232
            if schemas != None:
233
                search_path = [self.esc_name(s) for s in schemas.split(',')]
234
                search_path.append(value(run_query(self, 'SHOW search_path',
235
                    log_level=4)))
236
                run_query(self, 'SET search_path TO '+(','.join(search_path)),
237
                    log_level=3)
238
        
239
        return self.__db
240
    
241
    class DbCursor(Proxy):
242
        def __init__(self, outer):
243
            Proxy.__init__(self, outer.db.cursor())
244
            self.outer = outer
245
            self.query_results = outer.query_results
246
            self.query_lookup = None
247
            self.result = []
248
        
249
        def execute(self, query):
250
            self._is_insert = query.startswith('INSERT')
251
            self.query_lookup = query
252
            try:
253
                try: cur = self.inner.execute(query)
254
                finally: self.query = get_cur_query(self.inner, query)
255
            except Exception, e:
256
                self.result = e # cache the exception as the result
257
                self._cache_result()
258
                raise
259
            
260
            # Always cache certain queries
261
            query = sql_gen.lstrip(query)
262
            if query.startswith('CREATE') or query.startswith('ALTER'):
263
                # structural changes
264
                # Rest of query must be unique in the face of name collisions,
265
                # so don't cache ADD COLUMN unless it has a distinguishing comment
266
                if query.find('ADD COLUMN') < 0 or query.endswith('*/'):
267
                    self._cache_result()
268
            elif self.rowcount == 0 and query.startswith('SELECT'): # empty
269
                consume_rows(self) # fetch all rows so result will be cached
270
            
271
            return cur
272
        
273
        def fetchone(self):
274
            row = self.inner.fetchone()
275
            if row != None: self.result.append(row)
276
            # otherwise, fetched all rows
277
            else: self._cache_result()
278
            return row
279
        
280
        def _cache_result(self):
281
            # For inserts that return a result set, don't cache result set since
282
            # inserts are not idempotent. Other non-SELECT queries don't have
283
            # their result set read, so only exceptions will be cached (an
284
            # invalid query will always be invalid).
285
            if self.query_results != None and (not self._is_insert
286
                or isinstance(self.result, Exception)):
287
                
288
                assert self.query_lookup != None
289
                self.query_results[self.query_lookup] = self.CacheCursor(
290
                    util.dict_subset(dicts.AttrsDictView(self),
291
                    ['query', 'result', 'rowcount', 'description']))
292
        
293
        class CacheCursor:
294
            def __init__(self, cached_result): self.__dict__ = cached_result
295
            
296
            def execute(self, *args, **kw_args):
297
                if isinstance(self.result, Exception): raise self.result
298
                # otherwise, result is a rows list
299
                self.iter = iter(self.result)
300
            
301
            def fetchone(self):
302
                try: return self.iter.next()
303
                except StopIteration: return None
304
    
305
    def esc_value(self, value):
306
        try: str_ = self.mogrify('%s', [value])
307
        except NotImplementedError, e:
308
            module = util.root_module(self.db)
309
            if module == 'MySQLdb':
310
                import _mysql
311
                str_ = _mysql.escape_string(value)
312
            else: raise e
313
        return strings.to_unicode(str_)
314
    
315
    def esc_name(self, name): return esc_name(self, name) # calls global func
316
    
317
    def std_code(self, str_):
318
        '''Standardizes SQL code.
319
        * Ensures that string literals are prefixed by `E`
320
        '''
321
        if str_.startswith("'"): str_ = 'E'+str_
322
        return str_
323
    
324
    def can_mogrify(self):
325
        module = util.root_module(self.db)
326
        return module == 'psycopg2'
327
    
328
    def mogrify(self, query, params=None):
329
        if self.can_mogrify(): return self.db.cursor().mogrify(query, params)
330
        else: raise NotImplementedError("Can't mogrify query")
331
    
332
    def print_notices(self):
333
        if hasattr(self.db, 'notices'):
334
            for msg in self.db.notices:
335
                if msg not in self._notices_seen:
336
                    self._notices_seen.add(msg)
337
                    self.log_debug(msg, level=2)
338
    
339
    def run_query(self, query, cacheable=False, log_level=2,
340
        debug_msg_ref=None):
341
        '''
342
        @param log_ignore_excs The log_level will be increased by 2 if the query
343
            throws one of these exceptions.
344
        @param debug_msg_ref If specified, the log message will be returned in
345
            this instead of being output. This allows you to filter log messages
346
            depending on the result of the query.
347
        '''
348
        assert query != None
349
        
350
        if self.autocommit and self.src != None:
351
            query = sql_gen.esc_comment(self.src)+'\t'+query
352
        
353
        if not self.caching: cacheable = False
354
        used_cache = False
355
        
356
        def log_msg(query):
357
            if used_cache: cache_status = 'cache hit'
358
            elif cacheable: cache_status = 'cache miss'
359
            else: cache_status = 'non-cacheable'
360
            
361
            # So that the src comment is hidden when cat'ing the file, and put
362
            # on a separate line when viewed in a text editor.
363
            query = query.replace('\t', '\r', 1)
364
            
365
            return 'DB query: '+cache_status+':\n'+strings.as_code(query, 'SQL')
366
        
367
        try:
368
            # Get cursor
369
            if cacheable:
370
                try:
371
                    cur = self.query_results[query]
372
                    used_cache = True
373
                except KeyError: cur = self.DbCursor(self)
374
            else: cur = self.db.cursor()
375
            
376
            # Log query
377
            if self.debug and debug_msg_ref == None: # log before running
378
                self.log_debug(log_msg(query), log_level)
379
            
380
            # Run query
381
            try:
382
                cur.execute(query)
383
                self.do_autocommit()
384
            except Exception, e:
385
                _add_cursor_info(e, self, query)
386
                raise
387
        finally:
388
            self.print_notices()
389
            if self.debug and debug_msg_ref != None: # return after running
390
                debug_msg_ref[0] = log_msg(str(get_cur_query(cur, query)))
391
        
392
        return cur
393
    
394
    def is_cached(self, query): return query in self.query_results
395
    
396
    def with_autocommit(self, func):
397
        import psycopg2.extensions
398
        
399
        prev_isolation_level = self.db.isolation_level
400
        self.db.set_isolation_level(
401
            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
402
        try: return func()
403
        finally: self.db.set_isolation_level(prev_isolation_level)
404
    
405
    def with_savepoint(self, func):
406
        top = self._savepoint == 0
407
        savepoint = 'level_'+str(self._savepoint)
408
        
409
        # Must happen before running queries so they don't get autocommitted
410
        self._savepoint += 1
411
        
412
        if top: query = 'START TRANSACTION ISOLATION LEVEL READ COMMITTED'
413
        else: query = 'SAVEPOINT '+savepoint
414
        self.run_query(query, log_level=4)
415
        try:
416
            return_val = func()
            if top: self.run_query('COMMIT', log_level=4)
            return return_val
418
        except:
419
            if top: query = 'ROLLBACK'
420
            else: query = 'ROLLBACK TO SAVEPOINT '+savepoint
421
            self.run_query(query, log_level=4)
422
            
423
            raise
424
        finally:
425
            # Always release savepoint, because after ROLLBACK TO SAVEPOINT,
426
            # "The savepoint remains valid and can be rolled back to again"
427
            # (http://www.postgresql.org/docs/8.3/static/sql-rollback-to.html).
428
            if not top:
429
                self.run_query('RELEASE SAVEPOINT '+savepoint, log_level=4)
430
            
431
            self._savepoint -= 1
432
            assert self._savepoint >= 0
433
            
434
            self.do_autocommit() # OK to do this after ROLLBACK TO SAVEPOINT
435
    
436
    def do_autocommit(self):
437
        '''Autocommits if outside savepoint'''
438
        assert self._savepoint >= 1
439
        if self.autocommit and self._savepoint == 1:
440
            self.log_debug('Autocommitting', level=4)
441
            self.db.commit()
442
    
443
    def col_info(self, col, cacheable=True):
444
        table = sql_gen.Table('columns', 'information_schema')
445
        type_ = sql_gen.Coalesce(sql_gen.Nullif(sql_gen.Col('data_type'),
446
            'USER-DEFINED'), sql_gen.Col('udt_name'))
447
        cols = [type_, 'column_default',
448
            sql_gen.Cast('boolean', sql_gen.Col('is_nullable'))]
449
        
450
        conds = [('table_name', col.table.name), ('column_name', col.name)]
451
        schema = col.table.schema
452
        if schema != None: conds.append(('table_schema', schema))
453
        
454
        type_, default, nullable = row(select(self, table, cols, conds,
455
            order_by='table_schema', limit=1, cacheable=cacheable, log_level=4))
456
            # TODO: order_by search_path schema order
457
        default = sql_gen.as_Code(default, self)
458
        
459
        return sql_gen.TypedCol(col.name, type_, default, nullable)
460
    
461
    def TempFunction(self, name):
462
        if self.debug_temp: schema = None
463
        else: schema = 'pg_temp'
464
        return sql_gen.Function(name, schema)
465

    
466
connect = DbConn
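
# Usage sketch (added for illustration; all connection values are
# hypothetical): the config keys follow db_config_names above, and the engine
# must be a key of db_engines.
def _example_connect():
    db_config = {'engine': 'PostgreSQL', 'host': 'localhost', 'user': 'someuser',
        'password': 'secret', 'database': 'somedb', 'schemas': 'public'}
    db = connect(db_config, autocommit=False, caching=True)
    try: return value(run_query(db, 'SELECT 1'))
    finally: db.close()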
467

    
468
##### Recoverable querying
469

    
470
def with_savepoint(db, func): return db.with_savepoint(func)
471

    
472
def run_query(db, query, recover=None, cacheable=False, log_level=2,
473
    log_ignore_excs=None, **kw_args):
474
    '''For params, see DbConn.run_query()'''
475
    if recover == None: recover = False
476
    if log_ignore_excs == None: log_ignore_excs = ()
477
    log_ignore_excs = tuple(log_ignore_excs)
478
    
479
    debug_msg_ref = None # usually, db.run_query() logs query before running it
480
    # But if filtering with log_ignore_excs, wait until after exception parsing
481
    if log_ignore_excs != () or not db.can_mogrify(): debug_msg_ref = [None]
482
    
483
    try:
484
        try:
485
            def run(): return db.run_query(query, cacheable, log_level,
486
                debug_msg_ref, **kw_args)
487
            if recover and not db.is_cached(query):
488
                return with_savepoint(db, run)
489
            else: return run() # don't need savepoint if cached
490
        except Exception, e:
491
            msg = strings.ustr(e.args[0])
492
            
493
            match = re.match(r'^duplicate key value violates unique constraint '
494
                r'"((_?[^\W_]+(?=[._]))?.+?)"', msg)
495
            if match:
496
                constraint, table = match.groups()
497
                cols = []
498
                if recover: # need auto-rollback to run index_cols()
499
                    try: cols = index_cols(db, table, constraint)
500
                    except NotImplementedError: pass
501
                raise DuplicateKeyException(constraint, cols, e)
502
            
503
            match = re.match(r'^null value in column "(.+?)" violates not-null'
504
                r' constraint', msg)
505
            if match: raise NullValueException('NOT NULL', [match.group(1)], e)
506
            
507
            match = re.match(r'^(?:invalid input (?:syntax|value)\b.*?'
508
                r'|.+? field value out of range): "(.+?)"', msg)
509
            if match:
510
                value, = match.groups()
511
                raise InvalidValueException(strings.to_unicode(value), e)
512
            
513
            match = re.match(r'^column "(.+?)" is of type (.+?) but expression '
514
                r'is of type', msg)
515
            if match:
516
                col, type_ = match.groups()
517
                raise MissingCastException(type_, col, e)
518
            
519
            match = re.match(r'^(\S+) "(.+?)".*? already exists', msg)
520
            if match:
521
                type_, name = match.groups()
522
                raise DuplicateException(type_, name, e)
523
            
524
            raise # no specific exception raised
525
    except log_ignore_excs:
526
        log_level += 2
527
        raise
528
    finally:
529
        if debug_msg_ref != None and debug_msg_ref[0] != None:
530
            db.log_debug(debug_msg_ref[0], log_level)
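
# Usage sketch (added for illustration; the insert statement is supplied by the
# caller): with recover=True, run_query() wraps the query in a savepoint and
# translates common errors into the exception classes defined above.
def _example_recoverable_insert(db, insert_query):
    try: return run_query(db, insert_query, recover=True)
    except DuplicateKeyException, e:
        # the constraint name and (when recoverable) its columns are attached
        return e.name, e.cols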
531

    
532
##### Basic queries
533

    
534
def next_version(name):
535
    version = 1 # first existing name was version 0
536
    match = re.match(r'^(.*)#(\d+)$', name)
537
    if match:
538
        name, version = match.groups()
539
        version = int(version)+1
540
    return sql_gen.concat(name, '#'+str(version))
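
# Illustration (added): next_version() is used below to retry CREATE statements
# under a new name after a DuplicateException. Assuming sql_gen.concat() simply
# appends when the name is short enough:
#   next_version('sample')   -> 'sample#1'
#   next_version('sample#1') -> 'sample#2'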
541

    
542
def lock_table(db, table, mode):
543
    table = sql_gen.as_Table(table)
544
    run_query(db, 'LOCK TABLE '+table.to_str(db)+' IN '+mode+' MODE')
545

    
546
def run_query_into(db, query, into=None, add_indexes_=False, **kw_args):
547
    '''Outputs a query to a temp table.
548
    For params, see run_query().
549
    '''
550
    if into == None: return run_query(db, query, **kw_args)
551
    
552
    assert isinstance(into, sql_gen.Table)
553
    
554
    into.is_temp = True
555
    # "temporary tables cannot specify a schema name", so remove schema
556
    into.schema = None
557
    
558
    kw_args['recover'] = True
559
    kw_args.setdefault('log_ignore_excs', (DuplicateException,))
560
    
561
    temp = not db.debug_temp # tables are permanent in debug_temp mode
562
    
563
    # Create table
564
    while True:
565
        create_query = 'CREATE'
566
        if temp: create_query += ' TEMP'
567
        create_query += ' TABLE '+into.to_str(db)+' AS\n'+query
568
        
569
        try:
570
            cur = run_query(db, create_query, **kw_args)
571
                # CREATE TABLE AS sets rowcount to # rows in query
572
            break
573
        except DuplicateException, e:
574
            into.name = next_version(into.name)
575
            # try again with next version of name
576
    
577
    if add_indexes_: add_indexes(db, into)
578
    
579
    # According to the PostgreSQL doc, "The autovacuum daemon cannot access and
580
    # therefore cannot vacuum or analyze temporary tables. [...] if a temporary
581
    # table is going to be used in complex queries, it is wise to run ANALYZE on
582
    # the temporary table after it is populated."
583
    # (http://www.postgresql.org/docs/9.1/static/sql-createtable.html)
584
    # If into is not a temp table, ANALYZE is useful but not required.
585
    analyze(db, into)
586
    
587
    return cur
588

    
589
order_by_pkey = object() # tells mk_select() to order by the pkey
590

    
591
distinct_on_all = object() # tells mk_select() to SELECT DISTINCT ON all columns
592

    
593
def mk_select(db, tables, fields=None, conds=None, distinct_on=[], limit=None,
594
    start=None, order_by=order_by_pkey, default_table=None):
595
    '''
596
    @param tables The single table to select from, or a list of tables to join
597
        together, with tables after the first being sql_gen.Join objects
598
    @param fields Use None to select all fields in the table
599
    @param conds WHERE conditions: [(compare_left_side, compare_right_side),...]
600
        * container can be any iterable type
601
        * compare_left_side: sql_gen.Code|str (for col name)
602
        * compare_right_side: sql_gen.ValueCond|literal value
603
    @param distinct_on The columns to SELECT DISTINCT ON, or distinct_on_all to
604
        use all columns
605
    @return query
606
    '''
607
    # Parse tables param
608
    tables = lists.mk_seq(tables)
609
    tables = list(tables) # don't modify input! (list() copies input)
610
    table0 = sql_gen.as_Table(tables.pop(0)) # first table is separate
611
    
612
    # Parse other params
613
    if conds == None: conds = []
614
    elif dicts.is_dict(conds): conds = conds.items()
615
    conds = list(conds) # don't modify input! (list() copies input)
616
    assert limit == None or isinstance(limit, (int, long))
617
    assert start == None or isinstance(start, (int, long))
618
    if order_by is order_by_pkey:
619
        if distinct_on != []: order_by = None
620
        else: order_by = pkey(db, table0, recover=True)
621
    
622
    query = 'SELECT'
623
    
624
    def parse_col(col): return sql_gen.as_Col(col, default_table).to_str(db)
625
    
626
    # DISTINCT ON columns
627
    if distinct_on != []:
628
        query += '\nDISTINCT'
629
        if distinct_on is not distinct_on_all:
630
            query += ' ON ('+(', '.join(map(parse_col, distinct_on)))+')'
631
    
632
    # Columns
633
    if query.find('\n') >= 0: whitespace = '\n'
634
    else: whitespace = ' '
635
    if fields == None: query += whitespace+'*'
636
    else:
637
        assert fields != []
638
        if len(fields) > 1: whitespace = '\n'
639
        query += whitespace+('\n, '.join(map(parse_col, fields)))
640
    
641
    # Main table
642
    if query.find('\n') >= 0 or len(tables) > 0: whitespace = '\n'
643
    else: whitespace = ' '
644
    query += whitespace+'FROM '+table0.to_str(db)
645
    
646
    # Add joins
647
    left_table = table0
648
    for join_ in tables:
649
        table = join_.table
650
        
651
        # Parse special values
652
        if join_.type_ is sql_gen.filter_out: # filter no match
653
            conds.append((sql_gen.Col(table_not_null_col(db, table), table),
654
                sql_gen.CompareCond(None, '~=')))
655
        
656
        query += '\n'+join_.to_str(db, left_table)
657
        
658
        left_table = table
659
    
660
    missing = True
661
    if conds != []:
662
        if len(conds) == 1: whitespace = ' '
663
        else: whitespace = '\n'
664
        query += '\n'+sql_gen.combine_conds([sql_gen.ColValueCond(l, r)
665
            .to_str(db) for l, r in conds], 'WHERE')
666
        missing = False
667
    if order_by != None:
668
        query += '\nORDER BY '+sql_gen.as_Col(order_by, table0).to_str(db)
669
    if limit != None: query += '\nLIMIT '+str(limit); missing = False
670
    if start != None:
671
        if start != 0: query += '\nOFFSET '+str(start)
672
        missing = False
673
    if missing: warnings.warn(DbWarning(
674
        'SELECT statement missing a WHERE, LIMIT, or OFFSET clause: '+query))
675
    
676
    return query
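
# Usage sketch (added for illustration; table and column names are
# hypothetical): building, but not running, a SELECT. Passing order_by=None
# avoids the pkey lookup that the default order_by would trigger.
def _example_mk_select(db):
    return mk_select(db, 'plots', ['plot_id', 'area'], {'site': 'A'},
        limit=10, order_by=None)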
677

    
678
def select(db, *args, **kw_args):
679
    '''For params, see mk_select() and run_query()'''
680
    recover = kw_args.pop('recover', None)
681
    cacheable = kw_args.pop('cacheable', True)
682
    log_level = kw_args.pop('log_level', 2)
683
    
684
    return run_query(db, mk_select(db, *args, **kw_args), recover, cacheable,
685
        log_level=log_level)
686

    
687
def mk_insert_select(db, table, cols=None, select_query=None, returning=None,
688
    embeddable=False, ignore=False, src=None):
689
    '''
690
    @param returning str|None An inserted column (such as pkey) to return
691
    @param embeddable Whether the query should be embeddable as a nested SELECT.
692
        Warning: If you set this and cacheable=True when the query is run, the
693
        query will be fully cached, not just if it raises an exception.
694
    @param ignore Whether to ignore duplicate keys.
695
    @param src Will be included in the name of any created function, to help
696
        identify the data source in pg_stat_activity.
697
    '''
698
    table = sql_gen.remove_table_rename(sql_gen.as_Table(table))
699
    if cols == []: cols = None # no cols (all defaults) = unknown col names
700
    if cols != None: cols = [sql_gen.to_name_only_col(c, table) for c in cols]
701
    if select_query == None: select_query = 'DEFAULT VALUES'
702
    if returning != None: returning = sql_gen.as_Col(returning, table)
703
    
704
    first_line = 'INSERT INTO '+table.to_str(db)
705
    
706
    def mk_insert(select_query):
707
        query = first_line
708
        if cols != None:
709
            query += '\n('+(', '.join((c.to_str(db) for c in cols)))+')'
710
        query += '\n'+select_query
711
        
712
        if returning != None:
713
            returning_name_col = sql_gen.to_name_only_col(returning)
714
            query += '\nRETURNING '+returning_name_col.to_str(db)
715
        
716
        return query
717
    
718
    return_type = 'unknown'
719
    if returning != None: return_type = returning.to_str(db)+'%TYPE'
720
    
721
    lang = 'sql'
722
    if ignore:
723
        # Always return something to set the correct rowcount
724
        if returning == None: returning = sql_gen.NamedCol('NULL', None)
725
        
726
        embeddable = True # must use function
727
        lang = 'plpgsql'
728
        
729
        if cols == None:
730
            row = [sql_gen.Col(sql_gen.all_cols, 'row')]
731
            row_vars = [sql_gen.Table('row')]
732
        else:
733
            row_vars = row = [sql_gen.Col(c.name, 'row') for c in cols]
734
        
735
        query = '''\
736
DECLARE
737
    row '''+table.to_str(db)+'''%ROWTYPE;
738
BEGIN
739
    /* Need an EXCEPTION block for each individual row because "When an error is
740
    caught by an EXCEPTION clause, [...] all changes to persistent database
741
    state within the block are rolled back."
742
    This is unfortunate because "A block containing an EXCEPTION clause is
743
    significantly more expensive to enter and exit than a block without one."
744
    (http://www.postgresql.org/docs/8.3/static/plpgsql-control-structures.html\
745
#PLPGSQL-ERROR-TRAPPING)
746
    */
747
    FOR '''+(', '.join((v.to_str(db) for v in row_vars)))+''' IN
748
'''+select_query+'''
749
    LOOP
750
        BEGIN
751
            RETURN QUERY
752
'''+mk_insert(sql_gen.Values(row).to_str(db))+'''
753
;
754
        EXCEPTION
755
            WHEN unique_violation THEN NULL; -- continue to next row
756
        END;
757
    END LOOP;
758
END;\
759
'''
760
    else: query = mk_insert(select_query)
761
    
762
    if embeddable:
763
        # Create function
764
        function_name = sql_gen.clean_name(first_line)
765
        if src != None: function_name = src+': '+function_name
766
        while True:
767
            try:
768
                function = db.TempFunction(function_name)
769
                
770
                function_query = '''\
771
CREATE FUNCTION '''+function.to_str(db)+'''()
772
RETURNS SETOF '''+return_type+'''
773
LANGUAGE '''+lang+'''
774
AS $$
775
'''+query+'''
776
$$;
777
'''
778
                run_query(db, function_query, recover=True, cacheable=True,
779
                    log_ignore_excs=(DuplicateException,))
780
                break # this version was successful
781
            except DuplicateException, e:
782
                function_name = next_version(function_name)
783
                # try again with next version of name
784
        
785
        # Return query that uses function
786
        cols = None
787
        if returning != None: cols = [returning]
788
        func_table = sql_gen.NamedTable('f', sql_gen.FunctionCall(function),
789
            cols) # AS clause requires function alias
790
        return mk_select(db, func_table, start=0, order_by=None)
791
    
792
    return query
793

    
794
def insert_select(db, table, *args, **kw_args):
795
    '''For params, see mk_insert_select() and run_query_into()
796
    @param into sql_gen.Table with suggested name of temp table to put RETURNING
797
        values in
798
    '''
799
    returning = kw_args.get('returning', None)
800
    ignore = kw_args.get('ignore', False)
801
    
802
    into = kw_args.pop('into', None)
803
    if into != None: kw_args['embeddable'] = True
804
    recover = kw_args.pop('recover', None)
805
    if ignore: recover = True
806
    cacheable = kw_args.pop('cacheable', True)
807
    log_level = kw_args.pop('log_level', 2)
808
    
809
    rowcount_only = ignore and returning == None # keep NULL rows on server
810
    if rowcount_only: into = sql_gen.Table('rowcount')
811
    
812
    cur = run_query_into(db, mk_insert_select(db, table, *args, **kw_args),
813
        into, recover=recover, cacheable=cacheable, log_level=log_level)
814
    if rowcount_only: empty_temp(db, into)
815
    autoanalyze(db, table)
816
    return cur
817

    
818
default = sql_gen.default # tells insert() to use the default value for a column
819

    
820
def insert(db, table, row, *args, **kw_args):
821
    '''For params, see insert_select()'''
822
    if lists.is_seq(row): cols = None
823
    else:
824
        cols = row.keys()
825
        row = row.values()
826
    row = list(row) # ensure that "== []" works
827
    
828
    if row == []: query = None
829
    else: query = sql_gen.Values(row).to_str(db)
830
    
831
    return insert_select(db, table, cols, query, *args, **kw_args)
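
# Usage sketch (added for illustration; table and column names are
# hypothetical): inserting a single row from a dict. The dict keys become the
# column list and the values become a VALUES clause.
def _example_insert(db):
    return insert(db, 'plots', {'plot_id': 1, 'area': 2.5})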
832

    
833
def mk_update(db, table, changes=None, cond=None, in_place=False,
834
    cacheable_=True):
835
    '''
836
    @param changes [(col, new_value),...]
837
        * container can be any iterable type
838
        * col: sql_gen.Code|str (for col name)
839
        * new_value: sql_gen.Code|literal value
840
    @param cond sql_gen.Code WHERE condition. e.g. use sql_gen.*Cond objects.
841
    @param in_place If set, locks the table and updates rows in place.
842
        This avoids creating dead rows in PostgreSQL.
843
        * cond must be None
844
    @param cacheable_ Whether column structure information used to generate the
845
        query can be cached
846
    @return str query
847
    '''
848
    table = sql_gen.as_Table(table)
849
    changes = [(sql_gen.to_name_only_col(c, table), sql_gen.as_Value(v))
850
        for c, v in changes]
851
    
852
    if in_place:
853
        assert cond == None
854
        
855
        query = 'ALTER TABLE '+table.to_str(db)+'\n'
856
        query += ',\n'.join(('ALTER COLUMN '+c.to_str(db)+' TYPE '
857
            +db.col_info(sql_gen.with_default_table(c, table), cacheable_).type
858
            +'\nUSING '+v.to_str(db) for c, v in changes))
859
    else:
860
        query = 'UPDATE '+table.to_str(db)+'\nSET\n'
861
        query += ',\n'.join((c.to_str(db)+' = '+v.to_str(db)
862
            for c, v in changes))
863
        if cond != None: query += '\nWHERE\n'+cond.to_str(db)
864
    
865
    return query
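
# Usage sketch (added for illustration; table and column names are
# hypothetical): building an UPDATE query. The WHERE condition uses
# sql_gen.ColValueCond, the same condition type mk_select() uses above.
def _example_mk_update(db):
    return mk_update(db, 'plots', [('area', 3.5)],
        cond=sql_gen.ColValueCond('plot_id', 1))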
866

    
867
def update(db, table, *args, **kw_args):
868
    '''For params, see mk_update() and run_query()'''
869
    recover = kw_args.pop('recover', None)
870
    cacheable = kw_args.pop('cacheable', False)
871
    log_level = kw_args.pop('log_level', 2)
872
    
873
    cur = run_query(db, mk_update(db, table, *args, **kw_args), recover,
874
        cacheable, log_level=log_level)
875
    autoanalyze(db, table)
876
    return cur
877

    
878
def last_insert_id(db):
879
    module = util.root_module(db.db)
880
    if module == 'psycopg2': return value(run_query(db, 'SELECT lastval()'))
881
    elif module == 'MySQLdb': return db.insert_id()
882
    else: return None
883

    
884
def mk_flatten_mapping(db, into, cols, preserve=[], as_items=False):
885
    '''Creates a mapping from original column names (which may have collisions)
886
    to names that will be distinct among the columns' tables.
887
    This is meant to be used for several tables that are being joined together.
888
    @param cols The columns to combine. Duplicates will be removed.
889
    @param into The table for the new columns.
890
    @param preserve [sql_gen.Col...] Columns not to rename. Note that these
891
        columns will be included in the mapping even if they are not in cols.
892
        The tables of the provided Col objects will be changed to into, so make
893
        copies of them if you want to keep the original tables.
894
    @param as_items Whether to return a list of dict items instead of a dict
895
    @return dict(orig_col=new_col, ...)
896
        * orig_col: sql_gen.Col(orig_col_name, orig_table)
897
        * new_col: sql_gen.Col(orig_col_name, into)
898
        * All mappings use the into table so its name can easily be
899
          changed for all columns at once
900
    '''
901
    cols = lists.uniqify(cols)
902
    
903
    items = []
904
    for col in preserve:
905
        orig_col = copy.copy(col)
906
        col.table = into
907
        items.append((orig_col, col))
908
    preserve = set(preserve)
909
    for col in cols:
910
        if col not in preserve:
911
            items.append((col, sql_gen.Col(str(col), into, col.srcs)))
912
    
913
    if not as_items: items = dict(items)
914
    return items
915

    
916
def flatten(db, into, joins, cols, limit=None, start=None, **kw_args):
917
    '''For params, see mk_flatten_mapping()
918
    @return See return value of mk_flatten_mapping()
919
    '''
920
    items = mk_flatten_mapping(db, into, cols, as_items=True, **kw_args)
921
    cols = [sql_gen.NamedCol(new.name, old) for old, new in items]
922
    run_query_into(db, mk_select(db, joins, cols, limit=limit, start=start),
923
        into=into)
924
    return dict(items)
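
# Usage sketch (added for illustration; table and column names are
# hypothetical): mapping two same-named columns from different tables to
# distinct names in a new table, without running any queries.
def _example_flatten_mapping(db):
    into = sql_gen.Table('joined')
    cols = [sql_gen.Col('id', 'plants'), sql_gen.Col('id', 'plots')]
    return mk_flatten_mapping(db, into, cols)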
925

    
926
##### Database structure introspection
927

    
928
#### Tables
929

    
930
def tables(db, schema_like='public', table_like='%', exact=False):
931
    if exact: compare = '='
932
    else: compare = 'LIKE'
933
    
934
    module = util.root_module(db.db)
935
    if module == 'psycopg2':
936
        conds = [('schemaname', sql_gen.CompareCond(schema_like, compare)),
937
            ('tablename', sql_gen.CompareCond(table_like, compare))]
938
        return values(select(db, 'pg_tables', ['tablename'], conds,
939
            order_by='tablename', log_level=4))
940
    elif module == 'MySQLdb':
941
        return values(run_query(db, 'SHOW TABLES LIKE '+db.esc_value(table_like)
942
            , cacheable=True, log_level=4))
943
    else: raise NotImplementedError("Can't list tables for "+module+' database')
944

    
945
def table_exists(db, table):
946
    table = sql_gen.as_Table(table)
947
    return list(tables(db, table.schema, table.name, exact=True)) != []
948

    
949
def table_row_count(db, table, recover=None):
950
    return value(run_query(db, mk_select(db, table, [sql_gen.row_count],
951
        order_by=None, start=0), recover=recover, log_level=3))
952

    
953
def table_cols(db, table, recover=None):
954
    return list(col_names(select(db, table, limit=0, order_by=None,
955
        recover=recover, log_level=4)))
956

    
957
def pkey(db, table, recover=None):
958
    '''Assumed to be first column in table'''
959
    return table_cols(db, table, recover)[0]
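
# Usage sketch (added for illustration; the table name is hypothetical):
# combining the introspection helpers above.
def _example_introspect(db):
    t = sql_gen.Table('plots', 'public')
    if not table_exists(db, t): return None
    return pkey(db, t), table_cols(db, t)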
960

    
961
not_null_col = 'not_null_col'
962

    
963
def table_not_null_col(db, table, recover=None):
964
    '''Name assumed to be the value of not_null_col. If not found, uses pkey.'''
965
    if not_null_col in table_cols(db, table, recover): return not_null_col
966
    else: return pkey(db, table, recover)
967

    
968
def index_cols(db, table, index):
969
    '''Can also use this for UNIQUE constraints, because a UNIQUE index is
970
    automatically created. When you don't know whether something is a UNIQUE
971
    constraint or a UNIQUE index, use this function.'''
972
    module = util.root_module(db.db)
973
    if module == 'psycopg2':
974
        return list(values(run_query(db, '''\
975
SELECT attname
976
FROM
977
(
978
        SELECT attnum, attname
979
        FROM pg_index
980
        JOIN pg_class index ON index.oid = indexrelid
981
        JOIN pg_class table_ ON table_.oid = indrelid
982
        JOIN pg_attribute ON attrelid = indrelid AND attnum = ANY (indkey)
983
        WHERE
984
            table_.relname = '''+db.esc_value(table)+'''
985
            AND index.relname = '''+db.esc_value(index)+'''
986
    UNION
987
        SELECT attnum, attname
988
        FROM
989
        (
990
            SELECT
991
                indrelid
992
                , (regexp_matches(indexprs, E':varattno (\\\\d+)', 'g'))[1]::int
993
                    AS indkey
994
            FROM pg_index
995
            JOIN pg_class index ON index.oid = indexrelid
996
            JOIN pg_class table_ ON table_.oid = indrelid
997
            WHERE
998
                table_.relname = '''+db.esc_value(table)+'''
999
                AND index.relname = '''+db.esc_value(index)+'''
1000
        ) s
1001
        JOIN pg_attribute ON attrelid = indrelid AND attnum = indkey
1002
) s
1003
ORDER BY attnum
1004
'''
1005
            , cacheable=True, log_level=4)))
1006
    else: raise NotImplementedError("Can't list index columns for "+module+
1007
        ' database')
1008

    
1009
def constraint_cols(db, table, constraint):
1010
    module = util.root_module(db.db)
1011
    if module == 'psycopg2':
1012
        return list(values(run_query(db, '''\
1013
SELECT attname
1014
FROM pg_constraint
1015
JOIN pg_class ON pg_class.oid = conrelid
1016
JOIN pg_attribute ON attrelid = conrelid AND attnum = ANY (conkey)
1017
WHERE
1018
    relname = '''+db.esc_value(table)+'''
1019
    AND conname = '''+db.esc_value(constraint)+'''
1020
ORDER BY attnum
1021
'''
1022
            )))
1023
    else: raise NotImplementedError("Can't list constraint columns for "+module+
1024
        ' database')
1025

    
1026
#### Functions
1027

    
1028
def function_exists(db, function):
1029
    function = sql_gen.as_Function(function)
1030
    
1031
    info_table = sql_gen.Table('routines', 'information_schema')
1032
    conds = [('routine_name', function.name)]
1033
    schema = function.schema
1034
    if schema != None: conds.append(('routine_schema', schema))
1035
    # Exclude trigger functions, since they cannot be called directly
1036
    conds.append(('data_type', sql_gen.CompareCond('trigger', '!=')))
1037
    
1038
    return list(values(select(db, info_table, ['routine_name'], conds,
1039
        order_by='routine_schema', limit=1, log_level=4))) != []
1040
        # TODO: order_by search_path schema order
1041

    
1042
##### Structural changes
1043

    
1044
#### Columns
1045

    
1046
def add_col(db, table, col, comment=None, **kw_args):
1047
    '''
1048
    @param col TypedCol Name may be versioned, so be sure to propagate any
1049
        renaming back to any source column for the TypedCol.
1050
    @param comment None|str SQL comment used to distinguish columns of the same
1051
        name from each other when they contain different data, to allow the
1052
        ADD COLUMN query to be cached. If not set, query will not be cached.
1053
    '''
1054
    assert isinstance(col, sql_gen.TypedCol)
1055
    
1056
    while True:
1057
        str_ = 'ALTER TABLE '+table.to_str(db)+' ADD COLUMN '+col.to_str(db)
1058
        if comment != None: str_ += ' '+sql_gen.esc_comment(comment)
1059
        
1060
        try:
1061
            run_query(db, str_, recover=True, cacheable=True, **kw_args)
1062
            break
1063
        except DuplicateException:
1064
            col.name = next_version(col.name)
1065
            # try again with next version of name
1066

    
1067
def add_not_null(db, col):
1068
    table = col.table
1069
    col = sql_gen.to_name_only_col(col)
1070
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ALTER COLUMN '
1071
        +col.to_str(db)+' SET NOT NULL', cacheable=True, log_level=3)
1072

    
1073
row_num_col = '_row_num'
1074

    
1075
row_num_typed_col = sql_gen.TypedCol(row_num_col, 'serial', nullable=False,
1076
    constraints='PRIMARY KEY')
1077

    
1078
def add_row_num(db, table):
1079
    '''Adds a row number column to a table. Its name is in row_num_col. It will
1080
    be the primary key.'''
1081
    add_col(db, table, row_num_typed_col, log_level=3)
1082

    
1083
#### Indexes
1084

    
1085
def add_pkey(db, table, cols=None, recover=None):
1086
    '''Adds a primary key.
1087
    @param cols [sql_gen.Col,...] The columns in the primary key.
1088
        Defaults to the first column in the table.
1089
    @pre The table must not already have a primary key.
1090
    '''
1091
    table = sql_gen.as_Table(table)
1092
    if cols == None: cols = [pkey(db, table, recover)]
1093
    col_strs = [sql_gen.to_name_only_col(v).to_str(db) for v in cols]
1094
    
1095
    run_query(db, 'ALTER TABLE '+table.to_str(db)+' ADD PRIMARY KEY ('
1096
        +(', '.join(col_strs))+')', recover=True, cacheable=True, log_level=3,
1097
        log_ignore_excs=(DuplicateException,))
1098

    
1099
def add_index(db, exprs, table=None, unique=False, ensure_not_null_=True):
1100
    '''Adds an index on column(s) or expression(s) if it doesn't already exist.
1101
    Currently, only function calls are supported as expressions.
1102
    @param ensure_not_null_ If set, translates NULL values to sentinel values.
1103
        This allows indexes to be used for comparisons where NULLs are equal.
1104
    '''
1105
    exprs = lists.mk_seq(exprs)
1106
    
1107
    # Parse exprs
1108
    old_exprs = exprs[:]
1109
    exprs = []
1110
    cols = []
1111
    for i, expr in enumerate(old_exprs):
1112
        expr = sql_gen.as_Col(expr, table)
1113
        
1114
        # Handle nullable columns
1115
        if ensure_not_null_:
1116
            try: expr = sql_gen.ensure_not_null(db, expr)
1117
            except KeyError: pass # unknown type, so just create plain index
1118
        
1119
        # Extract col
1120
        expr = copy.deepcopy(expr) # don't modify input!
1121
        if isinstance(expr, sql_gen.FunctionCall):
1122
            col = expr.args[0]
1123
            expr = sql_gen.Expr(expr)
1124
        else: col = expr
1125
        assert isinstance(col, sql_gen.Col)
1126
        
1127
        # Extract table
1128
        if table == None:
1129
            assert sql_gen.is_table_col(col)
1130
            table = col.table
1131
        
1132
        col.table = None
1133
        
1134
        exprs.append(expr)
1135
        cols.append(col)
1136
    
1137
    table = sql_gen.as_Table(table)
1138
    
1139
    # Add index
1140
    str_ = 'CREATE'
1141
    if unique: str_ += ' UNIQUE'
1142
    str_ += ' INDEX ON '+table.to_str(db)+' ('+(
1143
        ', '.join((v.to_str(db) for v in exprs)))+')'
1144
    run_query(db, str_, recover=True, cacheable=True, log_level=3)
1145

    
1146
already_indexed = object() # tells add_indexes() the pkey has already been added
1147

    
1148
def add_indexes(db, table, has_pkey=True):
1149
    '''Adds an index on all columns in a table.
1150
    @param has_pkey bool|already_indexed Whether a pkey instead of a regular
1151
        index should be added on the first column.
1152
        * If already_indexed, the pkey is assumed to have already been added
1153
    '''
1154
    cols = table_cols(db, table)
1155
    if has_pkey:
1156
        if has_pkey is not already_indexed: add_pkey(db, table)
1157
        cols = cols[1:]
1158
    for col in cols: add_index(db, col, table)
1159

    
1160
#### Tables
1161

    
1162
### Maintenance
1163

    
1164
def analyze(db, table):
1165
    table = sql_gen.as_Table(table)
1166
    run_query(db, 'ANALYZE '+table.to_str(db), log_level=3)
1167

    
1168
def autoanalyze(db, table):
1169
    if db.autoanalyze: analyze(db, table)
1170

    
1171
def vacuum(db, table):
1172
    table = sql_gen.as_Table(table)
1173
    db.with_autocommit(lambda: run_query(db, 'VACUUM ANALYZE '+table.to_str(db),
1174
        log_level=3))
1175

    
1176
### Lifecycle
1177

    
1178
def drop_table(db, table):
1179
    table = sql_gen.as_Table(table)
1180
    return run_query(db, 'DROP TABLE IF EXISTS '+table.to_str(db)+' CASCADE')
1181

    
1182
def create_table(db, table, cols=[], has_pkey=True, col_indexes=True,
1183
    like=None):
1184
    '''Creates a table.
1185
    @param cols [sql_gen.TypedCol,...] The column names and types
1186
    @param has_pkey If set, the first column becomes the primary key.
1187
    @param col_indexes bool|[ref]
1188
        * If True, indexes will be added on all non-pkey columns.
1189
        * If a list reference, [0] will be set to a function to do this.
1190
          This can be used to delay index creation until the table is populated.
1191
    '''
1192
    table = sql_gen.as_Table(table)
1193
    
1194
    if like != None:
1195
        cols = [sql_gen.CustomCode('LIKE '+like.to_str(db)+' INCLUDING ALL')
1196
            ]+cols
1197
    if has_pkey:
1198
        cols[0] = pkey = copy.copy(cols[0]) # don't modify input!
1199
        pkey.constraints = 'PRIMARY KEY'
1200
    
1201
    temp = table.is_temp and not db.debug_temp
1202
        # temp tables permanent in debug_temp mode
1203
    
1204
    # Create table
1205
    while True:
1206
        str_ = 'CREATE'
1207
        if temp: str_ += ' TEMP'
1208
        str_ += ' TABLE '+table.to_str(db)+' (\n'
1209
        str_ += '\n, '.join(c.to_str(db) for c in cols)
1210
        str_ += '\n);'
1211
        
1212
        try:
1213
            run_query(db, str_, recover=True, cacheable=True, log_level=2,
1214
                log_ignore_excs=(DuplicateException,))
1215
            break
1216
        except DuplicateException:
1217
            table.name = next_version(table.name)
1218
            # try again with next version of name
1219
    
1220
    # Add indexes
1221
    if has_pkey: has_pkey = already_indexed
1222
    def add_indexes_(): add_indexes(db, table, has_pkey)
1223
    if isinstance(col_indexes, list): col_indexes[0] = add_indexes_ # defer
1224
    elif col_indexes: add_indexes_() # add now
1225

    
1226
def copy_table_struct(db, src, dest):
1227
    '''Creates a structure-only copy of a table. (Does not copy data.)'''
1228
    create_table(db, dest, has_pkey=False, col_indexes=False, like=src)
1229

    
1230
### Data
1231

    
1232
def truncate(db, table, schema='public', **kw_args):
1233
    '''For params, see run_query()'''
1234
    table = sql_gen.as_Table(table, schema)
1235
    return run_query(db, 'TRUNCATE '+table.to_str(db)+' CASCADE', **kw_args)
1236

    
1237
def empty_temp(db, tables):
1238
    tables = lists.mk_seq(tables)
1239
    for table in tables: truncate(db, table, log_level=3)
1240

    
1241
def empty_db(db, schema='public', **kw_args):
1242
    '''For kw_args, see tables()'''
1243
    for table in tables(db, schema, **kw_args): truncate(db, table, schema)
1244

    
1245
def distinct_table(db, table, distinct_on):
1246
    '''Creates a copy of a temp table which is distinct on the given columns.
1247
    The old and new tables will both get an index on these columns, to
1248
    facilitate merge joins.
1249
    @param distinct_on If empty, creates a table with one row. This is useful if
1250
        your distinct_on columns are all literal values.
1251
    @return The new table.
1252
    '''
1253
    new_table = sql_gen.suffixed_table(table, '_distinct')
1254
    
1255
    copy_table_struct(db, table, new_table)
1256
    
1257
    limit = None
1258
    if distinct_on == []: limit = 1 # one sample row
1259
    else:
1260
        add_index(db, distinct_on, new_table, unique=True)
1261
        add_index(db, distinct_on, table) # for join optimization
1262
    
1263
    insert_select(db, new_table, None, mk_select(db, table, start=0,
1264
        limit=limit), ignore=True)
1265
    analyze(db, new_table)
1266
    
1267
    return new_table
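
# Usage sketch (added for illustration; the column name is hypothetical):
# de-duplicating a temp table (a sql_gen.Table, typically marked temp) on one
# column. Both tables get an index on that column to help merge joins, and the
# new table's name gets a '_distinct' suffix.
def _example_distinct(db, temp_table):
    return distinct_table(db, temp_table, ['plot_id'])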