#!/usr/bin/env python
# Maps one datasource to another, using a map spreadsheet if needed
# Exit status is the # of errors in the import, up to the maximum exit status
# For outputting an XML file to a PostgreSQL database, use the general format of
# http://vegbank.org/vegdocs/xml/vegbank_example_ver1.0.2.xml
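#
# Configuration comes from env vars (test, commit, redo, start, n, debug,
# verbose, by_col, cpus, in_*/out_* DB settings; see the opts.get_env_var()/
# opts.env_flag() calls below). A hypothetical invocation mapping stdin into a
# database might look like (connection var names depend on sql.db_config_names):
#   n=100 verbose=1 out_engine=... out_database=... ./map VegX-VegBIEN.csv <input.csv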

import csv
import itertools
import os.path
import sys
import xml.dom.minidom as minidom

sys.path.append(os.path.dirname(__file__)+"/../lib")

# project-local modules, loaded from ../lib (added to sys.path above)
import csvs
import exc
import iters
import maps
import opts
import parallel
import Parser
import profiling
import sql
import streams
import strings
import term
import util
import xpath
import xml_dom
import xml_func
import xml_parse

def get_with_prefix(map_, prefixes, key):
    '''Gets all entries for the given key with any of the given prefixes'''
    values = []
    for key_ in strings.with_prefixes(['']+prefixes, key): # also with no prefix
        try: value = map_[key_]
        except KeyError, e: continue # keep going
        values.append(value)
    
    if values != []: return values
    else: raise e # re-raise last KeyError
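# e.g. (hypothetical) get_with_prefix({'a': 0, 'x.a': 1}, ['x.'], 'a') returns
# [0, 1], assuming strings.with_prefixes() yields the unprefixed key first;
# the KeyError is re-raised only if no variant of the key is present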

def metadata_value(name): return None # this feature has been removed

def cleanup(val):
    if val == None: return val
    # normalize, then treat '' and '\N' (PostgreSQL COPY's NULL marker) as None
    return util.none_if(strings.cleanup(strings.ustr(val)), u'', u'\\N')

def main_():
    env_names = []
    def usage_err():
        raise SystemExit('Usage: '+opts.env_usage(env_names, True)+' '
            +sys.argv[0]+' [map_path...] [<input] [>output]\n'
            'Note: Row #s start with 1')
    
    ## Get config from env vars
    
    # Modes
    test = opts.env_flag('test', False, env_names)
    commit = opts.env_flag('commit', False, env_names) and not test
        # never commit in test mode
    redo = opts.env_flag('redo', test, env_names) and not commit
        # never redo in commit mode (manually run `make empty_db` instead)
    
    # Ranges
    start = util.cast(int, opts.get_env_var('start', 1, env_names)) # 1-based
    # Make start internally 0-based.
    # It's 1-based to the user to match up with the staging table row #s.
    start -= 1
    if test: n_default = 1
    else: n_default = None
    n = util.cast(int, util.none_if(opts.get_env_var('n', n_default, env_names),
        u''))
    end = n
    if end != None: end += start
    
    # Debugging
    debug = opts.env_flag('debug', False, env_names)
    sql.run_raw_query.debug = debug
    verbose = debug or opts.env_flag('verbose', not test, env_names)
    opts.get_env_var('profile_to', None, env_names) # add to env_names
    
    # DB
    def get_db_config(prefix):
        return opts.get_env_vars(sql.db_config_names, prefix, env_names)
    in_db_config = get_db_config('in')
    out_db_config = get_db_config('out')
    in_is_db = 'engine' in in_db_config
    out_is_db = 'engine' in out_db_config
    in_schema = opts.get_env_var('in_schema', None, env_names)
    in_table = opts.get_env_var('in_table', None, env_names)
    
    # Optimization
    by_col = in_db_config == out_db_config and opts.env_flag('by_col', False,
        env_names) # by-column optimization only applies if mapping to same DB
    if test: cpus_default = 0
    else: cpus_default = None
    cpus = util.cast(int, util.none_if(opts.get_env_var('cpus', cpus_default,
        env_names), u''))
    
    ##
    
    # Logging
    def log(msg, on=verbose):
        if on: sys.stderr.write(msg+'\n')
    if debug: log_debug = lambda msg: log(msg, debug)
    else: log_debug = sql.log_debug_none
    
    # Parse args
    map_paths = sys.argv[1:]
    if map_paths == []:
        if in_is_db or not out_is_db: usage_err()
        else: map_paths = [None]
    
    def connect_db(db_config):
        log('Connecting to '+sql.db_config_str(db_config))
        return sql.connect(db_config, log_debug=log_debug)
    
    if end != None: end_str = str(end-1) # end is one past the last #
    else: end_str = 'end'
    log('Processing input rows '+str(start)+'-'+end_str)
    
    ex_tracker = exc.ExPercentTracker(iter_text='row')
    profiler = profiling.ItersProfiler(start_now=True, iter_text='row')
    
    # Parallel processing
    pool = parallel.MultiProducerPool(cpus)
    log('Using '+str(pool.process_ct)+' parallel CPUs')
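    # cpus=0 (the test-mode default) presumably disables worker processes so
    # that tasks run serially in the main process; see the parallel module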
    
    doc = xml_dom.create_doc()
    root = doc.documentElement
    # one-element lists act as mutable cells that nested functions can update
    # (Python 2 has no `nonlocal`)
    out_is_xml_ref = [False]
    in_label_ref = [None]
    def update_in_label():
        if in_label_ref[0] != None:
            xpath.get(root, '/_ignore/inLabel="'+in_label_ref[0]+'"', True)
    def prep_root():
        root.clear()
        update_in_label()
    prep_root()
    
    # Define before the out_is_db section because it's used by by_col
    row_ins_ct_ref = [0]
    
    def process_input(root, row_ready, map_path):
        '''Inputs datasource to XML tree, mapping if needed'''
        # Load map header
        in_is_xpaths = True
        out_is_xpaths = True
        out_label = None
        if map_path != None:
            metadata = []
            mappings = []
            stream = open(map_path, 'rb')
            reader = csv.reader(stream)
            in_label, out_label = reader.next()[:2]
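            # The header row names the input and output datasources, each
            # optionally with prefixes and an XPath root, e.g. (hypothetical):
            #   VegX[x]:/*s/row,VegBIEN:/*s/plot
            # Each later row maps an input column/XPath to an output XPath.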
            
            def split_col_name(name):
                # extracts the datasrc label, XPath root, and any prefixes
                # from "datasrc[data_format]:root"
                # (not called in this file; maps.col_info() provides this)
                label, sep, root = name.partition(':')
                label, sep2, prefixes_str = label.partition('[')
                prefixes_str = strings.remove_suffix(']', prefixes_str)
                prefixes = strings.split(',', prefixes_str)
                return label, sep != '', root, prefixes
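                # e.g. split_col_name('VegX[x]:/*s/row') would return
                # ('VegX', True, '/*s/row', ['x']), assuming strings.split()
                # splits on the given separator like str.split()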
            
            in_label, in_root, prefixes = maps.col_info(in_label)
            in_is_xpaths = in_root != None
            in_label_ref[0] = in_label
            update_in_label()
            out_label, out_root = maps.col_info(out_label)[:2]
            out_is_xpaths = out_root != None
            if out_is_xpaths: has_types = out_root.find('/*s/') >= 0
                # outer elements are types
            
            for row in reader:
                in_, out = row[:2]
                if out != '':
                    if out_is_xpaths: out = xpath.parse(out_root+out)
                    mappings.append((in_, out))
            
            stream.close()
            
            root.ownerDocument.documentElement.tagName = out_label
        in_is_xml = in_is_xpaths and not in_is_db
        out_is_xml_ref[0] = out_is_xpaths and not out_is_db
        
        def process_rows(process_row, rows, rows_start=0):
            '''Processes input rows
            @param process_row(in_row, i)
            @rows_start The (0-based) row # of the first row in rows. Set this
                only if the pre-start rows have already been skipped.
            '''
            rows = iter(rows)
            
            if end != None: row_nums = xrange(rows_start, end)
            else: row_nums = itertools.count(rows_start)
            row_ct = 0
            for i in row_nums:
                try: row = rows.next()
                except StopIteration: break # no more rows
                if i < start: continue # not at start row yet
                
                process_row(row, i)
                row_ready(i, row)
                row_ct += 1 # count processed rows directly
            return row_ct
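        # e.g. env vars start=2, n=2 give internal start=1, end=3: row 0 is
        # skipped and rows 1-2 (user rows 2-3) are processed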
        
        def map_rows(get_value, rows, **kw_args):
            '''Maps input rows
            @param get_value(in_, row):str
            '''
            id_node = None
            if out_is_db:
                for i, mapping in enumerate(mappings):
                    in_, out = mapping
                    # All put_obj()s should return the same id_node
                    nodes, id_node = xpath.put_obj(root, out, '-1', has_types,
                        '$'+str(in_)) # value is a placeholder documenting the name
                #assert id_node != None
                
                if debug: # only str() if debug
                    log_debug('Put template:\n'+str(root))
                prep_root()
            
            def process_row(row, i):
                row_id = str(i)
                for in_, out in mappings:
                    value = metadata_value(in_)
                    if value == None:
                        log_debug('Getting '+str(in_))
                        value = cleanup(get_value(in_, row))
                    if out_is_db or value != None:
                        log_debug('Putting '+str(out))
                        xpath.put_obj(root, out, row_id, has_types, value)
            return process_rows(process_row, rows, **kw_args)
        
        def map_table(col_names, rows, **kw_args):
            col_names_ct = len(col_names)
            col_idxs = util.list_flip(col_names)
            
            # Resolve each input column name (with optional prefixes) to its
            # column indexes; mappings whose columns are missing are dropped
            mappings_orig = mappings[:] # save a copy
            mappings[:] = [] # empty existing elements
            for in_, out in mappings_orig:
                if metadata_value(in_) == None:
                    try: names = get_with_prefix(col_idxs, prefixes, in_)
                    except KeyError: pass
                    else: mappings.append((names, out))
            
            def get_value(in_, row):
                return util.coalesce(*util.list_subset(row.list, in_))
            def wrap_row(row):
                return util.ListDict(util.list_as_length(row, col_names_ct),
                    col_names, col_idxs) # handle CSV rows of different lengths
            
            return map_rows(get_value, util.WrapIter(wrap_row, rows), **kw_args)
        
        stdin = streams.LineCountStream(sys.stdin)
        def on_error(e):
            exc.add_msg(e, term.emph('input line #:')+' '+str(stdin.line_num))
            ex_tracker.track(e)
        
        if in_is_db:
            in_db = connect_db(in_db_config)
            
            # Get table and schema name
            schema = in_schema # modified, so can't have same name as outer var
            table = in_table # modified, so can't have same name as outer var
            if table == None:
                assert in_is_xpaths
                schema, sep, table = in_root.partition('.')
                if sep == '': # only the table name was specified
                    table = schema
                    schema = None
            table_is_esc = False
            if schema != None:
                table = sql.qual_name(in_db, schema, table)
                table_is_esc = True
            
            # Fetch rows
            if by_col: limit = 0 # only fetch column names
            else: limit = n
            cur = sql.select(in_db, table, limit=limit, start=start,
                table_is_esc=table_is_esc)
            col_names = list(sql.col_names(cur))
            
            if by_col:
                row_ready = lambda row_num, input_row: None # disable row_ready()
                row = ['$'+v for v in col_names] # values are the column names
                map_table(col_names, [row]) # map just the sample row
                xml_func.strip(root)
                db_xml.put_table(in_db, root.firstChild, table, commit,
                    row_ins_ct_ref, table_is_esc)
                row_ct = 0 # rows are inserted in bulk rather than iterated here
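                # by_col maps one template row whose values are the '$'-prefixed
                # column names; db_xml.put_table() presumably translates that
                # template into set-based inserts instead of per-row puts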
            else:
                # Use normal by-row method
                row_ct = map_table(col_names, sql.rows(cur), rows_start=start)
                    # rows_start: pre-start rows have been skipped
            
            in_db.db.close()
        elif in_is_xml:
            def get_rows(doc2rows):
                return iters.flatten(itertools.imap(doc2rows,
                    xml_parse.docs_iter(stdin, on_error)))
            
            if map_path == None:
                # No map: pass the input XML rows through to the output tree
                def doc2rows(in_xml_root):
                    iter_ = xml_dom.NodeElemIter(in_xml_root)
                    util.skip(iter_, xml_dom.is_text) # skip metadata
                    return iter_
                
                row_ct = process_rows(lambda row, i: root.appendChild(row),
                    get_rows(doc2rows))
            else:
                def doc2rows(in_xml_root):
                    rows = xpath.get(in_xml_root, in_root, limit=end)
                    if rows == []: raise SystemExit('Map error: Root "'
                        +in_root+'" not found in input')
                    return rows
                
                def get_value(in_, row):
                    in_ = './{'+(','.join(strings.with_prefixes(
                        ['']+prefixes, in_)))+'}' # also with no prefix
                    nodes = xpath.get(row, in_, allow_rooted=False)
                    if nodes != []: return xml_dom.value(nodes[0])
                    else: return None
                
                row_ct = map_rows(get_value, get_rows(doc2rows))
        else: # input is CSV
            map_ = dict(mappings)
            reader, col_names = csvs.reader_and_header(sys.stdin)
            row_ct = map_table(col_names, reader)
        
        return row_ct
    
    def process_inputs(root, row_ready):
        row_ct = 0
        for map_path in map_paths:
            row_ct += process_input(root, row_ready, map_path)
        return row_ct
    
    pool.share_vars(locals())
    if out_is_db:
        import db_xml
        
        out_db = connect_db(out_db_config)
        try:
            if redo: sql.empty_db(out_db)
            pool.share_vars(locals())
            
            def row_ready(row_num, input_row):
                def on_error(e):
                    exc.add_msg(e, term.emph('row #:')+' '+str(row_num+1))
                        # row # is internally 0-based, but 1-based to the user
                    exc.add_msg(e, term.emph('input row:')+'\n'+str(input_row))
                    exc.add_msg(e, term.emph('output row:')+'\n'+str(root))
                    ex_tracker.track(e, row_num)
                pool.share_vars(locals())
                
                xml_func.process(root, on_error)
                if not xml_dom.is_empty(root):
                    assert xml_dom.has_one_child(root)
                    try:
                        sql.with_savepoint(out_db,
                            lambda: db_xml.put(out_db, root.firstChild,
                                row_ins_ct_ref, on_error))
                        if commit: out_db.db.commit()
                    except sql.DatabaseErrors, e: on_error(e)
                prep_root()
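                # with_savepoint() presumably wraps the put in a savepoint, so
                # a failed row is rolled back and tracked without aborting the
                # transaction for the remaining rows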
            
            row_ct = process_inputs(root, row_ready)
            sys.stdout.write('Inserted '+str(row_ins_ct_ref[0])+
                ' new rows into database\n')
            
            # Consume asynchronous tasks
            pool.main_loop()
        finally:
            out_db.db.rollback() # discard uncommitted rows; harmless after commit
            out_db.db.close()
    else:
        def on_error(e): ex_tracker.track(e)
        def row_ready(row_num, input_row): pass
        row_ct = process_inputs(root, row_ready)
        xml_func.process(root, on_error)
        if out_is_xml_ref[0]:
            doc.writexml(sys.stdout, **xml_dom.prettyxml_config)
        else: # output is CSV
            raise NotImplementedError('CSV output not supported yet')
    
    # Consume any asynchronous tasks not already consumed above
    pool.main_loop()
    
    profiler.stop(row_ct)
    ex_tracker.add_iters(row_ct)
    if verbose:
        sys.stderr.write('Processed '+str(row_ct)+' input rows\n')
        sys.stderr.write(profiler.msg()+'\n')
        sys.stderr.write(ex_tracker.msg()+'\n')
    ex_tracker.exit() # exit status is the error count (see header comment)

def main():
    try: main_()
    except Parser.SyntaxError, e: raise SystemExit(str(e))

if __name__ == '__main__':
    profile_to = opts.get_env_var('profile_to', None)
    if profile_to != None:
        import cProfile
        sys.stderr.write('Profiling to '+profile_to+'\n')
        cProfile.run(main.func_code, profile_to)
            # cProfile.run() exec()s its argument, so passing main's code
            # object runs main()'s body under the profiler
    else: main()