1
|
# CSV I/O
|
2
|
|
3
|
import csv
|
4
|
import _csv
|
5
|
import StringIO
|
6
|
|
7
|
import streams
|
8
|
import strings
|
9
|
import util
|
10
|
|
11
|
# Candidate single-char delimiters offered to csv.Sniffer
delims = ',;\t|`'
# Multi-char delimiters where the real delimiter is padded with tabs;
# checked separately because Sniffer only detects single characters
tab_padded_delims = ['\t|\t']
tsv_delim = '\t'  # a delimiter starting with this marks a TSV dialect
escape = '\\'  # trailing escape char that continues a TSV line

# Literal placeholder (backslash + n, not a newline) substituted for
# embedded line endings inside a field
ending_placeholder = r'\n'
|
17
|
|
18
|
def is_tsv(dialect):
    '''Whether the dialect's delimiter marks a tab-separated format'''
    return dialect.delimiter.startswith(tsv_delim)
|
19
|
|
20
|
def sniff(line):
    '''Automatically detects the dialect'''
    line, ending = strings.extract_line_ending(line)
    dialect = csv.Sniffer().sniff(line, delims)
    
    if not is_tsv(dialect):
        dialect.doublequote = True # Sniffer doesn't turn this on by default
    else:
        # TSVs usually don't quote fields (nor doublequote embedded quotes)
        dialect.quoting = csv.QUOTE_NONE
        
        # Check multi-char delims using \t
        padded_delim = strings.find_any(line, tab_padded_delims)
        if padded_delim:
            dialect.delimiter = padded_delim
            # any partial delimiter left at end-of-line belongs to the ending
            partial = padded_delim.rstrip('\t')
            if line.endswith(partial): ending = partial+ending
    dialect.lineterminator = ending
    
    return dialect
|
39
|
|
40
|
def stream_info(stream, parse_header=False):
    '''Automatically detects the dialect based on the header line.
    Uses the Excel dialect if the CSV file is empty.
    @return NamedTuple {header_line, header, dialect}'''
    info = util.NamedTuple()
    info.header_line = stream.readline()
    info.header = None
    if info.header_line == '': # a line of '' indicates EOF = empty stream
        info.dialect = csv.excel
    else:
        info.dialect = sniff(info.header_line)
    
    if parse_header:
        header_stream = StringIO.StringIO(info.header_line)
        reader = reader_class(info.dialect)(header_stream, info.dialect)
        try: info.header = reader.next()
        except StopIteration: info.header = []
    
    return info
|
57
|
|
58
|
# TSV escape map: the JSON-style escapes from strings, plus an escape for
# the tab delimiter itself (presumably (char, escape) pairs -- see strings)
tsv_encode_map = strings.json_encode_map[:]
tsv_encode_map.append(('\t', r'\t'))
tsv_decode_map = strings.flip_map(tsv_encode_map) # inverse map, for reading
|
61
|
|
62
|
class TsvReader:
|
63
|
'''Unlike csv.reader, for TSVs, interprets \ as escaping a line ending but
|
64
|
ignores it before everything else (e.g. \N for NULL).
|
65
|
Also expands tsv_encode_map escapes.
|
66
|
'''
|
67
|
def __init__(self, stream, dialect):
|
68
|
assert is_tsv(dialect)
|
69
|
self.stream = stream
|
70
|
self.dialect = dialect
|
71
|
|
72
|
def __iter__(self): return self
|
73
|
|
74
|
def next(self):
|
75
|
record = ''
|
76
|
ending = None
|
77
|
while True:
|
78
|
line = self.stream.readline()
|
79
|
if line == '': raise StopIteration
|
80
|
|
81
|
line = strings.remove_suffix(self.dialect.lineterminator, line)
|
82
|
contents = strings.remove_suffix(escape, line)
|
83
|
record += contents
|
84
|
if len(contents) == len(line): break # no line continuation
|
85
|
record += ending_placeholder
|
86
|
|
87
|
# Prevent "new-line character seen in unquoted field" errors
|
88
|
record = record.replace('\r', ending_placeholder)
|
89
|
|
90
|
# Split line
|
91
|
if len(self.dialect.delimiter) > 1: # multi-char delims
|
92
|
row = record.split(self.dialect.delimiter)
|
93
|
else: row = csv.reader(StringIO.StringIO(record), self.dialect).next()
|
94
|
|
95
|
return [strings.replace_all(tsv_decode_map, v) for v in row]
|
96
|
|
97
|
def reader_class(dialect):
    '''Selects the reader type appropriate for the dialect'''
    return TsvReader if is_tsv(dialect) else csv.reader
|
100
|
|
101
|
def make_reader(stream, dialect):
    '''Instantiates the appropriate reader type for the dialect'''
    return reader_class(dialect)(stream, dialect)
|
102
|
|
103
|
def reader_and_header(stream):
    '''Automatically detects the dialect based on the header line
    @return tuple (reader, header)'''
    info = stream_info(stream, parse_header=True)
    reader = make_reader(stream, info.dialect)
    return (reader, info.header)
|
108
|
|
109
|
##### csv modifications
|
110
|
|
111
|
# Note that these methods only work on *instances* of Dialect classes
|
112
|
# Value-compare Dialect instances by their attributes (default is identity)
csv.Dialect.__eq__ = lambda self, other: self.__dict__ == other.__dict__
csv.Dialect.__ne__ = lambda self, other: not (self == other)
|
114
|
|
115
|
__Dialect__validate_orig = csv.Dialect._validate
def __Dialect__validate(self):
    '''Like the original Dialect._validate(), but permits the multi-char
    delimiters this module supports (e.g. tab_padded_delims).
    @raise _csv.Error for any other validation failure'''
    try: __Dialect__validate_orig(self)
    # some Python versions raise the underlying TypeError directly rather
    # than wrapping it in _csv.Error
    except (_csv.Error, TypeError) as e:
        # the wording varies across Python versions ("an" vs. "a"), so
        # accept both rather than exact-matching one of them
        if str(e) in ('"delimiter" must be an 1-character string',
            '"delimiter" must be a 1-character string'): pass # OK
        else: raise
csv.Dialect._validate = __Dialect__validate
|
122
|
|
123
|
##### Row filters
|
124
|
|
125
|
class Filter:
|
126
|
'''Wraps a reader, filtering each row'''
|
127
|
def __init__(self, filter_, reader):
|
128
|
self.reader = reader
|
129
|
self.filter = filter_
|
130
|
|
131
|
def __iter__(self): return self
|
132
|
|
133
|
def next(self): return self.filter(self.reader.next())
|
134
|
|
135
|
def close(self): pass # support using as a stream
|
136
|
|
137
|
# values treated as NULL markers (\N is the common DB dump NULL escape)
std_nulls = [r'\N']
empty_nulls = [''] + std_nulls # also treat empty strings as NULL
|
139
|
|
140
|
class NullFilter(Filter):
    '''Translates special string values to None'''
    def __init__(self, reader, nulls=std_nulls):
        null_values = set(nulls)
        def to_nulls(row):
            return [None if v in null_values else v for v in row]
        Filter.__init__(self, to_nulls, reader)
|
146
|
|
147
|
class StripFilter(Filter):
    '''Strips whitespace'''
    def __init__(self, reader):
        Filter.__init__(self, lambda row: [v.strip() for v in row], reader)
|
152
|
|
153
|
class ColCtFilter(Filter):
    '''Gives all rows the same # columns'''
    def __init__(self, reader, cols_ct):
        def normalize_len(row): return util.list_as_length(row, cols_ct)
        Filter.__init__(self, normalize_len, reader)
|
158
|
|
159
|
##### Translators
|
160
|
|
161
|
class StreamFilter(Filter):
    '''Wraps a reader in a way that's usable to a filter stream that does not
    require lines to be strings. Reports EOF as '' instead of StopIteration.'''
    def __init__(self, reader): Filter.__init__(self, None, reader)
    
    def readline(self):
        '''@return the next row, or '' at EOF (like file.readline())'''
        try:
            return self.reader.next()
        except StopIteration:
            return '' # EOF
|
170
|
|
171
|
class ColInsertFilter(Filter):
    '''Adds a column to each row
    @param mk_value(row, row_num)
    '''
    def __init__(self, reader, mk_value, index=0):
        # wrap the reader so its rows can be counted like stream lines
        counted = streams.LineCountInputStream(StreamFilter(reader))
        def insert_col(row):
            new_row = list(row) # copy, so the input row isn't modified
            new_row.insert(index, mk_value(new_row, counted.line_num))
            return new_row
        Filter.__init__(self, insert_col, counted)
|
182
|
|
183
|
class RowNumFilter(ColInsertFilter):
    '''Adds a row # column at the beginning of each row'''
    def __init__(self, reader):
        ColInsertFilter.__init__(self, reader,
            lambda row, row_num: row_num, 0)
|
188
|
|
189
|
class InputRewriter(StreamFilter):
    '''Wraps a reader, writing each row back to CSV'''
    def __init__(self, reader, dialect=csv.excel):
        StreamFilter.__init__(self, reader)
        self.dialect = dialect
    
    def readline(self):
        '''@return the next row serialized as one CSV line, or '' at EOF'''
        # NOTE(review): calls self.reader.readline() directly instead of
        # StreamFilter.readline(self) -- confirm the wrapped reader actually
        # provides readline() (e.g. is itself a StreamFilter)
        row = self.reader.readline()
        if row == '': return row # EOF
        
        buf = StringIO.StringIO()
        writer = csv.writer(buf, self.dialect)
        writer.writerow(row)
        return buf.getvalue()
    
    def read(self, n): return self.readline() # forward all reads to readline()
|