# CSV I/O

import csv
import _csv
import StringIO

import exc
import streams
import strings
import util

delims = ',;\t|`' # candidate single-char delimiters for sniffing
tab_padded_delims = ['\t|\t']
tsv_delim = '\t'
escape = '\\'

ending_placeholder = r'\n'

def is_tsv(dialect): return dialect.delimiter.startswith(tsv_delim)

def sniff(line):
    '''Automatically detects the dialect'''
    line, ending = strings.extract_line_ending(line)
    try: dialect = csv.Sniffer().sniff(line, delims)
    except _csv.Error, e:
        if exc.e_msg(e) == 'Could not determine delimiter': dialect = csv.excel
        else: raise
    
    if is_tsv(dialect):
        dialect.quoting = csv.QUOTE_NONE
        # Check for multi-char delimiters padded with \t (e.g. '\t|\t')
        delim = strings.find_any(line, tab_padded_delims)
        if delim:
            dialect.delimiter = delim
            line_suffix = delim.rstrip('\t')
            if line.endswith(line_suffix): ending = line_suffix+ending
    else: dialect.doublequote = True # Sniffer doesn't turn this on by default
    dialect.lineterminator = ending
    
    return dialect
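
# Example (illustrative sketch; the header line literal below is made up):
#   dialect = sniff('a,b,c\r\n')
#   dialect.delimiter        # ','
#   dialect.lineterminator   # '\r\n', the sniffed line's own ending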

def has_unbalanced_quotes(str_): return str_.count('"') % 2 == 1 # odd # of "

def has_multiline_column(str_): return has_unbalanced_quotes(str_)
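
# Example (illustrative sketch): an unclosed quote means a quoted field
# continues on a later line, so the first line alone is not the full header.
#   has_multiline_column('id,"notes that span\n')   # True (odd # of ")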

def stream_info(stream, parse_header=False):
    '''Automatically detects the dialect based on the header line.
    Uses the Excel dialect if the CSV file is empty.
    @return NamedTuple {header_line, header, dialect}'''
    info = util.NamedTuple()
    info.header_line = stream.readline()
    if has_multiline_column(info.header_line): # 1st line not full header
        # assume it's a header-only csv with multiline columns
        info.header_line += ''.join(stream.readlines()) # use entire file
    info.header = None
    if info.header_line != '':
        info.dialect = sniff(info.header_line)
    else: info.dialect = csv.excel # line of '' indicates EOF = empty stream
    
    if parse_header:
        try: info.header = reader_class(info.dialect)(
            StringIO.StringIO(info.header_line), info.dialect).next()
        except StopIteration: info.header = []
    
    return info
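
# Example (illustrative sketch; StringIO stands in for any file-like object):
#   info = stream_info(StringIO.StringIO('a,b\n1,2\n'), parse_header=True)
#   info.dialect.delimiter   # ','
#   info.header              # ['a', 'b']
#   # the stream is now positioned after the header line, ready for a reader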

tsv_encode_map = strings.json_encode_map[:]
tsv_encode_map.append(('\t', r'\t'))
tsv_decode_map = strings.flip_map(tsv_encode_map)

class TsvReader:
    '''Unlike csv.reader, for TSVs, interprets a trailing \ as escaping the
    line ending, but leaves \ alone before anything else (e.g. \N for NULL).
    Also expands tsv_encode_map escapes.
    '''
    def __init__(self, stream, dialect):
        assert is_tsv(dialect)
        self.stream = stream
        self.dialect = dialect
    
    def __iter__(self): return self
    
    def next(self):
        record = ''
        ending = None
        while True:
            line = self.stream.readline()
            if line == '': raise StopIteration
            
            line = strings.remove_suffix(self.dialect.lineterminator, line)
            contents = strings.remove_suffix(escape, line)
            record += contents
            if len(contents) == len(line): break # no line continuation
            record += ending_placeholder
        
        # Prevent "new-line character seen in unquoted field" errors
        record = record.replace('\r', ending_placeholder)
        
        # Split line
        if record == '': row = [] # csv.reader would interpret as EOF
        elif len(self.dialect.delimiter) > 1: # multi-char delims
            row = record.split(self.dialect.delimiter)
        else: row = csv.reader(StringIO.StringIO(record), self.dialect).next()
        
        return [strings.replace_all(tsv_decode_map, v) for v in row]
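
# Example (illustrative sketch; assumes strings.json_encode_map maps '\n' to
# r'\n', so the decode step turns the placeholder back into a real newline):
#   dialect = sniff('a\tb\n')
#   reader = TsvReader(StringIO.StringIO('1\tline one\\\nline two\n'), dialect)
#   reader.next()   # ['1', 'line one\nline two']: the trailing \ joined the lines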

def reader_class(dialect):
    if is_tsv(dialect): return TsvReader
    else: return csv.reader

def make_reader(stream, dialect): return reader_class(dialect)(stream, dialect)

def reader_and_header(stream):
    '''Automatically detects the dialect based on the header line
    @return tuple (reader, header)'''
    info = stream_info(stream, parse_header=True)
    return (make_reader(stream, info.dialect), info.header)
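
# Example (illustrative sketch): the usual entry point for reading a CSV of
# unknown dialect.
#   reader, header = reader_and_header(StringIO.StringIO('a,b\n1,2\n'))
#   header          # ['a', 'b']
#   reader.next()   # ['1', '2']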

##### csv modifications

# Note that these methods only work on *instances* of Dialect classes
csv.Dialect.__eq__ = lambda self, other: self.__dict__ == other.__dict__
csv.Dialect.__ne__ = lambda self, other: not (self == other)

__Dialect__validate_orig = csv.Dialect._validate
def __Dialect__validate(self):
    # allow multi-char delimiters, such as the tab-padded delimiters that
    # sniff() may set, which _csv would otherwise reject
    try: __Dialect__validate_orig(self)
    except _csv.Error, e:
        if str(e) == '"delimiter" must be an 1-character string': pass # OK
        else: raise
csv.Dialect._validate = __Dialect__validate

##### Row filters

class Filter:
    '''Wraps a reader, filtering each row'''
    def __init__(self, filter_, reader):
        self.reader = reader
        self.filter = filter_
    
    def __iter__(self): return self
    
    def next(self): return self.filter(self.reader.next())
    
    def close(self): pass # support using as a stream

std_nulls = [r'\N']
empty_nulls = [''] + std_nulls

class NullFilter(Filter):
    '''Translates special string values to None'''
    def __init__(self, reader, nulls=std_nulls):
        map_ = dict.fromkeys(nulls, None)
        def filter_(row): return [map_.get(v, v) for v in row]
        Filter.__init__(self, filter_, reader)

class StripFilter(Filter):
    '''Strips whitespace'''
    def __init__(self, reader):
        def filter_(row): return [v.strip() for v in row]
        Filter.__init__(self, filter_, reader)

class ColCtFilter(Filter):
    '''Gives all rows the same # of columns'''
    def __init__(self, reader, cols_ct):
        def filter_(row): return util.list_as_length(row, cols_ct)
        Filter.__init__(self, filter_, reader)
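
# Example (illustrative sketch): filters wrap any reader and can be chained;
# strip whitespace first so that padded \N values still become None.
#   reader, header = reader_and_header(StringIO.StringIO('a,b\n x , \\N \n'))
#   reader = NullFilter(StripFilter(reader))
#   reader.next()   # ['x', None]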

##### Translators

class StreamFilter(Filter):
    '''Wraps a reader so that it can be used as an input stream by filters that
    do not require lines to be strings. Reports EOF as '' instead of raising
    StopIteration.'''
    def __init__(self, reader):
        Filter.__init__(self, None, reader)
    
    def readline(self):
        try: return self.reader.next()
        except StopIteration: return '' # EOF

class ColInsertFilter(Filter):
    '''Adds column(s) to each row
    @param mk_value(row, row_num) | literal_value
    '''
    def __init__(self, reader, mk_value, index=0, n=1):
        if not callable(mk_value):
            value = mk_value
            def mk_value(row, row_num): return value
        
        def filter_(row):
            row = list(row) # make sure it's mutable; don't modify input!
            for i in xrange(n):
                row.insert(index+i, mk_value(row, self.reader.line_num))
            return row
        Filter.__init__(self, filter_,
            streams.LineCountInputStream(StreamFilter(reader)))

class RowNumFilter(ColInsertFilter):
    '''Adds a row # column at the beginning of each row'''
    def __init__(self, reader):
        def mk_value(row, row_num): return row_num
        ColInsertFilter.__init__(self, reader, mk_value, 0)
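
# Example (illustrative sketch; assumes streams.LineCountInputStream is
# iterable and provides a 1-based line_num):
#   reader, header = reader_and_header(StringIO.StringIO('a,b\n1,2\n'))
#   reader = ColInsertFilter(reader, 'const', index=1)   # literal_value form
#   reader.next()   # ['1', 'const', '2']
#   # RowNumFilter works the same way but prepends the current line_num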

class InputRewriter(StreamFilter):
    '''Wraps a reader, writing each row back as CSV text. The wrapped object
    must provide readline() returning rows (e.g. a StreamFilter).'''
    def __init__(self, reader, dialect=csv.excel):
        StreamFilter.__init__(self, reader)
        
        self.dialect = dialect
    
    def readline(self):
        try:
            row = self.reader.readline()
            if row == '': return row # EOF
            
            line_stream = StringIO.StringIO()
            csv.writer(line_stream, self.dialect).writerow(row)
            return line_stream.getvalue()
        except Exception, e:
            exc.print_ex(e)
            raise
    
    def read(self, n): return self.readline() # forward all reads to readline()
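
# Example (illustrative sketch): since readline() here calls readline() on the
# wrapped object, wrap a row reader in StreamFilter first; the result is a
# file-like object of CSV text (csv.excel uses '\r\n' line endings).
#   reader, header = reader_and_header(StringIO.StringIO('a,b\n1,2\n'))
#   stream = InputRewriter(StreamFilter(reader))
#   stream.readline()   # '1,2\r\n'
#   stream.readline()   # '' (EOF)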
