# TNRS

import re
import sys
import time
import urllib2

import streams
# Config
initial_pause = 0.35 # sec
pause_growth_factor = 1.3
max_pause = 600 # sec; = 10 min
assert initial_pause <= max_pause
max_taxons = 5000# according to http://tnrs.iplantcollaborative.org/TNRSapp.html

# Protocol params
url_base = 'http://tnrs.iplantcollaborative.org/tnrsdemo/'
url = url_base+'search'
# Headers sent with every GWT-RPC POST; tnrs_request() adds a session cookie
# after the retrieval step.
initial_headers = {
    'Content-Type': 'text/x-gwt-rpc; charset=utf-8',
    'X-GWT-Module-Base': url_base,
    'X-GWT-Permutation': '574AA16D15D917C7704646FD92AFF6B3',
}
# GWT-RPC payload template; '[taxons]' is replaced with the escaped name list.
submission_request_template = ('7|0|7|'+url_base+
'||org.iplantc.tnrs.demo.client.SearchService|doSearch|\
java.lang.String/2004016611|{"sources":"gcc,tropicos,usda", "names":"[taxons]"\
, "type":"matching", "taxonomic":"true", "classification":"tropicos", \
"match_to_rank":"true"}|0.05|1|2|3|4|2|5|5|6|7|')
# Captures the job key returned by the submission step.
submission_response_pattern = r'^//OK\[1,\["(\w+)"\],0,7\]$'
# '[key]' is replaced with the (escaped) job key from the submission step.
retrieval_request_template = ('7|0|15|'+url_base+
'|1E87C78041CEFBF0992F46BDF84D7D60|org.iplantc.tnrs.demo.client.SearchService\
|getRemoteData|com.extjs.gxt.ui.client.data.PagingLoadConfig|\
java.lang.String/2004016611|com.extjs.gxt.ui.client.data.BasePagingLoadConfig/\
2011366567|com.extjs.gxt.ui.client.data.RpcMap/3441186752|sortField|sortDir|\
com.extjs.gxt.ui.client.Style$SortDir/640452531|offset|java.lang.Integer/\
3438268394|limit|{"email":"tnrs@lka5jjs.orv", "key":"[key]", \
"taxonomic_constraint":"false", "source_sorting":"false", "first":"false"}\
|1|2|3|4|2|5|6|7|0|1|8|4|9|0|10|11|0|12|13|0|14|13|100|15|')
# Sanity-check pattern for the retrieval body (no captures used).
retrieval_response_pattern = '^//OK\[.*?\["com.extjs.gxt.ui.client.data.\
BasePagingLoadResult/496878394","java.util.ArrayList/4159755760","org.iplantc.\
tnrs.demo.shared.BeanTNRSEntry/1039545748",".*"\],0,7\]$'
# Extracts the session id from the retrieval response *headers*.
retrieval_response_info_pattern = r'(?ms).*^Set-Cookie: JSESSIONID=(\w+);'
download_request_template = ('7|0|6|'+url_base+
'|1E87C78041CEFBF0992F46BDF84D7D60|org.iplantc.tnrs.demo.client.SearchService|\
downloadRemoteResults|java.lang.String/2004016611|{"name":"tnrs_results.txt", \
"mode":"Best", "type":"Detailed", "encoding":"utf8", "dirty":"false", \
"sources":"false", "taxonomic":"false", "email":"tnrs@lka5jjs.orv", \
"key":"[key]"}|1|2|3|4|1|5|6|')
# Captures the URL of the prepared results file.
download_response_pattern = '^//OK\[1,\["(.*)"\],0,7\]$'
download_url_suffix = '&name=tnrs_results.txt&encoding=utf8'
def gwt_encode(str_):
    """Make str_ safe for embedding in a GWT-RPC payload.

    Every run of characters outside the whitelist (word characters, '.',
    '(', ')', space, '-') is collapsed into a single space.
    """
    whitelist_gap = r'[^\w.() -]+'
    return re.sub(whitelist_gap, ' ', str_)
def parse_response(name, pattern, response):
    """Match response against pattern and return the captured groups.

    Aborts the program (SystemExit) when the response does not match,
    echoing the offending response text for diagnosis.
    """
    parsed = re.match(pattern, response)
    if parsed: return parsed.groups()
    raise SystemExit('Invalid '+name+' response:\n'+response)
def tnrs_request(taxons, debug=False):
    '''Resolve taxon names against the TNRS web service.
    
    Drives the three-step GWT-RPC protocol: submit the names, poll the
    retrieval endpoint until the job completes, then fetch the prepared
    results file.
    
    @param taxons sequence of taxon name strings; at most max_taxons
    @param debug if True, log each request/response to stderr
    @return the urllib2 response object streaming the results download
    @raise SystemExit if a response does not match its expected pattern
    '''
    assert len(taxons) <= max_taxons
    
    # Logging
    def debug_log(label, str_=''):
        if debug: sys.stderr.write('\n'+label+':\n'+str_+'\n')
    
    ## HTTP
    # Copy the shared template: the session cookie added after the retrieval
    # step must not leak into module-level initial_headers across calls
    # (previously `headers = initial_headers` aliased and mutated it).
    headers = dict(initial_headers)
    
    def do_request(request):
        # POST one GWT-RPC payload and return (body, header text)
        debug_log('request', str(request))
        response = urllib2.urlopen(urllib2.Request(url, request, headers))
        response_str = streams.read_all(response)
        response_info = str(response.info())
        debug_log('response info', response_info)
        debug_log('response str', response_str)
        return response_str, response_info
    
    def do_repeated_request(request):
        # Poll with exponential backoff while the server answers with an
        # HTTP error (the job is presumably still running)
        pause = initial_pause
        total_pause = 0
        while True:
            total_pause += pause
            # Python 2 bare raise: re-raises the HTTPError caught on the
            # previous iteration. Unreachable before the first attempt since
            # initial_pause <= max_pause is asserted at module load.
            if total_pause > max_pause: raise # error is not temporary
            debug_log('total_pause', str(total_pause)+'s')
            time.sleep(pause) # wait for job to complete
            
            try: return do_request(request)
            except urllib2.HTTPError: pass # try again
            pause *= pause_growth_factor
    
    debug_log('Submit')
    request = submission_request_template.replace('[taxons]',
        r'\\n'.join(map(gwt_encode, taxons))) # double-escape \n
    response, response_info = do_request(request)
    key, = parse_response('submission', submission_response_pattern, response)
    debug_log('key', key)
    key_enc = gwt_encode(key)
    
    debug_log('Retrieve')
    request = retrieval_request_template.replace('[key]', key_enc)
    response, response_info = do_repeated_request(request)
    parse_response('retrieval', retrieval_response_pattern, response)
    session_id, = parse_response('retrieval info',
        retrieval_response_info_pattern, response_info)
    debug_log('session_id', session_id)
    headers['Cookie'] = 'JSESSIONID='+session_id # required by download step
    
    # The output of the retrieve step is unusable because the array has
    # different lengths depending on the taxonomic ranks present in the provided
    # taxon name. The extra download step is therefore necessary.
    
    debug_log('Prepare download')
    request = download_request_template.replace('[key]', key_enc)
    response, response_info = do_request(request)
    csv_url, = parse_response('download', download_response_pattern, response)
    csv_url += download_url_suffix
    debug_log('csv_url', csv_url)
    
    debug_log('Download')
    response = urllib2.urlopen(urllib2.Request(csv_url))
    debug_log('response info', str(response.info()))
    return response
(34-34/41)