-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathserver.py
executable file
·430 lines (375 loc) · 18 KB
/
server.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
usage: python server.py --host=<host_name> --port=<port_number> \
    --feature-json-file=<feature_json_file>
description: Run the Flask web server that handles custom feature extraction
"""
# Metadata
__author__ = 'Vincent Dowling'
__email__ = '[email protected]'
# Standard imports
import csv
import sys
import json
import copy
import time
import os
import logging
from logging import StreamHandler
from logging.handlers import TimedRotatingFileHandler
# 3rd party imports
from flask import Flask
app = Flask(__name__)
from flask import Request, request, Response
import requests
from requests.exceptions import HTTPError
import argparse
# Local imports
from rr_scorers import scorers
# Global variables
BLUEMIX_HOST = '0.0.0.0'
BLUEMIX_PORT = 80
class FcSelect(object):
    """Adds custom feature scores on top of the Retrieve & Rank /fcselect API.

    Wraps a remote Solr collection hosted by the Watson Retrieve & Rank
    service.  fcselect() augments the service's per-document feature vectors
    (and, optionally, the returned ranker-input CSV) with scores produced by
    the configured scorers; rerank() sends those augmented features to a
    trained ranker and returns the answers in the ranker's order.
    """
    def __init__(self, srs, service_url, service_username, service_password,
            cluster_id, collection_name, answer_directory, default_rerank_rows=10,
            default_search_rows=30, default_fl='id,title,text'):
        """
        Class that manages custom feature scorers

        Args:
            srs (rr_scorers.scorers.Scorers): Scorers object, which is \
                used to score individual query/document pairs
            service_url, service_username, service_password (str): Credentials \
                for the different services
            cluster_id (str): Id for the Solr Cluster
            collection_name (str): Name of the Solr Collection
            answer_directory (str): Directory where answer CSVs are written
            default_rerank_rows (int): Rows requested when re-ranking
            default_search_rows (int): Rows requested when searching
            default_fl (str): Default comma-separated Solr field list
        """
        self.scorers_ = srs
        self.service_url_ = service_url
        self.service_username_ = service_username
        self.service_password_ = service_password
        self.cluster_id_ = cluster_id
        self.collection_name_ = collection_name
        self.answer_directory_ = answer_directory
        self.default_rerank_rows_ = default_rerank_rows
        self.default_search_rows_ = default_search_rows
        self.default_fl_ = default_fl
    def fcselect(self, **kwargs):
        """
        /fcselect endpoint

        Args:
            kwargs (dict): Contains the same query params as are supported \
                by the traditional fcselect endpoint.  'q' and 'gt' are \
                required; 'rows' and 'fl' fall back to the configured \
                defaults.  If 'ranker_id' is present the request is \
                delegated to rerank().
        """
        # Re-rank the answers (dict.has_key was removed in Python 3)
        if 'ranker_id' in kwargs:
            return self.rerank(**kwargs)
        # Parameters
        q = self.get_query_value(kwargs, 'q')
        search_rows = self.get_query_value(kwargs, 'rows', self.default_search_rows_)
        gt = self.get_query_value(kwargs, 'gt')
        fl = self.get_query_value(kwargs, 'fl', self.default_fl_)
        # Fields the scorers need on each document, beyond the caller's fl.
        # Anything requested only for scoring is stripped before returning.
        required_fields = self.scorers_.get_required_fields()
        required_fields.append('featureVector')
        required_fields.extend([x.strip() for x in fl.split(',')])
        non_return_fields = set(required_fields) - {'featureVector'} - set([x.strip() for x in fl.split(',')])
        required_fl = ','.join(list(set(required_fields)))
        # Call fcselect
        params_no_rs = {'q': q, 'rows': search_rows, 'fl': required_fl, 'gt': gt, 'wt': 'json'}
        params_rs = copy.copy(params_no_rs)
        generate_header, return_rs_input = False, False
        if 'generateHeader' in kwargs:
            generate_header = kwargs.get('generateHeader')
            params_rs['generateHeader'] = generate_header if not isinstance(generate_header, list) else generate_header[0]
            generate_header = params_rs['generateHeader'] == 'true'
        if 'returnRSInput' in kwargs:
            return_rs_input = kwargs.get('returnRSInput')
            params_rs['returnRSInput'] = return_rs_input if not isinstance(return_rs_input, list) else return_rs_input[0]
            return_rs_input = True
        time_1 = time.time()
        fcselect_json = self.service_fcselect(params_no_rs)
        time_2 = time.time()
        print("Time for Service Call #1 = %.4f" % (time_2 - time_1))
        # Append the custom scores to each document's feature vector
        score_list = list()
        times = list()
        prev_time = time_2
        for i, doc in enumerate(fcselect_json.get('response', {}).get('docs', [])):
            feature_doc = self.prepare_document(doc, fl)
            fv = doc.get('featureVector')
            new_scores = self.scorers_.scores(params_rs, feature_doc)
            fv += ' ' + ' '.join('%.4f' % x if x > 0.0 else '0.0' for x in new_scores)
            fcselect_json['response']['docs'][i]['featureVector'] = fv
            # Iterate a snapshot of the keys: deleting entries while
            # iterating a live dict view raises RuntimeError on Python 3
            for field_value in list(fcselect_json['response']['docs'][i].keys()):
                if field_value in non_return_fields:
                    del fcselect_json['response']['docs'][i][field_value]
            score_list.append(new_scores)
            times.append(time.time() - prev_time)
            prev_time = time.time()
        time_3 = time.time()
        print("Time for the first 10 in the loop = %r" % times[:10])
        print("Time to extract the new features = %.4f" % (time_3 - time_2))
        # Modify RSInput: splice the new scores (and, when requested, the new
        # headers) into each CSV row before the trailing ground-truth column
        if return_rs_input:
            fcselect_json_rs = self.service_fcselect(params_rs)
            time_4 = time.time()
            print("Time for service call #2 = %.4f" % (time_4 - time_3))
            rs_input_splits = fcselect_json_rs['RSInput'].split('\n')
            rs_input_modified = ''
            score_index, rs_split_index = 0, 0
            while score_index < len(score_list) and rs_split_index < len(rs_input_splits):
                if rs_split_index == 0 and generate_header:
                    # Old headers (last column is the ground-truth label)
                    base_header = rs_input_splits[rs_split_index]
                    base_header_string = ','.join(base_header.split(',')[:-1])
                    ground_truth_header = base_header.split(',')[-1]
                    rs_split_index += 1
                    # New header (s)
                    new_headers = self.scorers_.get_headers()
                    new_header_string = ','.join(new_headers)
                    # Combine
                    rs_input_modified += base_header_string + ','
                    rs_input_modified += new_header_string + ','
                    rs_input_modified += ground_truth_header + '\n'
                else:
                    # Get the splits
                    rs_input_split = rs_input_splits[rs_split_index]
                    if rs_input_split.strip() != '':
                        base_feat_string = ','.join(rs_input_split.split(',')[:-1])
                        relevance = rs_input_split.split(',')[-1]
                        rs_split_index += 1
                        # Get new scores
                        new_scores = score_list[score_index]
                        new_score_string = ','.join('%.4f' % x if x > 0.0 else '0.0' for x in new_scores)
                        score_index += 1
                        # Combine
                        rs_input_modified += base_feat_string + ','
                        rs_input_modified += new_score_string + ','
                        rs_input_modified += relevance + '\n'
                    else:
                        # Blank line: skip it and the corresponding score
                        rs_split_index += 1
                        score_index += 1
            fcselect_json['RSInput'] = rs_input_modified
            time_5 = time.time()
            print("Time for Creating RS String = %.4f" % (time_5 - time_4))
        return fcselect_json
    def get_query_value(self, dct, arg, default_value=None):
        """Return a scalar query value from dct, unwrapping one-element lists.

        Args:
            dct (dict): Query parameters; values may be lists (as produced \
                by Flask's request.args)
            arg (str): Key to look up
            default_value: Returned when arg is absent; when None and arg \
                is missing, ValueError is raised

        Raises:
            ValueError: arg is missing and no default was provided
        """
        if arg not in dct:
            if default_value is not None:
                return default_value
            else:
                raise ValueError('arg = %r is not in dct = %r' % (arg, dct))
        val = dct[arg]
        if isinstance(val, list):
            return val[0]
        else:
            return val
    def rerank(self, **kwargs):
        """ Re-rank the incoming query using the ranker named by 'ranker_id' """
        # Extract the parameters
        ranker_id = self.get_query_value(kwargs, 'ranker_id')
        q = self.get_query_value(kwargs, 'q')
        search_rows = self.get_query_value(kwargs, 'search_rows', self.default_search_rows_)
        # Get the appropriate values
        fl = self.get_query_value(kwargs, 'fl', self.default_fl_)
        required_fields = self.scorers_.get_required_fields()
        required_fields.append('featureVector')
        required_fields.extend([x.strip() for x in fl.split(',')])
        required_fl = ','.join(list(set(required_fields)))
        # Make a call to fcselect and get the features plus other parameters
        fcselect_params = {'q': q, 'rows': search_rows, 'fl': required_fl, 'wt': 'json', \
                           'generateHeader': 'true', 'returnRSInput':'true'}
        fcselect_json = self.service_fcselect(fcselect_params)
        app.logger.debug('JSON Request / Response = %r / %r' % (fcselect_params, fcselect_json))
        if not isinstance(fcselect_json, dict):
            raise ValueError('Response object %r is type %r and is not a dictionary' % (fcselect_json, type(fcselect_json)))
        elif 'RSInput' not in fcselect_json:
            raise ValueError('Response object %r does not contain key "RSInput"' % fcselect_json)
        else:
            rs_input_splits = fcselect_json['RSInput'].split('\n')
            if len(rs_input_splits) == 0:
                raise ValueError('RSInput value %r is not split by new line character' % fcselect_json['RSInput'])
            full_header = rs_input_splits[0] + ',' + ','.join(self.scorers_.get_headers())
        # Score the documents/queries
        features = list()
        for i, doc in enumerate(fcselect_json.get('response', {}).get('docs', [])):
            feature_doc = self.prepare_document(doc, fl)
            fv = doc.get('featureVector').split(' ')
            new_scores = self.scorers_.scores(fcselect_params, feature_doc)
            fv.extend([str(x) for x in new_scores])
            app.logger.debug('Doc # / Doc / Scores = %r / %r / %r' % (i, feature_doc, fv))
            features.append((doc.get('id'), fv))
        # Write to a CSV
        file_path = os.path.join(self.answer_directory_, 'answer_%d.csv' % time.time())
        self.write_to_answer_csv(file_path, full_header.split(','), features)
        # Call the re-rank API; close the answer file once the upload is done
        with open(file_path, 'rb') as answer_file:
            rerank_resp = requests.post('%s/v1/rankers/%s/rank' % (self.service_url_, ranker_id), \
                                        auth=(self.service_username_, self.service_password_), \
                                        headers={'Accept':'application/json'}, \
                                        files={'answer_data': answer_file})
        if rerank_resp.ok:
            print('Response is ok')
            if 'answers' not in rerank_resp.json():
                raise ValueError('No answers contained in response=%r' % rerank_resp.json())
            else:
                answers = rerank_resp.json()['answers']
                return self.order_answers_by_id(answers, fl)
        else:
            # raise_for_status() itself raises HTTPError for 4xx/5xx responses
            rerank_resp.raise_for_status()
    def order_answers_by_id(self, answers, fl):
        """ Retrieve the reranked answers by id, preserving the ranker's order """
        # Materialise the ids as a list: a map() iterator would be exhausted
        # after the first traversal on Python 3, and it is used twice below
        ids = [a['answer_id'] for a in answers]
        id_to_answer = {a['answer_id']:a for a in answers}
        id_to_index = {answer_id: i for i, answer_id in enumerate(ids)}
        fq = ' '.join(['id:%s' % (str(answer_id)) for answer_id in ids])
        params = {'q': fq, 'fl':fl, 'wt':'json'}
        resps = self.service_select(params=params)
        modified_docs = list()
        for doc in resps['response']['docs']:
            answer = id_to_answer.get(doc['id'])
            order = id_to_index.get(doc['id'])
            modified_doc = copy.copy(doc)
            modified_doc['confidence'] = answer['confidence']
            # Insert at the ranker-assigned position rather than appending
            modified_docs.insert(order, modified_doc)
        resps['response']['docs'] = modified_docs
        return resps
    def service_fcselect(self, params, timeout=10):
        """ POST to the collection's /fcselect endpoint and return its JSON """
        url = '%s/v1/solr_clusters/%s/solr/%s/fcselect' % (self.service_url_,
            self.cluster_id_, self.collection_name_)
        resp = requests.post(url, data=params, auth=(self.service_username_, self.service_password_), timeout=timeout)
        if resp.ok:
            return resp.json()
        else:
            # raise_for_status() raises HTTPError; it never returns a value,
            # so the old 'raise resp.raise_for_status()' form was a bug
            resp.raise_for_status()
    def service_select(self, params, timeout=10):
        """ GET the collection's /select endpoint and return its JSON """
        url = '%s/v1/solr_clusters/%s/solr/%s/select' % (self.service_url_,
            self.cluster_id_, self.collection_name_)
        resp = requests.get(url, params=params, auth=(self.service_username_, self.service_password_), timeout=timeout)
        if resp.ok:
            return resp.json()
        else:
            resp.raise_for_status()
    def prepare_document(self, doc, fl):
        """
        Prepare a document to be consumed by a scorer

        args:
            doc (dict): This is the object that is returned in the response \
                by the /fcselect API
            fl (str): Comma-separated field list; 'featureVector' is excluded

        Returns:
            dict: Requested fields with one-element lists unwrapped to scalars
        """
        modified_doc = dict()
        for fn in set(fl.split(',')) - {'featureVector'}:
            fv = doc.get(fn)
            modified_doc[fn] = fv if not isinstance(fv, list) else fv[0]
        return modified_doc
    def write_to_answer_csv(self, file_path, headers, scores):
        """
        Write to an answer CSV

        Args:
            file_path (str): Path to the output file
            headers (list): Headers to write
            scores (list): List of (doc_id, feature_scores) tuples
        """
        with open(file_path, 'wt') as outfile:
            writer = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_NONE)
            writer.writerow(headers)
            for (doc_id, feature_scores) in scores:
                writer.writerow([doc_id] + feature_scores)
#endclass FcSelect
def parse_args():
    """Parse the command line arguments.

    Returns:
        tuple: (host, port, feature_json_file, service_url, service_username,
            service_password, cluster_id, collection_name, debug,
            answer_directory)

    Raises:
        ValueError: host/port missing in a non-Bluemix run, or the answer
            directory is missing or does not exist
    """
    parser = argparse.ArgumentParser(description='Flask server that handles ' + \
        'custom requests')
    parser.add_argument('--host', type=str, default=None, help='Host name for the server')
    parser.add_argument('--port', type=int, default=None, help='Port for the server')
    parser.add_argument('--feature-json-file', type=str, help='Path to config ' + \
        'file containing the scorers')
    parser.add_argument('--cluster-id', type=str, help='Id of the Solr Cluster')
    parser.add_argument('--collection-name', type=str, help='Name of the ' + \
        'Solr collection')
    parser.add_argument('--service-url', type=str, help='R&R Service URL')
    parser.add_argument('--service-username', type=str, help='R&R Service Username')
    parser.add_argument('--service-password', type=str, help='R&R Service Password')
    parser.add_argument('--answer-directory', type=str, help='Directory to write answer CSVs to')
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--use-bluemix', action='store_true', default=False)
    ns = parser.parse_args()
    if ns.use_bluemix:
        # On Bluemix the port is injected through the environment
        host = "0.0.0.0"
        port = int(os.getenv('VCAP_APP_PORT', '5000'))
    else:
        if not ns.host or not ns.port:
            raise ValueError('Host / Port = %r / %r is not properly configured for non-bluemix environment' % (ns.host, ns.port))
        host, port = ns.host, ns.port
    answer_directory = ns.answer_directory
    # Guard against None first: os.path.isdir(None) raises TypeError rather
    # than producing the clear configuration error below
    if answer_directory is None or not os.path.isdir(answer_directory):
        raise ValueError('Answer Directory %r does not exist' % answer_directory)
    return host, port, ns.feature_json_file, ns.service_url, ns.service_username, ns.service_password, ns.cluster_id, \
           ns.collection_name, ns.debug, answer_directory
@app.route('/fcselect')
def fcselect():
    """HTTP endpoint: delegate the query params to FcSelect.fcselect().

    Returns the augmented fcselect JSON on success.  An HTTPError from the
    downstream service is relayed with its own status code and payload;
    any other exception becomes a 500 with the error message.
    """
    selecter = app.selecter
    try:
        app.logger.info('Received request with args=%r' % request.args)
        resp = selecter.fcselect(**request.args)
        return Response(json.dumps(resp), status=200, mimetype='application/json')
    # 'except E as e' (not 'except E, e') so the handler also runs on Python 3;
    # str(e) replaces e.message, which no longer exists there
    except HTTPError as e:
        app.logger.error('Exception : %r' % e)
        obj = {'message': str(e), 'response': json.loads(e.response.content)}
        return Response(json.dumps(obj), status=e.response.status_code, mimetype='application/json')
    except Exception as e:
        app.logger.error('Exception : %r' % e)
        obj = {'message': str(e)}
        return Response(json.dumps(obj), status=500, mimetype='application/json')
@app.route('/ping')
def ping():
    """Health-check endpoint: reply 200 with an empty body."""
    return Response(status=200)
@app.route('/test')
def test():
    """Smoke-test endpoint: reply 200 with a fixed JSON message."""
    payload = json.dumps({'message':'This is a test'})
    return Response(payload, status=200, mimetype='application/json')
def setup_logger():
    """Attach stream and rotating-file handlers to the Flask app logger.

    Handlers: INFO to stdout and a daily-rotated logs/app.log, DEBUG to a
    12-hourly-rotated logs/debug.log, ERROR to stderr and a daily-rotated,
    formatted logs/error.log.
    """
    # The rotating file handlers fail to open their files if the log
    # directory does not exist yet
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    # Let DEBUG records reach the handlers; without this the per-handler
    # levels below have no effect when the logger's own level is higher
    app.logger.setLevel(logging.DEBUG)
    # Streaming Info logger
    str_info_h = StreamHandler(stream=sys.stdout)
    str_info_h.setLevel(logging.INFO)
    app.logger.addHandler(str_info_h)
    # File info logger
    file_info_h = TimedRotatingFileHandler('logs/app.log', when='d', interval=1)
    file_info_h.setLevel(logging.INFO)
    app.logger.addHandler(file_info_h)
    # Debug logger
    debug_h = TimedRotatingFileHandler('logs/debug.log', when='h', interval=12)
    debug_h.setLevel(logging.DEBUG)
    app.logger.addHandler(debug_h)
    # Error logger --> stderr
    error_h = StreamHandler(stream=sys.stderr)
    error_h.setLevel(logging.ERROR)
    app.logger.addHandler(error_h)
    # Error Logger --> file (only this handler carries a formatter)
    error_file_h = TimedRotatingFileHandler('logs/error.log', when='d', interval=1)
    error_file_h.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    ))
    error_file_h.setLevel(logging.ERROR)
    app.logger.addHandler(error_file_h)
if __name__ == "__main__":
    # Entry point: parse CLI args, configure logging, bind the scorer
    # object to the app, then serve.
    print('Starting script...')
    (host, port, feature_path, service_url, username, password,
     cluster_id, collection, debug_flag, answer_dir) = parse_args()
    # Configure the app and its logging
    app.debug = debug_flag
    setup_logger()
    app.logger.info('Service Url = %s' % service_url)
    app.logger.info('Solr Cluster Id = %s' % cluster_id)
    app.logger.info('Solr Collection = %s' % collection)
    # Bind the selecter object to the app so route handlers can reach it
    custom_scorers = scorers.Scorers(feature_path)
    app.selecter = FcSelect(custom_scorers, service_url, username, password,
                            cluster_id, collection, answer_dir)
    # Run the app
    app.logger.info('App starting on host=%s, port=%s' % (host, port))
    app.run(host=host, port=port)