File: _service_access_layer.py

"""
Utils for updating job state/progress and posting results to web services
"""

from .utils import to_sal_summary
from pbcommand.pb_io import load_report_from
from .models import (SMRTServiceBaseError,
                     JobResult, JobStates, JobExeError, JobTypes,
                     ServiceResourceTypes, ServiceJob, JobEntryPoint,
                     JobTask)
from pbcommand.utils import get_dataset_metadata
from pbcommand.models import (FileTypes,
                              DataSetFileType,
                              DataStore,
                              DataStoreFile)
import base64
import datetime
import json
import logging
import os
import pprint
import time
import warnings

import pytz
import requests
from requests import RequestException
from requests.exceptions import HTTPError, ConnectionError
from urllib3.exceptions import ProtocolError, InsecureRequestWarning  # pylint: disable=import-error
# To disable the ssl cert check warning
import urllib3
urllib3.disable_warnings(InsecureRequestWarning)  # pylint: disable=no-member


log = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# log.addHandler(logging.NullHandler())  # to prevent the annoying 'No handlers...' msg

# Everything else is considered non-public
__all__ = [
    'ServiceAccessLayer',
    'SmrtLinkAuthClient',
]


class Constants:
    HEADERS = {
        'Content-type': 'application/json',
        'Accept-Encoding': 'gzip, deflate'
    }


def __jsonable_request(request_method, headers):
    def wrapper(url, d_):
        data = json.dumps(d_)
        # FIXME 'verify' should be passed in
        return request_method(url, data=data, headers=headers, verify=False)
    return wrapper


def _post_requests(headers):
    return __jsonable_request(requests.post, headers)


def _put_requests(headers):
    return __jsonable_request(requests.put, headers)


def _get_requests(headers):
    def wrapper(url):
        return requests.get(url, headers=headers, verify=False)
    return wrapper


def _parse_base_service_error(response):
    """:type response: requests.Response

    Don't trust the services. Try to parse the response into the SMRT Server
    Error datastructure (even if a 200 is returned).
    """
    if response.ok:
        try:
            d = response.json()
            emsg = SMRTServiceBaseError.from_d(d)
            raise emsg
        except (KeyError, TypeError):
            # couldn't parse response -> error,
            # so everything is fine
            return response
    else:
        return response


def __get_headers(h):
    if h is None:
        return Constants.HEADERS
    return h


def _process_rget(total_url, ignore_errors=False, headers=None):
    """Process get request and return JSON response. Raise if not successful"""
    r = _get_requests(__get_headers(headers))(total_url)
    _parse_base_service_error(r)
    if not r.ok and not ignore_errors:
        log.warning(
            "Failed ({s}) GET to {x}".format(
                x=total_url,
                s=r.status_code))
    r.raise_for_status()
    j = r.json()
    return j


def _process_rget_or_empty(total_url, ignore_errors=False, headers=None):
    """
    Process get request and return JSON response if populated, otherwise None.
    Raise if not successful
    """
    r = _get_requests(__get_headers(headers))(total_url)
    if len(r.content) > 0:
        _parse_base_service_error(r)
    if not r.ok and not ignore_errors:
        log.warning(
            "Failed ({s}) GET to {x}".format(
                x=total_url,
                s=r.status_code))
    r.raise_for_status()
    if len(r.content) > 0:
        object_d = r.json()
        if len(object_d) > 0:
            return object_d
    return None


def _process_rget_with_transform(func, ignore_errors=False):
    """Post process the JSON result (if successful) with F(json_d) -> T"""
    def wrapper(total_url, headers=None):
        j = _process_rget(
            total_url,
            ignore_errors=ignore_errors,
            headers=headers)
        return func(j)
    return wrapper


def _process_rget_with_jobs_transform(
        total_url, ignore_errors=False, headers=None):
    # defining an internal method, because this is used in several places
    jobs_d = _process_rget(
        total_url,
        ignore_errors=ignore_errors,
        headers=headers)
    # Sort by Id desc so newer jobs show up first
    jobs = [ServiceJob.from_d(job_d) for job_d in jobs_d]
    return sorted(jobs, key=lambda x: x.id, reverse=True)


def _process_rget_or_none(func, ignore_errors=False):
    """
    Apply the transform func to the output of the GET request if it was
    successful, else return None.

    This is intended to be used for looking up Results by Id where a 404
    may be returned.
    """
    def wrapper(total_url, headers):
        try:
            return _process_rget_with_transform(
                func, ignore_errors)(total_url, headers)
        except (RequestException, SMRTServiceBaseError):
            # FIXME
            # this should be a tighter exception case
            # only look for 404
            return None

    return wrapper


def _process_rget_with_job_transform_or_none(total_url, headers=None):
    return _process_rget_or_none(ServiceJob.from_d)(total_url, headers=headers)


def __process_creatable_to_json(f):
    def wrapper(total_url, payload_d, headers):
        r = f(__get_headers(headers))(total_url, payload_d)
        _parse_base_service_error(r)
        # FIXME This should be strict to only return a 201
        if r.status_code not in (200, 201, 202, 204):
            log.warning(
                "Failed ({s} to call {u}".format(
                    u=total_url,
                    s=r.status_code))
            log.warning("payload")
            log.warning("\n" + pprint.pformat(payload_d))
        r.raise_for_status()
        j = r.json()
        return j
    return wrapper


_process_rpost = __process_creatable_to_json(_post_requests)
_process_rput = __process_creatable_to_json(_put_requests)


def _process_rpost_with_transform(func):
    def wrapper(total_url, payload_d, headers=None):
        j = _process_rpost(total_url, payload_d, headers)
        return func(j)
    return wrapper


def _process_rput_with_transform(func):
    def wrapper(total_url, payload_d, headers=None):
        j = _process_rput(total_url, payload_d, headers)
        return func(j)
    return wrapper


def _to_url(base, ext):
    return "".join([base, ext])


def _null_func(x):
    # Pass-through func
    return x


def _transform_job_tasks(j):
    return [JobTask.from_d(d) for d in j]


def _get_job_by_id_or_raise(sal, job_id, error_klass,
                            error_message_extras=None):
    job = sal.get_job_by_id(job_id)

    if job is None:
        details = "" if error_messge_extras is None else error_messge_extras
        base_msg = "Failed to find job {i}".format(i=job_id)
        emsg = " ".join([base_msg, details])
        raise error_klass(emsg)

    return job


# FIXME this overlaps with job_poller.py, which is more fault-tolerant
def _block_for_job_to_complete(sal, job_id, time_out=1200, sleep_time=2,
                               abort_on_interrupt=True,
                               retry_on_failure=False):
    """
    Waits for job to complete

    :param sal: ServiceAccessLayer
    :param job_id: Job Id
    :param time_out: Total runtime before aborting
    :param sleep_time: polling interval (in sec)

    :rtype: JobResult
    :raises: KeyError if job is not initially found, or JobExeError
    if the job fails during the polling process or times out
    """

    try:
        external_job_id = None
        time.sleep(sleep_time)
        job = _get_job_by_id_or_raise(sal, job_id, KeyError)
        log.info("SMRT Link job {i} ({u})".format(i=job.id, u=job.uuid))
        log.debug("time_out = {t}".format(t=time_out))

        error_msg = ""
        job_result = JobResult(job, 0, error_msg)
        started_at = time.time()

        # number of polling steps
        i = 0
        while True:
            run_time = time.time() - started_at
            if external_job_id is None and job.external_job_id is not None:
                external_job_id = job.external_job_id
                log.info("Cromwell workflow ID is %s", external_job_id)

            if job.state in JobStates.ALL_COMPLETED:
                break

            i += 1
            time.sleep(sleep_time)

            msg = "Running pipeline {n} (job {j}) state: {s} runtime:{r:.2f} sec {i} iteration".format(
                n=job.name, j=job.id, s=job.state, r=run_time, i=i)
            log.debug(msg)
            # making the exceptions different to distinguish between an initial
            # error and a "polling" error. Adding some msg details
            # FIXME this should distinguish between failure modes - an HTTP 503
            # or 401 is different from a 404 in this context
            try:
                job = _get_job_by_id_or_raise(
                    sal, job_id, JobExeError, error_message_extras=msg)
            except JobExeError as e:
                if retry_on_failure:
                    log.warning(e)
                    log.warning("Polling job {i} failed".format(i=job_id))
                    continue
                else:
                    raise

            # FIXME, there's currently not a good way to get errors for jobs
            job_result = JobResult(job, run_time, "")
            if time_out is not None:
                if run_time > time_out:
                    raise JobExeError(
                        "Exceeded runtime {r} of {t}. {m}".format(
                            r=run_time, t=time_out, m=msg))

        return job_result
    except KeyboardInterrupt:
        if abort_on_interrupt:
            sal.terminate_job_id(job_id)
        raise
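
# Illustrative sketch (not part of the original module): a typical blocking
# poll, assuming a ServiceAccessLayer instance `sal`, an existing job id, and
# that JobResult exposes `job` and `run_time` fields (it is constructed as
# JobResult(job, run_time, errors) above).
#
#     result = _block_for_job_to_complete(sal, 1234, time_out=600,
#                                         sleep_time=5)
#     if result.job.state in JobStates.ALL_COMPLETED:
#         log.info("Job %s finished in %.1f sec",
#                  result.job.id, result.run_time)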


# Make this consistent somehow. Maybe define 'shortname' in the core model?
# Martin is doing this for the XML file names
DATASET_METATYPES_TO_ENDPOINTS = {
    FileTypes.DS_SUBREADS_H5.file_type_id: "hdfsubreads",
    FileTypes.DS_SUBREADS.file_type_id: "subreads",
    FileTypes.DS_ALIGN.file_type_id: "alignments",
    FileTypes.DS_REF.file_type_id: "references",
    FileTypes.DS_BARCODE.file_type_id: "barcodes",
    FileTypes.DS_CCS.file_type_id: "ccsreads",
    FileTypes.DS_CONTIG.file_type_id: "contigs",
    FileTypes.DS_ALIGN_CCS.file_type_id: "cssalignments",
    FileTypes.DS_GMAP_REF.file_type_id: "gmapreferences"}


def _get_endpoint_or_raise(ds_type):
    if ds_type in DATASET_METATYPES_TO_ENDPOINTS:
        return DATASET_METATYPES_TO_ENDPOINTS[ds_type]
    raise KeyError("Unsupported datasettype {t}. Supported values {v}".format(
        t=ds_type, v=list(DATASET_METATYPES_TO_ENDPOINTS.keys())))


def _job_id_or_error(job_or_error, custom_err_msg=None):
    """
    Extract job id from job creation service (by type)
    or Raise exception from an EngineJob response

    :raises: JobExeError
    """
    if isinstance(job_or_error, ServiceJob):
        return job_or_error.id
    else:
        emsg = job_or_error.get('message', "Unknown")
        if custom_err_msg is not None:
            emsg += " {f}".format(f=custom_err_msg)
        raise JobExeError(
            "Failed to create job. {e}. Raw Response {x}".format(
                e=emsg, x=job_or_error))


def _to_ds_file(d):
    # is_chunked isn't exposed at the service level
    return DataStoreFile(d['uuid'], d['sourceId'], d['fileTypeId'], d['path'],
                         is_chunked=False, name=d.get("name", ""), description=d.get("description", ""))


def _to_datastore(dx):
    # Friction to get around service endpoint not returning a list of files
    ds_files = [_to_ds_file(d) for d in dx]
    return DataStore(ds_files)


def _to_job_report_files(dx):
    return [{u"reportTypeId": d["reportTypeId"],
             u"dataStoreFile": _to_ds_file(d["dataStoreFile"])} for d in dx]


def _to_entry_points(d):
    return [JobEntryPoint.from_d(i) for i in d]


def _get_all_report_attributes(
        sal_get_reports_func, sal_get_reports_details_func, job_id):
    """Util func for getting report Attributes

    Note, this assumes that only one report type has been created. This is
    probably not a great idea. Should re-evaluate this.
    """
    report_datafiles = sal_get_reports_func(job_id)
    report_uuids = [list(r.values())[0].uuid for r in report_datafiles]
    reports = [
        sal_get_reports_details_func(
            job_id,
            r_uuid) for r_uuid in report_uuids]
    all_report_attributes = {}

    for r in reports:
        for x in r['attributes']:
            all_report_attributes[x['id']] = x['value']

    return all_report_attributes
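
# Illustrative sketch of the flattening above (the report payload is
# hypothetical): each report's JSON carries an "attributes" list, which is
# merged into a single {id: value} dict across all reports.
#
#     report_json = {"attributes": [
#         {"id": "mapping_stats.mapped_reads", "value": 10000},
#         {"id": "mapping_stats.mean_length", "value": 1200.5}]}
#     # -> {"mapping_stats.mapped_reads": 10000,
#     #     "mapping_stats.mean_length": 1200.5}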


def _to_relative_tasks_url(job_type):
    def wrapper(job_id_or_uuid):
        return "/".join([ServiceAccessLayer.ROOT_JOBS,
                         job_type, str(job_id_or_uuid), "tasks"])
    return wrapper


class ServiceAccessLayer:  # pragma: no cover
    """
    General Client Access Layer for interfacing with the job types on
    SMRT Link Analysis Services.  This API only supports insecure (HTTP)
    access to localhost.

    As of 10-02-2018, this should only be used (minimally) for internal purposes. All
    access to the Services should be done via SmrtLinkAuthClient.
    """

    ROOT_SL = "/smrt-link"
    ROOT_JM = ROOT_SL + "/job-manager"
    ROOT_JOBS = ROOT_JM + "/jobs"
    ROOT_MJOBS = ROOT_JM + "/multi-jobs"
    ROOT_RUNS = ROOT_SL + "/runs"
    ROOT_SAMPLES = ROOT_SL + "/samples"
    ROOT_DS = "/smrt-link/datasets"
    ROOT_PT = '/smrt-link/resolved-pipeline-templates'

    # in sec when blocking to run a job
    JOB_DEFAULT_TIMEOUT = 60 * 30

    def __init__(self, base_url, port, debug=False, sleep_time=2):
        """

        :param base_url: base url of the SL Server.  This MUST be either 'localhost' or 'http://localhost'
        :param port: port of the SL server
        :param debug: set improved debugging output on Services request failures
        :param sleep_time: sleep time (in seconds) between polling for job status
        """
        self.base_url = self._to_base_url(base_url)
        self.port = port
        # This will display verbose details with respect to the failed request
        self.debug = debug
        self._sleep_time = sleep_time

    def _get_headers(self):
        return Constants.HEADERS

    def _to_base_url(self, h):
        if h not in {"http://localhost", "localhost"}:
            raise NotImplementedError(
                "This API only supports HTTP connections to localhost")
        prefix = "http://"
        return h if h.startswith(prefix) else prefix + h

    @property
    def uri(self):
        return "{b}:{u}".format(b=self.base_url, u=self.port)

    def _to_url(self, rest):
        return _to_url(self.uri, rest)

    def __repr__(self):
        return "<{k} {u} >".format(k=self.__class__.__name__, u=self.uri)

    def to_summary(self):
        """
        Returns a summary of System status, DataSets, and Jobs in the system

        :rtype: str
        """
        return to_sal_summary(self)

    def get_status(self):
        """Get status of the server

        :rtype: dict
        """
        # This should be converted to a concrete typed object
        return _process_rget(_to_url(self.uri, "/status"),
                             headers=self._get_headers())

    def get_job_by_type_and_id(self, job_type, job_id):
        return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}".format(
            i=job_id, t=job_type, p=ServiceAccessLayer.ROOT_JOBS)), headers=self._get_headers())

    def get_job_by_id(self, job_id):
        """Get a Job by int id"""
        # FIXME. Make this an internal method. It's ambiguous which job type
        # you're asking for.
        return _process_rget_with_job_transform_or_none(_to_url(
            self.uri, "{r}/{i}".format(i=job_id, r=ServiceAccessLayer.ROOT_JOBS)), headers=self._get_headers())

    def _get_job_resource_type(self, job_type, job_id, resource_type_id):
        # grab the datastore or the reports
        _d = dict(
            t=job_type,
            i=job_id,
            r=resource_type_id,
            p=ServiceAccessLayer.ROOT_JOBS)
        return _process_rget_with_job_transform_or_none(
            _to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)), headers=self._get_headers())

    def _get_job_resource_type_with_transform(
            self, job_type, job_id, resource_type_id, transform_func):
        _d = dict(
            t=job_type,
            i=job_id,
            r=resource_type_id,
            p=ServiceAccessLayer.ROOT_JOBS)
        return _process_rget_with_transform(transform_func)(
            _to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)), headers=self._get_headers())

    def _get_jobs_by_job_type(self, job_type, query=None):
        base_url = "{p}/{t}".format(t=job_type, p=ServiceAccessLayer.ROOT_JOBS)
        if query is not None:
            base_url = "".join([base_url, "?", query])
        return _process_rget_with_jobs_transform(_to_url(self.uri, base_url),
                                                 headers=self._get_headers())

    def get_multi_analysis_jobs(self):
        return _process_rget_with_jobs_transform(_to_url(self.uri, "{p}/{t}".format(
            t="multi-analysis", p=ServiceAccessLayer.ROOT_MJOBS)), headers=self._get_headers())

    def get_multi_analysis_job_by_id(self, int_or_uuid):
        return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}".format(
            t="multi-analysis", p=ServiceAccessLayer.ROOT_MJOBS, i=int_or_uuid)), headers=self._get_headers())

    def get_multi_analysis_job_children_by_id(self, multi_job_int_or_uuid):
        return _process_rget_with_jobs_transform(
            _to_url(self.uri,
                    "{p}/{t}/{i}/jobs".format(t="multi-analysis",
                                              p=ServiceAccessLayer.ROOT_MJOBS,
                                              i=multi_job_int_or_uuid)),
            headers=self._get_headers())

    def get_all_analysis_jobs(self):
        return _process_rget_with_jobs_transform(
            _to_url(self.uri, "{p}/analysis-jobs".format(
                p=ServiceAccessLayer.ROOT_JM)),
            headers=self._get_headers())

    def get_analysis_jobs(self, query=None):
        return self._get_jobs_by_job_type(JobTypes.ANALYSIS, query=query)

    def get_cromwell_jobs(self, query=None):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.CROMWELL, query=query)

    def get_import_dataset_jobs(self, query=None):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.IMPORT_DS, query=query)

    def get_merge_dataset_jobs(self, query=None):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.MERGE_DS, query=query)

    def get_fasta_convert_jobs(self, query=None):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.CONVERT_FASTA, query=query)

    def get_analysis_job_by_id(self, job_id):
        """Get an Analysis job by id or UUID or return None

        :rtype: ServiceJob
        """
        return self.get_job_by_type_and_id(JobTypes.ANALYSIS, job_id)

    def get_import_job_by_id(self, job_id):
        return self.get_job_by_type_and_id(JobTypes.IMPORT_DS, job_id)

    def get_analysis_job_datastore(self, job_id):
        """Get DataStore output from analysis job"""
        # this doesn't work the list is sli
        return self._get_job_resource_type_with_transform(
            "analysis", job_id, ServiceResourceTypes.DATASTORE, _to_datastore)

    def _to_dsf_id_url(self, job_id, dsf_uuid):
        u = "/".join([ServiceAccessLayer.ROOT_JOBS, "analysis",
                      str(job_id), ServiceResourceTypes.DATASTORE, dsf_uuid])
        return _to_url(self.uri, u)

    def get_analysis_job_datastore_file(self, job_id, dsf_uuid):
        return _process_rget_or_none(_to_ds_file)(
            self._to_dsf_id_url(job_id, dsf_uuid), headers=self._get_headers())

    def get_analysis_job_datastore_file_download(
            self, job_id, dsf_uuid, output_file=None):
        """
        Download a DataStore file to a local output file

        :param job_id:
        :param dsf_uuid:
        :param output_file: if None, the file name from the server (content-disposition) will be used.
        :return:
        """
        url = "{}/download".format(self._to_dsf_id_url(job_id, dsf_uuid))
        dsf = self.get_analysis_job_datastore_file(job_id, dsf_uuid)

        default_name = "download-job-{}-dsf-{}".format(job_id, dsf_uuid)

        if dsf is not None:
            r = requests.get(
                url,
                stream=True,
                verify=False,
                headers=self._get_headers())
            if output_file is None:
                try:
                    # 'attachment; filename="job-106-be2b5106-91dc-4ef9-b199-f1481f88b7e4-file-024.subreadset.xml'
                    raw_header = r.headers.get('content-disposition')
                    local_filename = raw_header.split(
                        "filename=")[-1].replace('"', '')
                except (TypeError, IndexError, KeyError, AttributeError):
                    local_filename = default_name

            else:
                local_filename = output_file

            with open(local_filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
            r.close()
            return local_filename
        else:
            # This should probably return None to be consistent with the
            # current API
            raise KeyError(
                "Unable to get DataStore file {} from Job {}".format(
                    dsf_uuid, job_id))
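
    # Illustrative usage (the job id is hypothetical; the UUID is the example
    # from the comment above; the port is an assumption): download a datastore
    # file, letting the server's content-disposition choose the local name.
    #
    #     sal = ServiceAccessLayer("localhost", 8070)
    #     path = sal.get_analysis_job_datastore_file_download(
    #         1234, "be2b5106-91dc-4ef9-b199-f1481f88b7e4")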

    def get_analysis_job_reports(self, job_id):
        """Get list of DataStore ReportFile types output from analysis job"""
        return self._get_job_resource_type_with_transform(
            JobTypes.ANALYSIS, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)

    def get_analysis_job_reports_objs(self, job_id):
        """
        Get a List of Report Instances

        :param job_id:
        :rtype: list[Report]
        :return: List of Reports
        """
        job_reports = self.get_analysis_job_reports(job_id)
        return [self.get_analysis_job_report_obj(
            job_id, x['dataStoreFile'].uuid) for x in job_reports]

    def __get_report_d(self, job_id, report_uuid, processor_func):
        _d = dict(t=JobTypes.ANALYSIS, i=job_id, r=ServiceResourceTypes.REPORTS, p=ServiceAccessLayer.ROOT_JOBS,
                  u=report_uuid)
        u = "{p}/{t}/{i}/{r}/{u}".format(**_d)
        return _process_rget_or_none(processor_func)(
            _to_url(self.uri, u), headers=self._get_headers())

    def get_analysis_job_report_details(self, job_id, report_uuid):
        return self.__get_report_d(job_id, report_uuid, lambda x: x)

    def get_analysis_job_report_obj(self, job_id, report_uuid):
        """
        Fetch a SMRT Link Report Instance from a Job Id and Report UUID

        There are inconsistencies in the API, hence the naming of the method is a bit verbose.
        :rtype: Report
        """
        return self.__get_report_d(job_id, report_uuid, load_report_from)

    def get_analysis_job_report_attrs(self, job_id):
        """Return a dict of all the Report Attributes"""
        return _get_all_report_attributes(
            self.get_analysis_job_reports, self.get_analysis_job_report_details, job_id)

    def get_import_job_reports(self, job_id):
        return self._get_job_resource_type_with_transform(
            JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)

    def get_import_job_report_details(self, job_id, report_uuid):
        # It would have been better to return a Report instance, not raw json
        _d = dict(
            t=JobTypes.IMPORT_DS,
            i=job_id,
            r=ServiceResourceTypes.REPORTS,
            p=ServiceAccessLayer.ROOT_JOBS,
            u=report_uuid)
        return _process_rget_or_none(lambda x: x)(_to_url(
            self.uri, "{p}/{t}/{i}/{r}/{u}".format(**_d)), headers=self._get_headers())

    def get_import_job_report_attrs(self, job_id):
        """Return a dict of all the Report Attributes"""
        return _get_all_report_attributes(
            self.get_import_job_reports, self.get_import_job_report_details, job_id)

    def get_analysis_job_entry_points(self, job_id):
        return self._get_job_resource_type_with_transform(
            JobTypes.ANALYSIS, job_id, ServiceResourceTypes.ENTRY_POINTS, _to_entry_points)

    def get_import_dataset_job_datastore(self, job_id):
        """Get a List of Service DataStore files from an import DataSet job"""
        return self._get_job_resource_type(
            JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.DATASTORE)

    def get_merge_dataset_job_datastore(self, job_id):
        return self._get_job_resource_type(
            JobTypes.MERGE_DS, job_id, ServiceResourceTypes.DATASTORE)

    def import_dataset(self,
                       path,
                       avoid_duplicate_import=False):
        # This returns a job resource
        url = self._to_url(
            "{p}/{x}".format(x=JobTypes.IMPORT_DS, p=ServiceAccessLayer.ROOT_JOBS))
        d = {
            "path": path,
            "avoidDuplicateImport": avoid_duplicate_import
        }
        return _process_rpost_with_transform(
            ServiceJob.from_d)(url, d, headers=self._get_headers())

    def run_import_dataset(self,
                           path_to_xml,
                           avoid_duplicate_import=False):
        job_or_error = self.import_dataset(
            path_to_xml,
            avoid_duplicate_import=avoid_duplicate_import)
        custom_err_msg = "Import {p}".format(p=path_to_xml)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(
            self, job_id, sleep_time=self._sleep_time)

    def run_import_dataset_by_type(self, dataset_type, path_to_xml,
                                   avoid_duplicate_import=False):
        return self.run_import_dataset(path_to_xml, avoid_duplicate_import)

    def _run_import_and_block(self, func, path, time_out=None):
        # func will be self.import_dataset_X
        job_or_error = func(path)
        custom_err_msg = "Import {p}".format(p=path)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, time_out=time_out,
                                          sleep_time=self._sleep_time)

    def import_dataset_subread(self, path):
        log.warning("DEPRECATED METHOD")
        return self.import_dataset(path)

    def run_import_dataset_subread(self, path, time_out=10):
        return self._run_import_and_block(
            self.import_dataset_subread, path, time_out=time_out)

    def import_dataset_hdfsubread(self, path):
        log.warning("DEPRECATED METHOD")
        return self.import_dataset(path)

    def run_import_dataset_hdfsubread(self, path, time_out=10):
        return self._run_import_and_block(
            self.import_dataset_hdfsubread, path, time_out=time_out)

    def import_dataset_reference(self, path):
        log.warning("DEPRECATED METHOD")
        return self.import_dataset(path)

    def run_import_dataset_reference(self, path, time_out=10):
        return self._run_import_and_block(
            self.import_dataset_reference, path, time_out=time_out)

    def import_dataset_barcode(self, path):
        log.warning("DEPRECATED METHOD")
        return self.import_dataset(path)

    def run_import_dataset_barcode(self, path, time_out=10):
        return self._run_import_and_block(
            self.import_dataset_barcode, path, time_out=time_out)

    def run_import_local_dataset(self, path, avoid_duplicate_import=False):
        """Import a file from FS that is local to where the services are running

        Returns a JobResult instance

        :rtype: JobResult
        """
        dsmd = get_dataset_metadata(path)
        result = self.search_dataset_by_uuid(dsmd.uuid)

        if result is None:
            log.info("Importing dataset {p}".format(p=path))
            job_result = self.run_import_dataset_by_type(
                dsmd.metatype,
                path,
                avoid_duplicate_import=avoid_duplicate_import)
            log.info("Confirming database update")
            # validation 1: attempt to retrieve dataset info
            result_new = self.get_dataset_by_uuid(dsmd.uuid)
            if result_new is None:
                raise JobExeError(("Dataset {u} was imported but could " +
                                   "not be retrieved; this may indicate " +
                                   "XML schema errors.").format(u=dsmd.uuid))
            return job_result
        else:
            log.info(f"Already imported: {result}")
            # need to clean this up
            return JobResult(self.get_job_by_id(result['jobId']), 0, "")
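
    # Illustrative usage (the path is hypothetical): import a dataset XML that
    # is local to the server's filesystem, skipping the import when the
    # dataset UUID is already registered.
    #
    #     job_result = sal.run_import_local_dataset(
    #         "/data/subreads/movie.subreadset.xml",
    #         avoid_duplicate_import=True)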

    def get_dataset_children_jobs(self, dataset_id):
        """
        Get a List of Children Jobs for the DataSet

        :param dataset_id: DataSet Int or UUID
        :type dataset_id: int | str
        :rtype: list[ServiceJob]
        """
        return _process_rget_with_jobs_transform(
            _to_url(self.uri, "{t}/datasets/{i}/jobs".format(t=ServiceAccessLayer.ROOT_SL, i=dataset_id)), headers=self._get_headers())

    def get_job_types(self):
        u = _to_url(
            self.uri, "{}/{}".format(ServiceAccessLayer.ROOT_JM, "job-types"))
        return _process_rget(u, headers=self._get_headers())

    def get_dataset_types(self):
        """Get a List of DataSet Types"""
        u = _to_url(self.uri,
                    "{}/{}".format(ServiceAccessLayer.ROOT_SL,
                                   "dataset-types"))
        return _process_rget(u, headers=self._get_headers())

    def get_dataset_by_uuid(self, int_or_uuid, ignore_errors=False):
        """The recommend model is to look up DataSet type by explicit MetaType

        Returns None if the dataset was not found
        """
        return _process_rget_or_none(_null_func, ignore_errors=ignore_errors)(
            _to_url(self.uri, "{p}/{i}".format(i=int_or_uuid,
                                               p=ServiceAccessLayer.ROOT_DS)),
            headers=self._get_headers())

    def search_dataset_by_uuid(self, uuid):
        """
        A better alternative to get_dataset_by_uuid that does not trigger a 404
        """
        return _process_rget_or_empty(
            _to_url(self.uri, f"{ServiceAccessLayer.ROOT_DS}/search/{uuid}"),
            headers=self._get_headers())
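
    # Illustrative contrast (the UUID is hypothetical): prefer
    # search_dataset_by_uuid when the dataset may be absent, since it returns
    # None without generating a 404 on the server side.
    #
    #     ds = sal.search_dataset_by_uuid(
    #         "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
    #     if ds is None:
    #         ...  # not yet imported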

    def get_dataset_by_id(self, dataset_type, int_or_uuid):
        """Get a Dataset using the DataSetMetaType and (int|uuid) of the dataset"""
        ds_endpoint = _get_endpoint_or_raise(dataset_type)
        return _process_rget(_to_url(self.uri, "{p}/{t}/{i}".format(
            t=ds_endpoint, i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())

    def _get_dataset_details_by_id(self, dataset_type, int_or_uuid):
        """
        Get the Dataset details (XML converted to JSON via web services)
        using the DataSetMetaType and (int|uuid) of the dataset
        """
        # FIXME There's some inconsistencies in the interfaces with regards to
        # returning None or raising
        ds_endpoint = _get_endpoint_or_raise(dataset_type)
        return _process_rget(_to_url(self.uri, "{p}/{t}/{i}/details".format(
            t=ds_endpoint, i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())

    def _get_datasets_by_type(self, dstype):
        return _process_rget(_to_url(self.uri, "{p}/{i}".format(
            i=dstype, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())

    def get_subreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_SUBREADS, int_or_uuid)

    def get_subreadset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(
            FileTypes.DS_SUBREADS, int_or_uuid)

    def get_subreadsets(self):
        return self._get_datasets_by_type("subreads")

    def get_hdfsubreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_SUBREADS_H5, int_or_uuid)

    def get_hdfsubreadset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(
            FileTypes.DS_SUBREADS_H5, int_or_uuid)

    def get_hdfsubreadsets(self):
        return self._get_datasets_by_type("hdfsubreads")

    def get_referenceset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_REF, int_or_uuid)

    def get_referenceset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(FileTypes.DS_REF, int_or_uuid)

    def get_referencesets(self):
        return self._get_datasets_by_type("references")

    def get_barcodeset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_BARCODE, int_or_uuid)

    def get_barcodeset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(
            FileTypes.DS_BARCODE, int_or_uuid)

    def get_barcodesets(self):
        return self._get_datasets_by_type("barcodes")

    def get_alignmentset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_ALIGN, int_or_uuid)

    def get_alignmentset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(FileTypes.DS_ALIGN, int_or_uuid)

    def get_ccsreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_CCS, int_or_uuid)

    def get_ccsreadset_details_by_id(self, int_or_uuid):
        return self._get_dataset_details_by_id(FileTypes.DS_CCS, int_or_uuid)

    def get_ccsreadsets(self):
        return self._get_datasets_by_type("ccsreads")

    def get_alignmentsets(self):
        return self._get_datasets_by_type("alignments")

    def import_fasta(self, fasta_path, name, organism, ploidy):
        """Convert fasta file to a ReferenceSet and Import. Returns a Job """
        d = dict(path=fasta_path,
                 name=name,
                 organism=organism,
                 ploidy=ploidy)
        return _process_rpost_with_transform(ServiceJob.from_d)(self._to_url(
            "{p}/{t}".format(p=ServiceAccessLayer.ROOT_JOBS, t=JobTypes.CONVERT_FASTA)), d, headers=self._get_headers())

    def run_import_fasta(self, fasta_path, name, organism,
                         ploidy, time_out=JOB_DEFAULT_TIMEOUT):
        """Import a Reference into a Block"""""
        job_or_error = self.import_fasta(fasta_path, name, organism, ploidy)
        _d = dict(f=fasta_path, n=name, o=organism, p=ploidy)
        custom_err_msg = "Fasta-convert path:{f} name:{n} organism:{o} ploidy:{p}".format(
            **_d)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, time_out=time_out,
                                          sleep_time=self._sleep_time)

    def create_logger_resource(self, idx, name, description):
        _d = dict(id=idx, name=name, description=description)
        return _process_rpost(
            _to_url(self.uri, "/smrt-base/loggers"), _d, headers=self._get_headers())

    def log_progress_update(self, job_type_id, job_id,
                            message, level, source_id):
        """This is the generic job logging mechanism"""
        _d = dict(message=message, level=level, sourceId=source_id)
        return _process_rpost(_to_url(self.uri, "{p}/{t}/{i}/log".format(
            t=job_type_id, i=job_id, p=ServiceAccessLayer.ROOT_JOBS)), _d, headers=self._get_headers())

    def get_pipeline_template_by_id(self, pipeline_template_id):
        return _process_rget(_to_url(self.uri, "{p}/{i}".format(
            i=pipeline_template_id, p=ServiceAccessLayer.ROOT_PT)), headers=self._get_headers())

    def get_pipeline_presets(self):
        return _process_rget(_to_url(self.uri, "/smrt-link/workflow-presets"),
                             headers=self._get_headers())

    def get_pipeline_preset(self, preset_id):
        presets = self.get_pipeline_presets()
        by_id = {p["presetId"]: p for p in presets}
        by_shortid = {p["presetId"].split(".")[-1]: p for p in presets}
        by_name = {p["name"]: p for p in presets}
        return by_id.get(preset_id,
                         by_name.get(preset_id,
                                     by_shortid.get(preset_id, None)))
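
    # Illustrative lookups (the preset record is hypothetical): a preset id is
    # resolved by full "presetId" first, then by "name", then by the short id
    # suffix after the last ".".
    #
    #     # given presets = [{"presetId": "smrtlink.presets.foo", "name": "Foo"}]
    #     sal.get_pipeline_preset("smrtlink.presets.foo")  # by full id
    #     sal.get_pipeline_preset("Foo")                   # by name
    #     sal.get_pipeline_preset("foo")                   # by short id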

    def create_by_pipeline_template_id(self, *args, **kwds):
        return self.create_analysis_job(*args, **kwds)

    def create_analysis_job(self,
                            name,
                            pipeline_id,
                            epoints,
                            task_options=(),
                            workflow_options=(),
                            tags=(),
                            preset_id=None,
                            description=None,
                            project_id=1):
        """Creates and runs an analysis workflow by workflow ID

        :param tags: Tags should be a set of strings
        """
        if pipeline_id.startswith("pbsmrtpipe"):
            raise NotImplementedError("pbsmrtpipe is no longer supported")

        # sanity checking to see if pipeline is valid
        _ = self.get_pipeline_template_by_id(pipeline_id)

        service_eps = [dict(entryId=e.entry_id,
                            fileTypeId=e.dataset_type,
                            datasetId=e.resource) for e in epoints]

        def _to_o(opt_id, opt_value, option_type_id):
            return dict(optionId=opt_id,
                        value=opt_value,
                        optionTypeId=option_type_id)

        task_options = list(task_options)
        d = dict(name=name,
                 pipelineId=pipeline_id,
                 entryPoints=service_eps,
                 taskOptions=task_options,
                 workflowOptions=workflow_options,
                 projectId=project_id)
        if description:
            d["description"] = description
        if preset_id:
            preset = self.get_pipeline_preset(preset_id)
            if preset is None:
                raise KeyError(f"Can't find a compute config for '{preset_id}'")
            d["presetId"] = preset["presetId"]

        # Only add tags to the request if non-empty.
        if tags:
            tags_str = ",".join(list(tags))
            d['tags'] = tags_str
        job_type = JobTypes.ANALYSIS
        path = "{r}/{p}".format(p=job_type, r=ServiceAccessLayer.ROOT_JOBS)
        raw_d = _process_rpost(_to_url(self.uri, path),
                               d,
                               headers=self._get_headers())
        return ServiceJob.from_d(raw_d)
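
    # Illustrative sketch (pipeline id and dataset UUID are hypothetical):
    # each entry point only needs entry_id, dataset_type and resource
    # attributes, as consumed above, so a simple namedtuple suffices.
    #
    #     from collections import namedtuple
    #     EntryPoint = namedtuple("EntryPoint",
    #                             "entry_id dataset_type resource")
    #     ep = EntryPoint("eid_subread", FileTypes.DS_SUBREADS.file_type_id,
    #                     "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
    #     job = sal.create_analysis_job("my-analysis",
    #                                   "cromwell.workflows.pb_example", [ep])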

    def run_by_pipeline_template_id(self, *args, **kwds):
        return self.run_analysis_job(*args, **kwds)

    def run_analysis_job(self,
                         name,
                         pipeline_id,
                         epoints,
                         task_options=(),
                         workflow_options=(),
                         time_out=JOB_DEFAULT_TIMEOUT,
                         tags=(),
                         abort_on_interrupt=True,
                         retry_on_failure=False):
        """Blocks and runs a job with a timeout"""

        job_or_error = self.create_analysis_job(
            name,
            pipeline_id,
            epoints,
            task_options=task_options,
            workflow_options=workflow_options,
            tags=tags)

        _d = dict(name=name, p=pipeline_id, eps=epoints)
        custom_err_msg = "Job {n} args: {a}".format(n=name, a=_d)

        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self,
                                          job_id,
                                          time_out=time_out,
                                          sleep_time=self._sleep_time,
                                          abort_on_interrupt=abort_on_interrupt,
                                          retry_on_failure=retry_on_failure)

    def run_cromwell_workflow(self,
                              name,
                              workflow_source,
                              inputs_json,
                              engine_options,
                              dependencies_zip,
                              time_out=JOB_DEFAULT_TIMEOUT,
                              tags=(),
                              abort_on_interrupt=True):
        d = dict(
            name=name,
            workflowSource=workflow_source,
            inputsJson=inputs_json,
            engineOptions=engine_options,
            dependenciesZip=dependencies_zip)
        if tags:
            tags_str = ",".join(list(tags))
            d['tags'] = tags_str
        raw_d = _process_rpost(_to_url(self.uri,
                                       "{r}/{p}".format(p=JobTypes.CROMWELL,
                                                        r=ServiceAccessLayer.ROOT_JOBS)),
                               d,
                               headers=self._get_headers())
        job = ServiceJob.from_d(raw_d)
        return _block_for_job_to_complete(self, job.id, time_out=time_out,
                                          sleep_time=self._sleep_time,
                                          abort_on_interrupt=abort_on_interrupt)

    def terminate_job(self, job):
        """
        POST a terminate request appropriate to the job type.  Currently only
        supported for the cromwell and analysis job types.
        """
        log.warn("Terminating job {i} ({u})".format(i=job.id, u=job.uuid))
        if job.external_job_id is not None:
            log.warn("Will abort Cromwell workflow %s", job.external_job_id)
        return _process_rpost(
            _to_url(self.uri, "{r}/{p}/{i}/terminate".format(
                p=job.job_type,
                r=ServiceAccessLayer.ROOT_JOBS,
                i=job.id)),
            {},
            headers=self._get_headers())

    def terminate_job_id(self, job_id):
        job = _get_job_by_id_or_raise(self, job_id, KeyError)
        return self.terminate_job(job)

    def resume_job(self,
                   job_id,
                   time_out=JOB_DEFAULT_TIMEOUT,
                   abort_on_interrupt=True):
        job = _get_job_by_id_or_raise(self, job_id, KeyError)
        if job.state in JobStates.ALL_COMPLETED:
            return JobResult(job, 0, "")
        return _block_for_job_to_complete(self, job.id, time_out=time_out,
                                          sleep_time=self._sleep_time,
                                          abort_on_interrupt=abort_on_interrupt)

    def get_analysis_job_tasks(self, job_id_or_uuid):
        """Get all the Task associated with a Job by UUID or Int Id"""
        job_url = self._to_url(
            _to_relative_tasks_url(
                JobTypes.ANALYSIS)(job_id_or_uuid))
        return _process_rget_with_transform(_transform_job_tasks)(
            job_url, headers=self._get_headers())

    def get_import_job_tasks(self, job_id_or_uuid):
        # this is more for testing purposes
        job_url = self._to_url(
            _to_relative_tasks_url(
                JobTypes.IMPORT_DS)(job_id_or_uuid))
        return _process_rget_with_transform(_transform_job_tasks)(
            job_url, headers=self._get_headers())

    def get_manifests(self):
        u = self._to_url("{}/manifests".format(ServiceAccessLayer.ROOT_SL))
        return _process_rget_with_transform(
            _null_func)(u, headers=self._get_headers())

    def get_manifest_by_id(self, ix):
        u = self._to_url(
            "{}/manifests/{}".format(ServiceAccessLayer.ROOT_SL, ix))
        return _process_rget_or_none(_null_func)(
            u, headers=self._get_headers())

    def get_runs(self):
        u = self._to_url("{}".format(ServiceAccessLayer.ROOT_RUNS))
        return _process_rget_with_transform(
            _null_func)(u, headers=self._get_headers())

    def get_run_details(self, run_uuid):
        u = self._to_url(
            "{}/{}".format(ServiceAccessLayer.ROOT_RUNS, run_uuid))
        return _process_rget_or_none(_null_func)(
            u, headers=self._get_headers())

    def get_run_collections(self, run_uuid):
        u = self._to_url(
            "{}/{}/collections".format(ServiceAccessLayer.ROOT_RUNS, run_uuid))
        return _process_rget_with_transform(
            _null_func)(u, headers=self._get_headers())

    def get_run_collection(self, run_uuid, collection_uuid):
        u = self._to_url(
            "{}/{}/collections/{}".format(ServiceAccessLayer.ROOT_RUNS, run_uuid, collection_uuid))
        return _process_rget_or_none(_null_func)(
            u, headers=self._get_headers())

    def get_samples(self):
        u = self._to_url("{}/samples".format(ServiceAccessLayer.ROOT_SL, ))
        return _process_rget_with_transform(
            _null_func)(u, headers=self._get_headers())

    def get_sample_by_id(self, sample_uuid):
        u = self._to_url(
            "{}/{}".format(ServiceAccessLayer.ROOT_SAMPLES, sample_uuid))
        return _process_rget_or_none(_null_func)(
            u, headers=self._get_headers())

    def submit_multi_job(self, job_options):
        u = self._to_url(
            "{}/multi-analysis".format(ServiceAccessLayer.ROOT_MJOBS))
        return _process_rpost_with_transform(ServiceJob.from_d)(
            u, job_options, headers=self._get_headers())

    def get_sl_api(self, path):
        service_url = f"{self.uri}{path}"
        t1 = time.time()
        r = requests.get(service_url, headers=self._get_headers(), verify=False)
        t2 = time.time()
        log.info("Response time: {:.1f}s".format(t2 - t1))
        r.raise_for_status()
        return r.json()


# -----------------------------------------------------------------------
# SSL stuff
class Wso2Constants:  # pragma: no cover
    SECRET = "KMLz5g7fbmx8RVFKKdu0NOrJic4a"
    CONSUMER_KEY = "6NjRXBcFfLZOwHc0Xlidiz4ywcsa"
    SCOPES = ["welcome", "run-design", "run-qc", "openid", "analysis",
              "sample-setup", "data-management", "userinfo"]


def _create_auth(secret, consumer_key):  # pragma: no cover
    return base64.b64encode(":".join([secret, consumer_key]).encode("utf-8"))


def get_token(url, user, password, scopes, secret, consumer_key):  # pragma: no cover
    basic_auth = _create_auth(secret, consumer_key).decode("utf-8")
    # To be explicit for pedagogical purposes
    headers = {
        "Authorization": "Basic {}".format(basic_auth),
        "Content-Type": "application/x-www-form-urlencoded"
    }

    scope_str = " ".join(scopes)
    payload = dict(grant_type="password",
                   username=user,
                   password=password,
                   scope=scope_str)

    # verify is false to disable the SSL cert verification
    return requests.post(url, payload, headers=headers, verify=False)


def _get_smrtlink_wso2_token(user, password, url):  # pragma: no cover
    r = get_token(
        url,
        user,
        password,
        Wso2Constants.SCOPES,
        Wso2Constants.SECRET,
        Wso2Constants.CONSUMER_KEY)
    j = r.json()
    access_token = j['access_token']
    refresh_token = j['refresh_token']
    scopes = j['scope'].split(" ")
    return access_token, refresh_token, scopes
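
# Illustrative sketch (the host is hypothetical): fetching a WSO2 token
# directly. The /token endpoint is served on the WSO2 port (8243, as used
# elsewhere in this module).
#
#     url = "https://smrtlink-host:8243/token"
#     access, refresh, scopes = _get_smrtlink_wso2_token("user", "pw", url)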


class SmrtLinkAuthClient(ServiceAccessLayer):  # pragma: no cover
    """
    HTTPS-enabled client that routes via WSO2 and requires authentication.
    For internal use only - this is NOT an officially supported API.  Currently
    somewhat sloppy w.r.t. SSL security features.
    """

    def __init__(self, base_url, user, password, port=8243, debug=False,
                 sleep_time=2, token=None):
        super().__init__(
            base_url,
            port,
            debug=debug,
            sleep_time=sleep_time)
        self._user = user
        self._password = password

        if token is None:
            if (user is None or password is None):
                raise ValueError(
                    "Both user and password must be defined unless an existing auth token is supplied")
            self._login()
        else:
            # assume the token is valid; if not, the first client request
            # will fail with an obvious error message
            self.auth_token = token
            self.refresh_token = None

    def _login(self):
        url = "{u}:{p}/token".format(u=self.base_url, p=self.port)
        self.auth_token, self.refresh_token, _ = _get_smrtlink_wso2_token(
            self._user, self._password, url)

    def _get_headers(self):
        return {
            "Authorization": "Bearer {}".format(self.auth_token),
            "Content-type": "application/json",
            "X-User-ID": self._user
        }

    def _to_base_url(self, h):
        if h.startswith("http://"):
            raise ValueError("Invalid URL - this client requires HTTPS")
        prefix = "https://"
        return h if h.startswith(prefix) else prefix + h

    @property
    def uri(self):
        return "{b}:{u}/SMRTLink/1.0.0".format(b=self.base_url, u=self.port)

    def reauthenticate_if_necessary(self):
        """
        Check whether the client still has authorization to access the /status
        endpoint, and acquire a new auth token if not.
        """
        try:
            self.get_status()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                self._login()
            else:
                raise


def get_smrtlink_client(host, port, user=None, password=None, sleep_time=5):  # pragma: no cover
    """
    Convenience method for use in CLI testing tools.  Returns an instance of
    the appropriate client class given the input parameters.  Unlike the client
    itself, this hardcodes 8243 as the WSO2 port number.
    """
    if host != "localhost" or None not in [user, password]:
        return SmrtLinkAuthClient(host, user, password, sleep_time=sleep_time)
    else:
        return ServiceAccessLayer(host, port, sleep_time=sleep_time)
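
# Illustrative usage (host and credentials are hypothetical): for a remote
# host this returns a SmrtLinkAuthClient on the WSO2 port.
#
#     client = get_smrtlink_client("smrtlink-host", 8243,
#                                  user="admin", password="admin")
#     print(client.get_status())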


def get_smrtlink_client_from_args(args):
    return get_smrtlink_client(
        host=args.host,
        port=args.port,
        user=args.user,
        password=args.password)


def run_client_with_retry(fx, host, port, user, password,
                          sleep_time=60,
                          retry_on=(500, 503)):
    """
    Obtain a services client and run the specified function on it - this can
    be essentially any services call - and return the result.  If the query
    fails due to 401 Unauthorized, the call will be retried with a new token.
    HTTP 500 and 503 errors will be retried without re-authenticating.
    """
    def _get_client():
        return get_smrtlink_client(host, port, user, password)
    auth_errors = 0
    started_at = time.time()
    retry_time = sleep_time
    client = _get_client()
    while True:
        try:
            result = fx(client)
        except HTTPError as e:
            status = e.response.status_code
            log.info("Got error {e} (code = {c})".format(e=str(e), c=status))
            if status == 401:
                auth_errors += 1
                if auth_errors > 10:
                    raise RuntimeError(
                        "10 successive HTTP 401 errors, exiting")
                log.warning("Authentication error, will retry with new token")
                client = _get_client()
                continue
            elif status in retry_on:
                log.warning("Got HTTP {c}, will retry in {d}s".format(
                    c=status, d=retry_time))
                time.sleep(retry_time)
                # if a retryable error occurs, we increment the retry time
                # up to a max of 30 minutes
                retry_time = min(1800, retry_time + sleep_time)
                continue
            else:
                raise
        except (ConnectionError, ProtocolError) as e:
            log.warning("Connection error: {e}".format(e=str(e)))
            log.info("Will retry in {d}s".format(d=retry_time))
            time.sleep(retry_time)
            # if a retryable error occurs, we increment the retry time
            # up to a max of 30 minutes
            retry_time = min(1800, retry_time + sleep_time)
            continue
        else:
            return result
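
# Illustrative usage (host and credentials are hypothetical): run a single
# services call with automatic re-authentication on 401 and retry on
# HTTP 500/503 or connection errors.
#
#     status = run_client_with_retry(lambda c: c.get_status(),
#                                    "smrtlink-host", 8243, "user", "pw")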