Author: Andreas Tille <tille@debian.org>
Last-Update: Wed, 31 Oct 2018 15:36:38 +0100
Description: Attempt to add Python 3 support (incomplete, so the patch is deactivated)
 The hunks below apply the usual 2-to-3 conversions: cStringIO -> io,
 string.maketrans -> str.maketrans, urlparse -> urllib.parse,
 iteritems() -> items(), xrange -> range, itertools.izip -> zip,
 basestring -> str, long -> int, map() -> list comprehensions, and
 list() wrappers around dict views, with runtime version checks where
 Python 2 compatibility has to be preserved. The port turned out to be
 incomplete, which is why the patch is currently not applied.
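 .
 Illustration (not part of the applied diff): the per-call-site version
 branching used in pbcore/io/_utils.py can also be expressed once at
 import time. A minimal sketch of that idiom, assuming only the standard
 library:
 .
   import sys
   if sys.version_info[0] == 2:
       # Python 2: bytes is str, so the fast cStringIO buffer works
       from cStringIO import StringIO as BytesIO
   else:
       # Python 3: binary data needs io.BytesIO
       from io import BytesIO
   buf = BytesIO()
   buf.write(b"chunk")                # both branches accept bytes
   assert buf.getvalue() == b"chunk"
 .
 With such an alias the runtime branches inside splitFileContents() would
 be unnecessary; the hunks below keep them to stay close to the original
 code.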

--- a/pbcore/io/_utils.py
+++ b/pbcore/io/_utils.py
@@ -8,7 +8,11 @@ except ImportError:
     h5py = h5py_dummy()
 
 import numpy as np
-from cStringIO import StringIO
+import sys
+if sys.version_info[0] == 2:
+    from cStringIO import StringIO
+else:
+    from io import BytesIO
 
 
 def arrayFromDataset(ds, offsetBegin, offsetEnd):
@@ -29,7 +33,10 @@ def splitFileContents(f, delimiter, BLOC
     Same semantics as f.read().split(delimiter), but with memory usage
     determined by largest chunk rather than entire file size
     """
-    remainder = StringIO()
+    if sys.version_info[0] == 2:
+        remainder = StringIO()
+    else:
+        remainder = BytesIO()
     while True:
         block = f.read(BLOCKSIZE)
         if not block:
@@ -38,7 +45,10 @@ def splitFileContents(f, delimiter, BLOC
         remainder.write(parts[0])
         for part in parts[1:]:
             yield remainder.getvalue()
-            remainder = StringIO()
+            if sys.version_info[0] == 2:
+                remainder = StringIO()
+            else:
+                remainder = BytesIO()
             remainder.write(part)
     yield remainder.getvalue()
 
--- a/pbcore/sequence.py
+++ b/pbcore/sequence.py
@@ -6,11 +6,16 @@ from __future__ import absolute_import
 __all__ = [ "complement",
             "reverseComplement"]
 
-from string import maketrans
-import re
+import sys
+if sys.version_info[0] == 2:
+    from string import maketrans
+    DNA_COMPLEMENT = maketrans('agcturyswkmbdhvnAGCTURYSWKMBDHV-N',
+                               'tcgannnnnnnnnnnnTCGANNNNNNNNNNN-N')
+else:
+    DNA_COMPLEMENT = str.maketrans('agcturyswkmbdhvnAGCTURYSWKMBDHV-N',
+                                   'tcgannnnnnnnnnnnTCGANNNNNNNNNNN-N')
 
-DNA_COMPLEMENT = maketrans('agcturyswkmbdhvnAGCTURYSWKMBDHV-N',
-                           'tcgannnnnnnnnnnnTCGANNNNNNNNNNN-N')
+import re
 
 def reverse( sequence ):
     """Return the reverse of any sequence
--- a/pbcore/io/BarcodeH5Reader.py
+++ b/pbcore/io/BarcodeH5Reader.py
@@ -79,7 +79,7 @@ def writeBarcodeH5(labeledZmws, labeler,
     """Write a barcode file from a list of labeled ZMWs. In addition
     to labeledZmws, this function takes a
     pbbarcode.BarcodeLabeler."""
-    bestScores = map(lambda z: z.toBestRecord(), labeledZmws)
+    bestScores = [z.toBestRecord() for z in labeledZmws]
     outDta = n.vstack(bestScores)
     outH5 = h5py.File(outFile, 'a')
 
@@ -107,9 +107,9 @@ def writeBarcodeH5(labeledZmws, labeler,
         def makeRecord(lZmw):
             zmws = makeArray(nBarcodes * lZmw.nScored, lZmw.holeNumber)
             adapters = n.concatenate([makeArray(nBarcodes, i) for i in \
-                                          xrange(1, lZmw.nScored + 1)])
-            idxs = n.concatenate([range(0, nBarcodes) for i in \
-                                      xrange(0, lZmw.nScored)])
+                                          range(1, lZmw.nScored + 1)])
+            idxs = n.concatenate([list(range(0, nBarcodes)) for i in \
+                                      range(0, lZmw.nScored)])
             scores = n.concatenate(lZmw.allScores)
             return n.transpose(n.vstack((zmws, adapters, idxs, scores)))
 
@@ -142,12 +142,12 @@ class BarcodeH5Reader(object):
         self._movieName = self.bestDS.attrs['movieName']
         # zmw => LabeledZmw
         labeledZmws = [LabeledZmw.fromBestRecord(self.bestDS[i,:]) for i in
-                       xrange(0, self.bestDS.shape[0])]
+                       range(0, self.bestDS.shape[0])]
         self.labeledZmws = dict([(lZmw.holeNumber, lZmw) for lZmw in labeledZmws])
 
         # barcode => LabeledZmws
         self.bcLabelToLabeledZmws = {l:[] for l in self.barcodeLabels}
-        for lZmw in self.labeledZmws.values():
+        for lZmw in list(self.labeledZmws.values()):
             d = self.bcLabelToLabeledZmws[self.barcodeLabels[lZmw.bestIdx]]
             d.append(lZmw)
 
@@ -190,7 +190,7 @@ class MPBarcodeH5Reader(object):
             return (n.min(x), n.max(x))
         # these aren't the ranges of ZMWs, but the ranges for the
         # scored ZMWs.
-        self._bins = map(lambda z : rng(z.holeNumbers), self._parts)
+        self._bins = [rng(z.holeNumbers) for z in self._parts]
 
     def choosePart(self, holeNumber):
         for i,b in enumerate(self._bins):
@@ -218,8 +218,7 @@ class MPBarcodeH5Reader(object):
 
     def labeledZmwsFromBarcodeLabel(self, bcLabel):
-        lzmws = reduce(lambda x,y: x + y,
-                      map(lambda z: z.labeledZmwsFromBarcodeLabel(bcLabel),
-                          self._parts))
+        lzmws = [lz for z in self._parts
+                 for lz in z.labeledZmwsFromBarcodeLabel(bcLabel)]
         return sorted(lzmws, key=lambda z: z.holeNumber)
 
     def __iter__(self):
@@ -235,7 +234,7 @@ class MPBarcodeH5Reader(object):
             return self.labeledZmwsFromBarcodeLabel(item)
         elif isinstance(item, slice):
-            return [ self.labeledZmwFromHoleNumber(item)
-                    for r in xrange(*item.indices(len(self)))]
+            return [ self.labeledZmwFromHoleNumber(r)
+                    for r in range(*item.indices(len(self)))]
         elif isinstance(item, list) or isinstance(item, n.ndarray):
             if len(item) == 0:
                 return []
@@ -269,7 +268,7 @@ class BarcodeH5Fofn(object):
                 self._byMovie[bc.movieName].append(bc)
 
         self.mpReaders = { movieName: parts[0] if len(parts) == 1 else MPBarcodeH5Reader(parts)
-                           for movieName, parts in self._byMovie.iteritems() }
+                           for movieName, parts in self._byMovie.items() }
 
     @property
     def holeNumbers(self):
@@ -277,7 +276,7 @@ class BarcodeH5Fofn(object):
                           for hn in reader.holeNumbers])
     @property
     def movieNames(self):
-        return self.mpReaders.keys()
+        return list(self.mpReaders.keys())
     @property
     def barcodeLabels(self):
         return self._bcH5s[0].barcodeLabels
@@ -288,8 +287,7 @@ class BarcodeH5Fofn(object):
 
     def labeledZmwsFromBarcodeLabel(self, item):
-        lzmws = reduce(lambda x,y: x + y,
-                      map(lambda z: z.labeledZmwsFromBarcodeLabel(item),
-                          self._bcH5s))
+        lzmws = [lz for z in self._bcH5s
+                 for lz in z.labeledZmwsFromBarcodeLabel(item)]
         return sorted(lzmws, key=lambda z: z.holeNumber )
 
     def labeledZmwFromName(self, item):
--- a/pbcore/io/VcfIO.py
+++ b/pbcore/io/VcfIO.py
@@ -105,7 +105,7 @@ class Vcf4Record(object):
                 chrom=self.chrom, pos=self.pos, id=self.id,
                 ref=self.ref, alt=self.alt, qual=_fmt(self.qual),
                 filter=self.filter,
-                info=";".join("{0}={1}".format(k, _fmt(v)) for k, v in self.info.iteritems()),
+                info=";".join("{0}={1}".format(k, _fmt(v)) for k, v in self.info.items()),
                 fields="\t".join(_empty_then(self.fields)))
 
 
@@ -135,9 +135,9 @@ def merge_vcfs_sorted(vcf_files, output_
     sorted_files = sorted(fst_recs, key=lambda x: x[0])
     nrec = 0
     with open(output_file_name, "w") as oh:
-        for m, _ in meta.iteritems():
+        for m, _ in meta.items():
             print(m, file=oh)
-        print("#{0}".format("\t".join(h for h, _ in hdr.iteritems())), file=oh)
+        print("#{0}".format("\t".join(h for h, _ in hdr.items())), file=oh)
         for _, f in sorted_files:
             with open(f) as h:
                 for line in h:
--- a/pbcore/io/align/CmpH5IO.py
+++ b/pbcore/io/align/CmpH5IO.py
@@ -50,9 +50,9 @@ _cBasemap = { 0b0000 : ord("-"),
 _basemapArray  = np.ndarray(shape=(max(_basemap.keys()) + 1,), dtype=np.byte)
 _cBasemapArray = np.ndarray(shape=(max(_basemap.keys()) + 1,), dtype=np.byte)
 
-for (e, v) in _basemap.iteritems():
+for (e, v) in _basemap.items():
     _basemapArray[e] = v
-for (e, v) in _cBasemap.iteritems():
+for (e, v) in _cBasemap.items():
     _cBasemapArray[e] = v
 
 _baseEncodingToInt = np.array([-1]*16)
@@ -574,7 +574,7 @@ class CmpH5Alignment(AlignmentRecordMixi
         transcript = self.transcript(style="exonerate+")
         refPos = self.referencePositions()
         refPosString = "".join([str(pos % 10) for pos in refPos])
-        for i in xrange(0, len(alignedRef), COLUMNS):
+        for i in range(0, len(alignedRef), COLUMNS):
             val += "\n"
             val += "  " + refPosString[i:i+COLUMNS] + "\n"
             val += "  " + alignedRef  [i:i+COLUMNS] + "\n"
@@ -739,7 +739,7 @@ class CmpH5Reader(ReaderBase, IndexedAli
         for (alnGroupId, alnGroupPath) in zip(self.file["/AlnGroup/ID"][:],
                                               self.file["/AlnGroup/Path"][:]):
             alnGroup = self.file[alnGroupPath]
-            self._alignmentGroupById[alnGroupId] = dict(alnGroup.items())
+            self._alignmentGroupById[alnGroupId] = dict(list(alnGroup.items()))
 
 
     def _loadMovieInfo(self):
@@ -753,10 +753,10 @@ class CmpH5Reader(ReaderBase, IndexedAli
             timeScale = [1.0] * numMovies
 
         self._movieInfoTable = np.rec.fromrecords(
-            zip(self.file["/MovieInfo/ID"],
+            list(zip(self.file["/MovieInfo/ID"],
                 self.file["/MovieInfo/Name"],
                 frameRate,
-                timeScale),
+                timeScale)),
             dtype=[("ID"                  , int),
                    ("Name"                , object),
                    ("FrameRate"           , float),
@@ -773,13 +773,13 @@ class CmpH5Reader(ReaderBase, IndexedAli
         # missing chemistry info.
         assert (self._readGroupTable is None) and (self._readGroupDict is None)
         self._readGroupTable = np.rec.fromrecords(
-            zip(self._movieInfoTable.ID,
+            list(zip(self._movieInfoTable.ID,
                 self._movieInfoTable.Name,
                 [self.readType] * len(self._movieInfoTable.ID),
                 self.sequencingChemistry,
                 self._movieInfoTable.FrameRate,
                 ["UnnamedSample"] * len(self._movieInfoTable.ID),
-                [frozenset(self.baseFeaturesAvailable())] * len(self._movieInfoTable.ID)),
+                [frozenset(self.baseFeaturesAvailable())] * len(self._movieInfoTable.ID))),
             dtype=[("ID"                 , np.int32),
                    ("MovieName"          , "O"),
                    ("ReadType"           , "O"),
@@ -792,18 +792,18 @@ class CmpH5Reader(ReaderBase, IndexedAli
 
     def _loadReferenceInfo(self):
         _referenceGroupTbl = np.rec.fromrecords(
-            zip(self.file["/RefGroup/ID"][:],
+            list(zip(self.file["/RefGroup/ID"][:],
                 self.file["/RefGroup/RefInfoID"][:],
-                [path[1:] for path in self.file["/RefGroup/Path"]]),
+                [path[1:] for path in self.file["/RefGroup/Path"]])),
             dtype=[("ID"       , int),
                    ("RefInfoID", int),
                    ("Name"     , object)])
 
         _referenceInfoTbl = np.rec.fromrecords(
-            zip(self.file["/RefInfo/ID"][:],
+            list(zip(self.file["/RefInfo/ID"][:],
                 self.file["/RefInfo/FullName"][:],
                 self.file["/RefInfo/Length"][:],
-                self.file["/RefInfo/MD5"][:]) ,
+                self.file["/RefInfo/MD5"][:])) ,
             dtype=[("RefInfoID", int),
                    ("FullName" , object),
                    ("Length"   , int),
@@ -862,10 +862,10 @@ class CmpH5Reader(ReaderBase, IndexedAli
 
         if "Barcode" in self.file["/AlnInfo"]:
             # Build forward and backwards id<->label lookup tables
-            self._barcodeName = OrderedDict(zip(self.file["/BarcodeInfo/ID"],
-                                                self.file["/BarcodeInfo/Name"]))
-            self._barcode     = OrderedDict(zip(self.file["/BarcodeInfo/Name"],
-                                                self.file["/BarcodeInfo/ID"]))
+            self._barcodeName = OrderedDict(list(zip(self.file["/BarcodeInfo/ID"],
+                                                self.file["/BarcodeInfo/Name"])))
+            self._barcode     = OrderedDict(list(zip(self.file["/BarcodeInfo/Name"],
+                                                self.file["/BarcodeInfo/ID"])))
             # Barcode ID per row
             self._barcodes = self.file["/AlnInfo/Barcode"].value[:,1]
 
@@ -1038,8 +1038,8 @@ class CmpH5Reader(ReaderBase, IndexedAli
             >>> c.versionAtLeast("1.3.0")
             False
         """
-        myVersionTuple = map(int, self.version.split(".")[:3])
-        minimalVersionTuple = map(int, minimalVersion.split(".")[:3])
+        myVersionTuple = list(map(int, self.version.split(".")[:3]))
+        minimalVersionTuple = list(map(int, minimalVersion.split(".")[:3]))
         return myVersionTuple >= minimalVersionTuple
 
     def softwareVersion(self, programName):
@@ -1047,8 +1047,8 @@ class CmpH5Reader(ReaderBase, IndexedAli
         Return the version of program `programName` that processed
         this file.
         """
-        filelog = dict(zip(self.file["/FileLog/Program"],
-                           self.file["/FileLog/Version"]))
+        filelog = dict(list(zip(self.file["/FileLog/Program"],
+                           self.file["/FileLog/Version"])))
         return filelog.get(programName, None)
 
     @property
@@ -1068,7 +1068,7 @@ class CmpH5Reader(ReaderBase, IndexedAli
 
     @property
     def movieNames(self):
-        return set([mi.Name for mi in self._movieDict.values()])
+        return set([mi.Name for mi in list(self._movieDict.values())])
 
     @property
     def ReadGroupID(self):
@@ -1179,8 +1179,8 @@ class CmpH5Reader(ReaderBase, IndexedAli
             False
 
         """
-        return all(featureName in alnGroup.keys()
-                   for alnGroup in self._alignmentGroupById.values())
+        return all(featureName in list(alnGroup.keys())
+                   for alnGroup in list(self._alignmentGroupById.values()))
 
     def baseFeaturesAvailable(self):
         """
@@ -1192,9 +1192,9 @@ class CmpH5Reader(ReaderBase, IndexedAli
             [u'QualityValue', u'IPD', u'PulseWidth', u'InsertionQV', u'DeletionQV']
 
         """
-        baseFeaturesByMovie = [ alnGroup.keys()
-                                 for alnGroup in self._alignmentGroupById.values() ]
-        baseFeaturesAvailableAsSet = set.intersection(*map(set, baseFeaturesByMovie))
+        baseFeaturesByMovie = [ list(alnGroup.keys())
+                                 for alnGroup in list(self._alignmentGroupById.values()) ]
+        baseFeaturesAvailableAsSet = set.intersection(*list(map(set, baseFeaturesByMovie)))
         baseFeaturesAvailableAsSet.discard("AlnArray")
         return list(baseFeaturesAvailableAsSet)
 
@@ -1245,7 +1245,7 @@ class CmpH5Reader(ReaderBase, IndexedAli
             return CmpH5Alignment(self, rowNumbers)
         elif isinstance(rowNumbers, slice):
             return [CmpH5Alignment(self, r)
-                    for r in xrange(*rowNumbers.indices(len(self)))]
+                    for r in range(*rowNumbers.indices(len(self)))]
         elif isinstance(rowNumbers, list) or isinstance(rowNumbers, np.ndarray):
             if len(rowNumbers) == 0:
                 return []
@@ -1258,7 +1258,7 @@ class CmpH5Reader(ReaderBase, IndexedAli
         raise TypeError("Invalid type for CmpH5Reader slicing")
 
     def __iter__(self):
-        return (self[i] for i in xrange(len(self)))
+        return (self[i] for i in range(len(self)))
 
     def __len__(self):
         return len(self.alignmentIndex)
--- a/pbcore/io/dataset/DataSetIO.py
+++ b/pbcore/io/dataset/DataSetIO.py
@@ -19,7 +19,11 @@ import tempfile
 import uuid
 import xml.dom.minidom
 import numpy as np
-from urlparse import urlparse
+import sys
+if sys.version_info[0] == 2:
+    from urlparse import urlparse
+else:
+    from urllib.parse import urlparse
 from functools import wraps, partial
 from collections import defaultdict, Counter
 from pbcore.util.Process import backticks
@@ -249,7 +253,7 @@ def splitKeys(keys, chunks):
 
 def _fileExists(fname):
     """Assert that a file exists with a useful failure mode"""
-    if not isinstance(fname, basestring):
+    if not isinstance(fname, str):
         fname = fname.resourceId
     if not os.path.exists(fname):
         raise InvalidDataSetIOError("Resource {f} not found".format(f=fname))
@@ -427,12 +431,12 @@ class DataSet(object):
             for fname in self.toExternalFiles():
                 # due to h5 file types, must be unpythonic:
                 found = False
-                for allowed in self._metaTypeMapping().keys():
+                for allowed in list(self._metaTypeMapping().keys()):
                     if fname.endswith(allowed):
                         found = True
                         break
                 if not found:
-                    allowed = self._metaTypeMapping().keys()
+                    allowed = list(self._metaTypeMapping().keys())
                     extension = fname.split('.')[-1]
                     raise IOError(errno.EIO,
                                   "Cannot create {c} with resource of type "
@@ -1434,7 +1438,7 @@ class DataSet(object):
             else:
                 self.metadata = newMetadata
 
-        for key, value in kwargs.items():
+        for key, value in list(kwargs.items()):
             self.metadata.addMetadata(key, value)
 
     def updateCounts(self):
@@ -1552,7 +1556,7 @@ class DataSet(object):
         ExternalResources and record order within each file"""
         if self.isIndexed:
             # this uses the index to respect filters
-            for i in xrange(len(self)):
+            for i in range(len(self)):
                 yield self[i]
         else:
             # this uses post-filtering to respect filters
@@ -1978,7 +1982,7 @@ class DataSet(object):
             indexTuples = self._indexMap[index]
             return [self.resourceReaders()[ind[0]][ind[1]] for ind in
                     indexTuples]
-        elif isinstance(index, basestring):
+        elif isinstance(index, str):
             if 'id' in self.index.dtype.names:
                 row = np.nonzero(self.index.id == index)[0][0]
                 return self[row]
@@ -2154,14 +2158,14 @@ class ReadSet(DataSet):
             log.info("No barcodes found in BAM file, skipping split")
             return [self.copy()]
         barcodes = defaultdict(int)
-        for bcTuple in itertools.izip(self.index.bcForward,
-                                      self.index.bcReverse):
+        for bcTuple in zip(self.index.bcForward,
+                           self.index.bcReverse):
             if bcTuple != (-1, -1):
                 barcodes[bcTuple] += 1
 
-        log.debug("{i} barcodes found".format(i=len(barcodes.keys())))
+        log.debug("{i} barcodes found".format(i=len(list(barcodes.keys()))))
 
-        atoms = barcodes.items()
+        atoms = list(barcodes.items())
 
         # The number of reads per barcode is used for balancing
         balanceKey = lambda x: x[1]
@@ -2205,7 +2209,7 @@ class ReadSet(DataSet):
             return [self.copy()]
 
         atoms = self.index.qId
-        movs = zip(*np.unique(atoms, return_counts=True))
+        movs = list(zip(*np.unique(atoms, return_counts=True)))
 
         # Zero chunks requested == 1 chunk per movie.
         if chunks == 0 or chunks > len(movs):
@@ -2379,7 +2383,7 @@ class ReadSet(DataSet):
             # reassign qIds if dupes:
             if len(set(tbr['ID'])) < len(tbr):
                 self._readGroupTableIsRemapped = True
-                tbr['ID'] = range(len(tbr))
+                tbr['ID'] = list(range(len(tbr)))
             return tbr.view(np.recarray)
         else:
             return responses[0]
@@ -2428,10 +2432,10 @@ class ReadSet(DataSet):
                      "MovieID field.")
         if self._readGroupTableIsRemapped:
             log.debug("Must correct index qId's")
-            qIdMap = dict(zip(rr.readGroupTable.ID,
-                              rr.readGroupTable.MovieName))
+            qIdMap = dict(list(zip(rr.readGroupTable.ID,
+                              rr.readGroupTable.MovieName)))
             nameMap = self.movieIds
-            for qId in qIdMap.keys():
+            for qId in list(qIdMap.keys()):
                 qId_acc(indices)[qId_acc(indices) == qId] = nameMap[
                     qIdMap[qId]]
 
@@ -2557,7 +2561,7 @@ class ReadSet(DataSet):
                          "lost")
             else:
                 for extres in self.externalResources:
-                    extres.reference = refCounts.keys()[0]
+                    extres.reference = list(refCounts.keys())[0]
         # reset the indexmap especially, as it is out of date:
         self._index = None
         self._indexMap = None
@@ -2821,13 +2825,13 @@ class AlignmentSet(ReadSet):
 
         if correctIds and self._stackedReferenceInfoTable:
             log.debug("Must correct index tId's")
-            tIdMap = dict(zip(rr.referenceInfoTable['ID'],
-                              rName(rr.referenceInfoTable)))
+            tIdMap = dict(list(zip(rr.referenceInfoTable['ID'],
+                              rName(rr.referenceInfoTable))))
             unfilteredRefTable = self._buildRefInfoTable(filterMissing=False)
-            rname2tid = dict(zip(unfilteredRefTable['Name'],
-                            unfilteredRefTable['ID']))
+            rname2tid = dict(list(zip(unfilteredRefTable['Name'],
+                            unfilteredRefTable['ID'])))
             #nameMap = self.refIds
-            for tId in tIdMap.keys():
+            for tId in list(tIdMap.keys()):
                 tId_acc(indices)[tId_acc(indices) == tId] = rname2tid[
                     tIdMap[tId]]
 
@@ -2943,7 +2947,7 @@ class AlignmentSet(ReadSet):
             return [self.copy()]
 
         atoms = self.index.tId
-        refs = zip(*np.unique(atoms, return_counts=True))
+        refs = list(zip(*np.unique(atoms, return_counts=True)))
 
         # Zero chunks requested == 1 chunk per reference.
         if chunks == 0 or chunks > len(refs):
@@ -3143,7 +3147,7 @@ class AlignmentSet(ReadSet):
         rnames = defaultdict(list)
         for atom in atoms:
             rnames[atom[0]].append(atom)
-        for rname, rAtoms in rnames.iteritems():
+        for rname, rAtoms in rnames.items():
             if len(rAtoms) > 1:
                 contour = self.intervalContour(rname)
                 splits = self.splitContour(contour, len(rAtoms))
@@ -3175,15 +3179,15 @@ class AlignmentSet(ReadSet):
         # pull both at once so you only have to mess with the
         # referenceInfoTable once.
         refLens = self.refLengths
-        refNames = refLens.keys()
+        refNames = list(refLens.keys())
         log.debug("{i} references found".format(i=len(refNames)))
 
         log.debug("Finding contigs")
         if byRecords:
             log.debug("Counting records...")
             atoms = [(rn, 0, 0, count)
-                     for rn, count in zip(refNames, map(self.countRecords,
-                                                        refNames))
+                     for rn, count in zip(refNames, list(map(self.countRecords,
+                                                        refNames)))
                      if count != 0]
             balanceKey = lambda x: self.countRecords(*x)
         else:
@@ -3322,10 +3326,10 @@ class AlignmentSet(ReadSet):
             # abstraction.
             if len(result._filters) > 100:
                 meanNum = self.numRecords//len(chunks)
-                result.numRecords = long(round(meanNum,
-                                               (-1 * len(str(meanNum))) + 3))
+                result.numRecords = int(round(meanNum,
+                                              (-1 * len(str(meanNum))) + 3))
                 meanLen = self.totalLength//len(chunks)
-                result.totalLength = long(round(meanLen,
-                                                (-1 * len(str(meanLen))) + 3))
+                result.totalLength = int(round(meanLen,
+                                               (-1 * len(str(meanLen))) + 3))
             elif updateCounts:
                 result._openReaders = self._openReaders
@@ -3701,7 +3705,7 @@ class AlignmentSet(ReadSet):
         name as a unique key (or ID, if you really have to)"""
 
         # Convert it to a name if you have to:
-        if not isinstance(refName, basestring):
+        if not isinstance(refName, str):
             refName = str(refName)
         if refName.isdigit():
             if not refName in self.refNames:
@@ -3838,8 +3842,8 @@ class AlignmentSet(ReadSet):
         name. TODO(mdsmith)(2016-01-27): pick a better name for this method...
 
         """
-        return zip(self.referenceInfoTable['Name'],
-                   self.referenceInfoTable[key])
+        return list(zip(self.referenceInfoTable['Name'],
+                   self.referenceInfoTable[key]))
 
     def _idToRname(self, rId):
         """Map the DataSet.referenceInfoTable.ID to the superior unique
@@ -3996,7 +4000,7 @@ class ContigSet(DataSet):
                 matches[conId] = [con]
             else:
                 matches[conId].append(con)
-        for name, match_list in matches.items():
+        for name, match_list in list(matches.items()):
             matches[name] = np.array(match_list)
 
         writeTemp = False
@@ -4006,7 +4010,7 @@ class ContigSet(DataSet):
         if self._filters and not self.noFiltering:
             writeTemp = True
         if not writeTemp:
-            writeTemp = any([len(m) > 1 for n, m in matches.items()])
+            writeTemp = any([len(m) > 1 for n, m in list(matches.items())])
 
         def _get_windows(match_list):
             # look for the quiver window indication scheme from quiver:
@@ -4018,7 +4022,7 @@ class ContigSet(DataSet):
                                      "matching id, consolidation aborted")
             return windows
 
-        for name, match_list in matches.items():
+        for name, match_list in list(matches.items()):
             if len(match_list) > 1:
                 try:
                     windows = _get_windows(match_list)
@@ -4136,7 +4140,7 @@ class ContigSet(DataSet):
         for pos in possibilities:
             if not pos.isdigit():
                 return None
-        return np.array(map(int, possibilities))
+        return np.array(list(map(int, possibilities)))
 
     def _updateMetadata(self):
         # update contig specific metadata:
