File: ciffile.py

# -*- coding: utf-8 -*-
"""This module defines functions for parsing `mmCIF files`_.

.. _mmCIF files: http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html"""


from collections import OrderedDict
import os.path
import numpy as np

from prody.atomic import AtomGroup
from prody.atomic import flags
from prody.atomic import ATOMIC_FIELDS
from prody.utilities import openFile
from prody import LOGGER, SETTINGS

from .localpdb import fetchPDB
from .starfile import parseSTARLines, StarDict, parseSTARSection
from .cifheader import getCIFHeaderDict
from .header import buildBiomolecules, assignSecstr, isHelix, isSheet

__all__ = ['parseMMCIFStream', 'parseMMCIF']


class MMCIFParseError(Exception):
    pass


_parseMMCIFdoc = """
    :arg title: title of the :class:`.AtomGroup` instance, default is the
        PDB filename or PDB identifier
    :type title: str

    :arg chain: chain identifiers for parsing specific chains, e.g.
        ``chain='A'``, ``chain='B'``, ``chain='DE'``, by default all
        chains are parsed
    :type chain: str

    :arg subset: a predefined keyword to parse a subset of atoms, valid keywords
        are ``'calpha'`` (``'ca'``), ``'backbone'`` (``'bb'``), or **None**
        (read all atoms), e.g. ``subset='bb'``
    :type subset: str

    :arg model: model index or None (read all models), e.g. ``model=10``
    :type model: int

    :arg altloc: if a location indicator is passed, such as ``'A'`` or ``'B'``,
        only the indicated alternate locations are parsed as the single
        coordinate set of the AtomGroup; if *altloc* is **True**, all
        alternate locations are parsed and each is appended as a distinct
        coordinate set, default is ``"A"``
    :type altloc: str
    """

_PDBSubsets = {'ca': 'ca', 'calpha': 'ca', 'bb': 'bb', 'backbone': 'bb'}


def parseMMCIF(pdb, **kwargs):
    """Returns an :class:`.AtomGroup` and/or a :class:`.StarDict` containing header data
    parsed from an mmCIF file. If the file is not found locally, it will be
    downloaded from the PDB in uncompressed format, regardless of the
    *compressed* keyword.

    This function extends :func:`.parseMMCIFStream`.

    :arg pdb: a PDB identifier or a filename.
        If needed, the mmCIF file is downloaded using :func:`.fetchPDB`.
    :type pdb: str

    :arg chain: comma separated string or list-like of chain IDs
    :type chain: str, tuple, list, :class:`~numpy.ndarray`

    :arg unite_chains: unite chains with the same segment name.
        Default is *False*
    :type unite_chains: bool
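
    A minimal usage sketch (the PDB identifier and keyword values below are
    illustrative examples, not defaults of this function)::

        from prody import parseMMCIF

        # parse all chains, fetching the mmCIF file if it is not found locally
        ag = parseMMCIF('1ubi')

        # parse backbone atoms of chain A and also return the header dictionary
        ag, header = parseMMCIF('1ubi', chain='A', subset='bb', header=True)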
    """
    chain = kwargs.pop('chain', None)
    title = kwargs.get('title', None)
    unite_chains = kwargs.get('unite_chains', False)
    auto_bonds = SETTINGS.get('auto_bonds')
    get_bonds = kwargs.get('bonds', auto_bonds)
    if get_bonds:
        LOGGER.warn('Parsing struct_conn information from mmCIF is currently unsupported and no bond information is added to the results')
    if not os.path.isfile(pdb):
        if len(pdb) == 5 and pdb.isalnum():
            if chain is None:
                chain = pdb[-1]
                pdb = pdb[:4]
            else:
                raise ValueError('Please provide chain as a keyword argument '
                                 'or part of the PDB ID, not both')
        else:
            chain = chain

        if len(pdb) == 4 and pdb.isalnum():
            if title is None:
                title = pdb
                kwargs['title'] = title

            if os.path.isfile(pdb + '.cif'):
                filename = pdb + '.cif'
            elif os.path.isfile(pdb + '.cif.gz'):
                filename = pdb + '.cif.gz'
            else:
                filename = fetchPDB(pdb, report=True,
                                    format='cif', compressed=False)
                if filename is None:
                    raise IOError('mmCIF file for {0} could not be downloaded.'
                                  .format(pdb))
            pdb = filename
        else:
            raise IOError('{0} is not a valid filename or a valid PDB '
                          'identifier.'.format(pdb))
    if title is None:
        title, ext = os.path.splitext(os.path.split(pdb)[1])
        if ext == '.gz':
            title, ext = os.path.splitext(title)
        if len(title) == 7 and title.startswith('pdb'):
            title = title[3:]
        kwargs['title'] = title
    cif = openFile(pdb, 'rt')
    result = parseMMCIFStream(cif, chain=chain, **kwargs)
    cif.close()
    if unite_chains:
        result.setSegnames(result.getChids())
    return result


def parseMMCIFStream(stream, **kwargs):
    """Returns an :class:`.AtomGroup` and/or a class:`.StarDict` 
    containing header data parsed from a stream of CIF lines.

    :arg stream: Anything that implements the method ``readlines``
        (e.g. :class:`file`, buffer, stdin)
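
    A minimal sketch of parsing from an already open handle (the file name and
    chain below are illustrative examples)::

        with open('structure.cif') as stream:
            ag = parseMMCIFStream(stream, chain='A')

        # with header=True, an (AtomGroup, header dict) pair is returned instead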
        
    """

    model = kwargs.get('model')
    subset = kwargs.get('subset')
    chain = kwargs.get('chain')
    altloc = kwargs.get('altloc', 'A')
    header = kwargs.get('header', False)
    assert isinstance(header, bool), 'header must be a boolean'

    if model is not None:
        if isinstance(model, int):
            if model < 0:
                raise ValueError('model must be greater than 0')
        else:
            raise TypeError('model must be an integer, {0} is invalid'
                            .format(str(model)))
    title_suffix = ''
    if subset:
        try:
            subset = _PDBSubsets[subset.lower()]
        except AttributeError:
            raise TypeError('subset must be a string')
        except KeyError:
            raise ValueError('{0} is not a valid subset'
                             .format(repr(subset)))
        title_suffix = '_' + subset
    if chain is not None:
        if not isinstance(chain, str):
            raise TypeError('chain must be a string')
        elif len(chain) == 0:
            raise ValueError('chain must not be an empty string')
        title_suffix = '_' + chain + title_suffix

    ag = None
    if 'ag' in kwargs:
        ag = kwargs['ag']
        if not isinstance(ag, AtomGroup):
            raise TypeError('ag must be an AtomGroup instance')
        n_csets = ag.numCoordsets()
    elif model != 0:
        ag = AtomGroup(str(kwargs.get('title', 'Unknown')) + title_suffix)
        n_csets = 0

    biomol = kwargs.get('biomol', False)
    auto_secondary = SETTINGS.get('auto_secondary')
    secondary = kwargs.get('secondary', auto_secondary)
    hd = None
    if model != 0:
        LOGGER.timeit()
        try:
            lines = stream.readlines()
        except AttributeError as err:
            try:
                lines = stream.read().split('\n')
            except AttributeError:
                raise err
        if not len(lines):
            raise ValueError('empty mmCIF file or stream')
        if header or biomol or secondary:
            hd = getCIFHeaderDict(lines)

        _parseMMCIFLines(ag, lines, model, chain, subset, altloc)

        if ag.numAtoms() > 0:
            LOGGER.report('{0} atoms and {1} coordinate set(s) were '
                          'parsed in %.2fs.'.format(ag.numAtoms(),
                                                    ag.numCoordsets() - n_csets))
        else:
            ag = None
            LOGGER.warn('Atomic data could not be parsed, please '
                        'check the input file.')
    elif header:
        hd = getCIFHeaderDict(stream)

    if ag is not None and isinstance(hd, dict):
        if secondary:
            if auto_secondary:
                try:
                    ag = assignSecstr(hd, ag)
                except ValueError:
                    pass
            else:
                ag = assignSecstr(hd, ag)
        if biomol:
            ag = buildBiomolecules(hd, ag)

            if isinstance(ag, list):
                LOGGER.info('Biomolecular transformations were applied, {0} '
                            'biomolecule(s) are returned.'.format(len(ag)))
            else:
                LOGGER.info('Biomolecular transformations were applied to the '
                            'coordinate data.')    

    if model != 0:
        if header:
            return ag, hd
        else:
            return ag
    else:
        return hd


parseMMCIFStream.__doc__ += _parseMMCIFdoc


def _parseMMCIFLines(atomgroup, lines, model, chain, subset,
                     altloc_torf):
    """Returns an AtomGroup. See also :func:`.parsePDBStream()`.

    :arg lines: mmCIF lines
    """

    if subset is not None:
        if subset == 'ca':
            subset = set(('CA',))
        elif subset in 'bb':
            subset = flags.BACKBONE
        protein_resnames = flags.AMINOACIDS

    asize = 0
    i = 0
    models = []
    nModels = 0
    fields = OrderedDict()
    fieldCounter = -1
    foundAtomBlock = False
    foundAtomFields = False
    doneAtomBlock = False
    start = 0
    stop = 0
    while not doneAtomBlock:
        line = lines[i]
        if line[:11] == '_atom_site.':
            fieldCounter += 1
            fields[line.split('.')[1].strip()] = fieldCounter
            foundAtomFields = True

        elif foundAtomFields and line.strip() != '#':
            if not foundAtomBlock:
                foundAtomBlock = True
                start = i
            models.append(line.split()[fields['pdbx_PDB_model_num']])
            if len(models) == 1 or (models[asize] != models[asize-1]):
                nModels += 1
            asize += 1

        else:
            if foundAtomBlock:
                doneAtomBlock = True
                stop = i
                
        if i == len(lines) - 1:
            if foundAtomBlock:
                doneAtomBlock = True
                stop = i
            else:
                raise MMCIFParseError('mmCIF file contained no atoms.')

        i += 1

    new_start = start
    new_stop = stop
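
    # If a specific model was requested, narrow the [start, stop) line range
    # down to the contiguous run of _atom_site rows whose pdbx_PDB_model_num
    # equals *model*; ``models`` holds the model number of every atom row in
    # file order.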
    if model is not None:
        startp1 = start + 1
        for i in range(start, stop):
            if int(models[i-startp1]) != model and int(models[i+1-startp1]) == model:
                new_start = i
            if model != nModels and (int(models[i-startp1]) == model and int(models[i+1-startp1]) != model):
                new_stop = i
                break
        if not str(model) in models:
            raise MMCIFParseError('model {0} is not found'.format(model))

    start = new_start
    stop = new_stop

    addcoords = False
    if atomgroup.numCoordsets() > 0:
        addcoords = True

    if isinstance(altloc_torf, str):
        if altloc_torf == 'all':
            which_altlocs = 'all'
        elif altloc_torf.strip() != 'A':
            LOGGER.info('Parsing alternate locations {0}.'
                        .format(altloc_torf))
            which_altlocs = '.' + ''.join(altloc_torf.split())
        else:
            which_altlocs = '.A'
        altloc_torf = False
    else:
        which_altlocs = '.A'
        altloc_torf = True
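    # ``which_altlocs`` is used as a plain character set: '.' (the mmCIF value
    # for "no alternate location") plus any requested altloc letters.  Atom
    # rows whose label_alt_id is not one of these characters are skipped in
    # the main loop below, unless 'all' locations were requested.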

    coordinates = np.zeros((asize, 3), dtype=float)
    atomnames = np.zeros(asize, dtype=ATOMIC_FIELDS['name'].dtype)
    resnames = np.zeros(asize, dtype=ATOMIC_FIELDS['resname'].dtype)
    resnums = np.zeros(asize, dtype=ATOMIC_FIELDS['resnum'].dtype)
    chainids = np.zeros(asize, dtype=ATOMIC_FIELDS['chain'].dtype)
    segnames = np.zeros(asize, dtype=ATOMIC_FIELDS['segment'].dtype)
    hetero = np.zeros(asize, dtype=bool)
    termini = np.zeros(asize, dtype=bool)
    altlocs = np.zeros(asize, dtype=ATOMIC_FIELDS['altloc'].dtype)
    icodes = np.zeros(asize, dtype=ATOMIC_FIELDS['icode'].dtype)
    serials = np.zeros(asize, dtype=ATOMIC_FIELDS['serial'].dtype)
    elements = np.zeros(asize, dtype=ATOMIC_FIELDS['element'].dtype)
    bfactors = np.zeros(asize, dtype=ATOMIC_FIELDS['beta'].dtype)
    occupancies = np.zeros(asize, dtype=ATOMIC_FIELDS['occupancy'].dtype)

    n_atoms = atomgroup.numAtoms()
    if n_atoms > 0:
        asize = n_atoms

    acount = 0
    for line in lines[start:stop]:
        startswith = line.split()[fields['group_PDB']]

        try:
            atomname = line.split()[fields['auth_atom_id']]
        except KeyError:
            try:
                atomname = line.split()[fields['label_atom_id']]
            except KeyError:
                raise MMCIFParseError('mmCIF file is missing required atom IDs.')
 

        if atomname.startswith('"') and atomname.endswith('"'):
            atomname = atomname[1:-1]
        
        try:
            resname = line.split()[fields['auth_comp_id']]
        except KeyError:
            try:
                resname = line.split()[fields['label_comp_id']]
            except KeyError:
                raise MMCIFParseError('mmCIF file is missing required component IDs.')
                

        if subset is not None:
            if not (atomname in subset and resname in protein_resnames):
                continue

        chID = line.split()[fields['auth_asym_id']]
        if chain is not None:
            if isinstance(chain, str):
                chain = chain.split(',')
            if not chID in chain:
                continue

        segID = line.split()[fields['label_asym_id']]

        alt = line.split()[fields['label_alt_id']]
        if alt not in which_altlocs and which_altlocs != 'all':
            continue

        if alt == '.':
            alt = ' '

        coordinates[acount] = [line.split()[fields['Cartn_x']],
                               line.split()[fields['Cartn_y']],
                               line.split()[fields['Cartn_z']]]
        atomnames[acount] = atomname
        resnames[acount] = resname
        chainids[acount] = chID
        segnames[acount] = segID
        hetero[acount] = startswith == 'HETATM' # True or False

        try:
            resnums[acount] = line.split()[fields['auth_seq_id']]
        except KeyError:
            try:
                resnums[acount] = line.split()[fields['label_seq_id']]
            except KeyError:
                raise MMCIFParseError('mmCIF file is missing required sequence IDs.')


        if chainids[acount] != chainids[acount-1]: 
            termini[acount-1] = True

        altlocs[acount] = alt
        
        try:
            icodes[acount] = line.split()[fields['pdbx_PDB_ins_code']]
        except KeyError:
            icodes[acount] = ''

        if icodes[acount] == '?' or icodes[acount] == '.':
            icodes[acount] = ''

        serials[acount] = line.split()[fields['id']]
        elements[acount] = line.split()[fields['type_symbol']]
        bfactors[acount] = line.split()[fields['B_iso_or_equiv']]
        occupancies[acount] = line.split()[fields['occupancy']]

        acount += 1

    if model is None:
        modelSize = acount//nModels
    else:
        modelSize = acount

    if addcoords:
        atomgroup.addCoordset(coordinates[:modelSize])
    else:
        atomgroup._setCoords(coordinates[:modelSize])

    atomgroup.setNames(atomnames[:modelSize])
    atomgroup.setResnames(resnames[:modelSize])
    atomgroup.setResnums(resnums[:modelSize])
    atomgroup.setSegnames(segnames[:modelSize])
    atomgroup.setChids(chainids[:modelSize])
    atomgroup.setFlags('hetatm', hetero[:modelSize])
    atomgroup.setFlags('pdbter', termini[:modelSize])
    atomgroup.setFlags('selpdbter', termini[:modelSize])
    atomgroup.setAltlocs(altlocs[:modelSize])
    atomgroup.setIcodes(icodes[:modelSize])
    atomgroup.setSerials(serials[:modelSize])

    atomgroup.setElements(elements[:modelSize])
    from prody.utilities.misctools import getMasses
    atomgroup.setMasses(getMasses(elements[:modelSize]))
    atomgroup.setBetas(bfactors[:modelSize])
    atomgroup.setOccupancies(occupancies[:modelSize])

    anisou = None
    siguij = None
    try:
        data = parseSTARSection(lines, "_atom_site_anisotrop")
        x = data[0] # check if data has anything in it
    except IndexError:
        LOGGER.warn("No anisotropic B factors found")
    else:
        anisou = np.zeros((acount, 6),
                          dtype=ATOMIC_FIELDS['anisou'].dtype)
        
        if "_atom_site_anisotrop.U[1][1]_esd" in data[0].keys():
            siguij = np.zeros((acount, 6),
                              dtype=ATOMIC_FIELDS['siguij'].dtype)

        for entry in data:
            try:
                index = np.where(atomgroup.getSerials() == int(
                    entry["_atom_site_anisotrop.id"]))[0][0]
            except:
                continue
            
            anisou[index, 0] = entry['_atom_site_anisotrop.U[1][1]']
            anisou[index, 1] = entry['_atom_site_anisotrop.U[2][2]']
            anisou[index, 2] = entry['_atom_site_anisotrop.U[3][3]']
            anisou[index, 3] = entry['_atom_site_anisotrop.U[1][2]']
            anisou[index, 4] = entry['_atom_site_anisotrop.U[1][3]']
            anisou[index, 5] = entry['_atom_site_anisotrop.U[2][3]'] 

            if siguij is not None:
                try:
                    siguij[index, 0] = entry['_atom_site_anisotrop.U[1][1]_esd']
                    siguij[index, 1] = entry['_atom_site_anisotrop.U[2][2]_esd']
                    siguij[index, 2] = entry['_atom_site_anisotrop.U[3][3]_esd']
                    siguij[index, 3] = entry['_atom_site_anisotrop.U[1][2]_esd']
                    siguij[index, 4] = entry['_atom_site_anisotrop.U[1][3]_esd']
                    siguij[index, 5] = entry['_atom_site_anisotrop.U[2][3]_esd']
                except:
                    pass

        atomgroup.setAnisous(anisou) # no division needed anymore

        if not np.any(siguij):
            atomgroup.setAnistds(siguij)  # no division needed anymore

    if model is None:
        for n in range(1, nModels):
            atomgroup.addCoordset(coordinates[n*modelSize:(n+1)*modelSize])

    return atomgroup


def writeMMCIF(filename, atoms, csets=None, autoext=True, **kwargs):
    """Write *atoms* in MMTF format to a file with name *filename* and return
    *filename*.  If *filename* ends with :file:`.gz`, a compressed file will
    be written.
    
    :arg atoms: an object with atom and coordinate data
    :type atoms: :class:`.Atomic`

    :arg csets: coordinate set indices, default is all coordinate sets

    :arg autoext: when the extension is not present, append :file:`.cif` to *filename*

    :keyword header: header data to write as well
    :type header: dict
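
    A minimal usage sketch (the PDB identifier and output file name are
    illustrative, and Biopython is assumed to be installed; the import path
    assumes this module lives at ``prody.proteins.ciffile``)::

        from prody import parseMMCIF
        from prody.proteins.ciffile import writeMMCIF

        ag = parseMMCIF('1ubi')
        writeMMCIF('1ubi_out.cif', ag)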
    """
    try:
        from Bio.PDB import MMCIFIO
    except ImportError:
        raise ImportError('Biopython MMCIFIO could not be imported. '
            'Reinstall ProDy or install Biopython '
            'to solve the problem.')

    header = kwargs.get('header', None)

    if autoext and not filename.lower().endswith('.cif'):
        filename += '.cif'

    structure = atoms.toBioPythonStructure(header=header, csets=csets)
    io = MMCIFIO()
    io.set_structure(structure)
    io.save(filename)
    return filename