File: format.py

package info (click to toggle)
python-ihm 2.7-1
  • links: PTS, VCS
  • area: main
  • in suites: sid
  • size: 3,368 kB
  • sloc: python: 30,422; ansic: 5,990; sh: 24; makefile: 20
file content (1365 lines) | stat: -rw-r--r-- 51,372 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
"""Utility classes to handle CIF format.

   This module provides classes to read in and write out mmCIF files. It is
   only concerned with handling syntactically correct CIF - it does not know
   the set of tables or the mapping to ihm objects. For that,
   see :mod:`ihm.dumper` for writing and :mod:`ihm.reader` for reading.

   See also the `stream parser example <https://github.com/ihmwg/python-ihm/blob/main/examples/stream_parser.py>`_
   and the `token reader example <https://github.com/ihmwg/python-ihm/blob/main/examples/token_reader.py>`_.
"""  # noqa: E501

import textwrap
import operator
import ihm
from io import StringIO
import inspect
import re
try:
    from . import _format
except ImportError:
    _format = None


def _write_multiline(val, fh):
    fh.write("\n;")
    fh.write(val)
    if not val.endswith('\n'):
        fh.write("\n")
    fh.write(";\n")


class _LineWriter:
    def __init__(self, writer, line_len=80):
        self.writer = writer
        self.line_len = line_len
        self.column = 0

    def write(self, val):
        if isinstance(val, str) and '\n' in val:
            _write_multiline(val, self.writer.fh)
            self.column = 0
            return
        val = '.' if val is None else self.writer._repr(val)
        if self.column > 0:
            if self.line_len and self.column + len(val) + 1 > self.line_len:
                self.writer.fh.write("\n")
                self.column = 0
            else:
                self.writer.fh.write(" ")
                self.column += 1
        self.writer.fh.write(val)
        self.column += len(val)


class _CifCategoryWriter:
    def __init__(self, writer, category):
        self.writer = writer
        self.category = category

    def write(self, **kwargs):
        self.writer._write(self.category, kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass


class _CifLoopWriter:
    def __init__(self, writer, category, keys, line_wrap=True):
        self._line_wrap = line_wrap
        self.writer = writer
        self.category = category
        self.keys = keys
        # Remove characters that we can't use in Python identifiers
        self.python_keys = [k.replace('[', '').replace(']', '') for k in keys]
        self._empty_loop = True

    def write(self, **kwargs):
        if self._empty_loop:
            f = self.writer.fh
            f.write("#\nloop_\n")
            for k in self.keys:
                f.write("%s.%s\n" % (self.category, k))
            self._empty_loop = False
        lw = _LineWriter(self.writer, line_len=80 if self._line_wrap else 0)
        for k in self.python_keys:
            lw.write(kwargs.get(k, None))
        self.writer.fh.write("\n")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._empty_loop:
            self.writer.fh.write("#\n")


class _Writer:
    """Base class for all writers"""

    omitted = '.'
    unknown = '?'

    _boolmap = {False: 'NO', True: 'YES'}

    def __init__(self, fh):
        self.fh = fh


class CifWriter(_Writer):
    """Write information to a CIF file.
       The constructor takes a single argument - a Python filelike object
       to write to - and provides methods to write Python objects to
       that file. Most simple Python types are supported (string, float,
       bool, int). The Python bool type is mapped to CIF strings
       'NO' and 'YES'. Floats are always represented with 3 decimal places
       (or in scientific notation with 3 digits of precision if smaller
       than 1e-3); if a different amount of precision is desired, convert
       the float to a string first."""

    # If True, wrap output lines and comments at 80 characters.
    # Class-level, so _set_line_wrap affects all instances.
    _line_wrap = True

    @classmethod
    def _set_line_wrap(cls, line_wrap):
        # Enable or disable 80-character line wrapping for all writers
        cls._line_wrap = line_wrap

    def flush(self):
        # noop - data is written as it is encountered
        pass

    def start_block(self, name):
        """Start a new data block in the file with the given name."""
        self.fh.write('data_%s\n' % name)

    def end_block(self):
        # noop - mmCIF has no end-of-block indicator
        pass

    def category(self, category):
        """Return a context manager to write a CIF category.
           A CIF category is a simple list of key:value pairs.

           :param str category: the name of the category
                                (e.g. "_struct_conf_type").
           :return: an object with a single method `write` which takes
                    keyword arguments.

           For example::

               with writer.category("_struct_conf_type") as l:
                   l.write(id='HELX_P', criteria=writer.unknown)
           """
        return _CifCategoryWriter(self, category)

    def loop(self, category, keys):
        """Return a context manager to write a CIF loop.

           :param str category: the name of the category
                                (e.g. "_struct_conf")
           :param list keys: the field keys in that category
           :return: an object with a single method `write` which takes
                    keyword arguments; this can be called any number of
                    times to add entries to the loop. Any field keys in `keys`
                    that are not provided as arguments to `write`, or values
                    that are the Python value `None`, will get the CIF
                    omitted value ('.'), while arguments to `write` that
                    are not present in `keys` will be ignored.

           For example::

               with writer.loop("_struct_conf", ["id", "conf_type_id"]) as l:
                   for i in range(5):
                       l.write(id='HELX_P1%d' % i, conf_type_id='HELX_P')
           """
        return _CifLoopWriter(self, category, keys, line_wrap=self._line_wrap)

    def write_comment(self, comment):
        """Write a simple comment to the CIF file.
           The comment will be wrapped if necessary for readability.
           See :meth:`_set_line_wrap`."""
        if self._line_wrap:
            # Wrap at 78 characters so that the '# ' prefix fits in 80
            for line in textwrap.wrap(comment, 78):
                self.fh.write('# ' + line + '\n')
        else:
            self.fh.write('# ' + comment + '\n')

    def _write(self, category, kwargs):
        # Write each data item on its own line, sorted by key name.
        # Multiline string values use semicolon-delimited syntax.
        for key, val in sorted(kwargs.items(), key=operator.itemgetter(0)):
            if isinstance(val, str) and '\n' in val:
                self.fh.write("%s.%s" % (category, key))
                _write_multiline(val, self.fh)
            else:
                self.fh.write("%s.%s %s\n" % (category, key,
                                              self.omitted if val is None
                                              else self._repr(val)))

    def _repr(self, obj):
        # Convert a Python value to its CIF string representation.
        # A string may be written bare (unquoted) only if it contains no
        # quotes or spaces, is non-empty, and cannot be mistaken for CIF
        # syntax (a variable name, a reserved word, or the
        # omitted/unknown placeholders)
        if isinstance(obj, str) and '"' not in obj \
           and "'" not in obj and " " not in obj \
           and len(obj) > 0 \
           and not obj.startswith('_') \
           and not obj.startswith('global_') \
           and not obj.startswith('[') \
           and obj[:5] not in ('data_', 'save_', 'loop_', 'stop_', '?', '.'):
            return obj
        elif isinstance(obj, float):
            # Use scientific notation for very small magnitudes so that
            # precision is not lost by "%.3f"
            if abs(obj) < 1e-3:
                return "%.3g" % obj
            else:
                return "%.3f" % obj
        elif isinstance(obj, bool):
            # Note: checked after float; bool is an int subclass, not float
            return self._boolmap[obj]
        elif isinstance(obj, str):
            # String needs quoting; repr() adds suitable quotes
            return repr(obj)
        else:
            return str(obj)


# Acceptable 'whitespace' characters in CIF
_WHITESPACE = set(" \t")


class CifParserError(Exception):
    """Raised when an mmCIF file cannot be parsed due to invalid format."""


class _Token:
    """A token in an mmCIF file"""
    pass


class _ValueToken(_Token):
    """Base class for tokens holding the value of an mmCIF variable."""


class _OmittedValueToken(_ValueToken):
    """A value that is deliberately omitted (the '.' string in mmCIF)"""
    def as_mmcif(self):
        """An omitted value is always written as a bare period."""
        return "."


class _UnknownValueToken(_ValueToken):
    """A value that is unknown (the '?' string in mmCIF)"""
    def as_mmcif(self):
        """An unknown value is always written as a bare question mark."""
        return "?"


class _TextValueToken(_ValueToken):
    """The value of a variable in mmCIF as a piece of text"""
    __slots__ = ['txt', 'quote']

    def __init__(self, txt, quote):
        self.txt = txt
        self.quote = quote

    def as_mmcif(self):
        """Return the text quoted suitably for writing to an mmCIF file."""
        # Text containing newlines (or originally semicolon-delimited)
        # must use the multiline semicolon syntax
        if self.quote == ';' or '\n' in self.txt:
            if self.txt.endswith('\n'):
                return ";" + self.txt + ";\n"
            return ";" + self.txt + "\n;\n"
        if self.quote == "'":
            return "'%s'" % self.txt
        # Text containing spaces must be quoted even if it wasn't originally
        if self.quote == '"' or ' ' in self.txt:
            return '"%s"' % self.txt
        return self.txt


class _VariableToken(_Token):
    """A variable name, e.g. _entry.id, in mmCIF"""

    __slots__ = ['category', 'keyword']

    def __init__(self, val, linenum):
        # mmCIF categories and keywords are case insensitive, so make
        # everything lowercase
        category, _, keyword = val.lower().partition('.')
        if not (category and keyword):
            raise CifParserError("Malformed mmCIF variable name "
                                 "(%s) on line %d" % (val, linenum))
        self.category = category
        self.keyword = keyword


class _PreservingVariableToken(_VariableToken):
    """A variable name that preserves the original case of the keyword"""

    # 'category' and 'keyword' slots are already declared in the parent
    # class _VariableToken; redeclaring them here would waste space in
    # every instance and shadow the parent's slot descriptors, so only
    # the new attribute is declared.
    __slots__ = ['orig_keyword']

    def __init__(self, val, linenum):
        """Parse *val*, remembering the original case of the keyword.

           :raises CifParserError: if *val* is malformed (handled by
                   the base class).
        """
        super().__init__(val, linenum)
        _, _, self.orig_keyword = val.partition('.')

    def as_mmcif(self):
        """Return the variable name for output, preferring the
           case-preserved keyword when it still matches the parsed one."""
        if self.orig_keyword and self.orig_keyword.lower() == self.keyword:
            return self.category + '.' + self.orig_keyword
        else:
            return self.category + '.' + self.keyword


class _CommentToken(_Token):
    """A comment in mmCIF without the leading '#'"""
    __slots__ = ['txt']

    def __init__(self, txt):
        self.txt = txt

    def as_mmcif(self):
        """Restore the leading '#' when writing the comment back out."""
        return "#%s" % self.txt


class _WhitespaceToken(_Token):
    """Space between other mmCIF tokens"""
    __slots__ = ['txt']

    def __init__(self, txt):
        self.txt = txt

    def as_mmcif(self):
        """Whitespace is written back out exactly as it was read."""
        return self.txt


class _EndOfLineToken(_Token):
    """End of a line in an mmCIF file"""
    def as_mmcif(self):
        """An end of line always maps to a single newline character."""
        return "\n"


class _NullToken(_Token):
    """Null token"""
    def as_mmcif(self):
        """A null token contributes nothing to the output."""
        return ""

    # Return dummy values for filters that expect a variable or value token
    keyword = property(lambda self: None)


class _DataToken(_Token):
    """A data_* keyword in mmCIF, denoting a new data block"""
    __slots__ = ['txt']

    def __init__(self, txt):
        self.txt = txt

    def as_mmcif(self):
        """Reconstruct the full data_* keyword from the block name."""
        return 'data_%s' % self.txt


class _LoopToken(_Token):
    """A loop_ keyword in mmCIF, denoting the start of a loop construct"""
    def as_mmcif(self):
        """A loop construct always begins with the literal keyword."""
        return "loop_"


class _SaveToken(_Token):
    """A save_* keyword in mmCIF, denoting the start or end of a save frame"""


class _Reader:
    """Base class for reading a file and extracting some or all of its data."""

    def _add_category_keys(self):
        """Populate _keys for each category by inspecting its __call__
           method"""
        def python_to_cif(field):
            # Map valid Python identifiers to mmCIF keywords
            # (e.g. tr_vector1 -> tr_vector[1]; keywords containing
            # brackets are not valid Python argument names)
            if field.startswith('tr_vector') or field.startswith('rot_matrix'):
                return re.sub(r'(\d)', r'[\1]', field)
            else:
                return field

        def fill_keys(h, s, attr, typ):
            # Set handler attribute `attr` to the frozenset of keys whose
            # __call__ annotation is exactly `typ`, unless the handler
            # already declares that attribute itself
            if not hasattr(h, attr):
                setattr(h, attr, frozenset(
                    python_to_cif(k) for k, v in s.annotations.items()
                    if v is typ))

        def check_extra(h, attr):
            # Every typed key must also be present in the handler's _keys
            extra = frozenset(getattr(h, attr)) - frozenset(h._keys)
            if extra:
                raise ValueError("For %s, %s not in _keys: %s"
                                 % (h, attr, ", ".join(extra)))

        for h in self.category_handler.values():
            s = inspect.getfullargspec(h.__call__)
            # If the handler doesn't declare _keys itself, derive them
            # from the names of __call__'s arguments (skipping self)
            if not hasattr(h, '_keys'):
                h._keys = [python_to_cif(x) for x in s.args[1:]]
            fill_keys(h, s, '_int_keys', int)
            fill_keys(h, s, '_float_keys', float)
            fill_keys(h, s, '_bool_keys', bool)
            # Only int, float, str and bool annotations are understood
            bad_keys = frozenset(k for k, v in s.annotations.items()
                                 if v not in (int, float, str, bool))
            if bad_keys:
                raise ValueError("For %s, bad annotations: %s"
                                 % (h, ", ".join(bad_keys)))
            check_extra(h, '_int_keys')
            check_extra(h, '_float_keys')
            check_extra(h, '_bool_keys')


class _CifTokenizer:
    """Break an mmCIF file up into a stream of :class:`_Token` objects.

       Tokens are buffered one line at a time in self._tokens, with
       self._token_index pointing to the next token to be returned."""
    def __init__(self, fh):
        self.fh = fh
        self._tokens = []
        self._token_index = 0
        self._linenum = 0

    # Read a line from the file. Treat it as ASCII (not Unicode)
    # but be tolerant of 8-bit characters by assuming latin-1 encoding
    def _read_line(self):
        line = self.fh.readline()
        if isinstance(line, bytes):
            return line.decode('latin-1')
        else:
            return line

    def _read_multiline_token(self, first_line, ignore_multiline):
        """Read a semicolon-delimited (multiline) token"""
        lines = [first_line[1:]]  # Skip initial semicolon
        start_linenum = self._linenum
        while True:
            self._linenum += 1
            nextline = self._read_line()
            if nextline == '':
                raise CifParserError(
                    "End of file while reading multiline "
                    "string which started on line %d" % start_linenum)
            elif nextline.startswith(';'):
                # Strip last newline
                lines[-1] = lines[-1].rstrip('\r\n')
                self._tokens = [_TextValueToken("".join(lines), ';')]
                return
            elif not ignore_multiline:
                lines.append(nextline)

    def _handle_quoted_token(self, line, strlen, start_pos, quote_type):
        """Given the start of a quoted string, find the end and add a token
           for it"""
        quote = line[start_pos]
        # Get the next quote that is followed by whitespace (or line end).
        # In mmCIF a quote within a string is not considered an end quote as
        # long as it is not followed by whitespace.
        end = start_pos
        while True:
            end = line.find(quote, end + 1)
            if end == -1:
                raise CifParserError("%s-quoted string not terminated "
                                     "at line %d"
                                     % (quote_type, self._linenum))
            elif end == strlen - 1 or line[end + 1] in _WHITESPACE:
                # A quoted string is always a literal string, even if it is
                # "?" or ".", not an unknown/omitted value
                self._tokens.append(_TextValueToken(line[start_pos + 1:end],
                                                    quote))
                return end + 1  # Step past the closing quote

    def _skip_initial_whitespace(self, line, strlen, start_pos):
        # Advance start_pos past any whitespace; overridden in
        # _PreservingCifTokenizer to record the skipped whitespace
        while start_pos < strlen and line[start_pos] in _WHITESPACE:
            start_pos += 1
        return start_pos

    def _extract_line_token(self, line, strlen, start_pos):
        """Extract the next token from the given line starting at start_pos,
           populating self._tokens. The new start_pos is returned."""
        start_pos = self._skip_initial_whitespace(line, strlen, start_pos)
        if start_pos >= strlen:
            return strlen
        if line[start_pos] == '"':
            return self._handle_quoted_token(line, strlen, start_pos, "Double")
        elif line[start_pos] == "'":
            return self._handle_quoted_token(line, strlen, start_pos, "Single")
        elif line[start_pos] == "#":
            # Comment - discard the rest of the line
            self._handle_comment(line, start_pos)
            return strlen
        else:
            # Find end of token (whitespace or end of line)
            end_pos = start_pos
            while end_pos < strlen and line[end_pos] not in _WHITESPACE:
                end_pos += 1
            val = line[start_pos:end_pos]
            if val == 'loop_':
                tok = _LoopToken()
            elif val.startswith('data_'):
                tok = _DataToken(val[5:])
            elif val.startswith('save_'):
                tok = _SaveToken()
            elif val.startswith('_'):
                tok = self._handle_variable_token(val, self._linenum)
            elif val == '.':
                tok = _OmittedValueToken()
            elif val == '?':
                tok = _UnknownValueToken()
            else:
                # Note that we do no special processing for other reserved
                # words (global_, save_, stop_). But the probability of
                # them occurring where we expect a value is pretty small.
                tok = _TextValueToken(val, None)  # don't alter case of values
            self._tokens.append(tok)
            return end_pos

    def _handle_variable_token(self, val, linenum):
        # Overridden in _PreservingCifTokenizer to keep keyword case
        return _VariableToken(val, linenum)

    def _handle_comment(self, line, start_pos):
        """Potentially handle a comment that spans line[start_pos:]."""
        pass

    def _tokenize(self, line):
        """Break up a line into tokens, populating self._tokens"""
        self._tokens = []
        if line.startswith('#'):
            self._handle_comment(line, 0)
            return  # Skip comment lines
        start_pos = 0
        strlen = len(line)
        while start_pos < strlen:
            start_pos = self._extract_line_token(line, strlen, start_pos)

    def _unget_token(self):
        """Push back the last token returned by _get_token() so it can
           be read again"""
        self._token_index -= 1

    def _get_token(self, ignore_multiline=False):
        """Get the next :class:`_Token` from an mmCIF file, or None
           on end of file.
           If ignore_multiline is TRUE, the string contents of any multiline
           value tokens (those that are semicolon-delimited) are not stored
           in memory.
        """
        while len(self._tokens) <= self._token_index:
            # No tokens left - read the next non-blank line in
            self._linenum += 1
            line = self._read_line()
            if line == '':  # End of file
                return
            if line.startswith(';'):
                self._read_multiline_token(line, ignore_multiline)
            else:
                self._tokenize(line.rstrip('\r\n'))
            self._token_index = 0
        self._token_index += 1
        return self._tokens[self._token_index - 1]


class _PreservingCifTokenizer(_CifTokenizer):
    """A tokenizer subclass which preserves comments, case and whitespace"""

    def _tokenize(self, line):
        # Tokenize as normal but append an explicit end-of-line marker so
        # the original line structure can be reproduced on output
        _CifTokenizer._tokenize(self, line)
        self._tokens.append(_EndOfLineToken())

    def _handle_comment(self, line, start_pos):
        # Keep the comment text (without its leading '#') as a token
        self._tokens.append(_CommentToken(line[start_pos + 1:]))

    def _handle_variable_token(self, val, linenum):
        return _PreservingVariableToken(val, linenum)

    def _skip_initial_whitespace(self, line, strlen, start_pos):
        # Unlike the base class, record any skipped whitespace as a token
        pos = start_pos
        while pos < strlen and line[pos] in _WHITESPACE:
            pos += 1
        if pos > start_pos:
            self._tokens.append(_WhitespaceToken(line[start_pos:pos]))
        return pos


class _CategoryTokenGroup:
    """A group of tokens which set a single data item"""
    def __init__(self, vartoken, valtoken):
        self.vartoken, self.valtoken = vartoken, valtoken

    def __str__(self):
        return ("<_CategoryTokenGroup(%s, %s)>"
                % (self.vartoken.as_mmcif(), self.valtoken.token.as_mmcif()))

    def as_mmcif(self):
        return self.vartoken.as_mmcif() + self.valtoken.as_mmcif() + "\n"

    def __set_value(self, val):
        self.valtoken.value = val

    category = property(lambda self: self.vartoken.category)
    keyword = property(lambda self: self.vartoken.keyword)
    value = property(lambda self: self.valtoken.value, __set_value)


class _LoopHeaderTokenGroup:
    """A group of tokens that form the start of a loop_ construct"""
    def __init__(self, looptoken, category, keywords, end_spacers):
        self._loop, self.category = looptoken, category
        self.keywords = keywords
        self.end_spacers = end_spacers

    def keyword_index(self, keyword):
        """Get the zero-based index of the given keyword, or ValueError"""
        return [k.token.keyword for k in self.keywords].index(keyword)

    def __str__(self):
        return ("<_LoopHeaderTokenGroup(%s, %s)>"
                % (self.category,
                   str([k.token.keyword for k in self.keywords])))

    def as_mmcif(self):
        all_tokens = [self._loop] + self.keywords + self.end_spacers
        return "".join(x.as_mmcif() for x in all_tokens)


class _LoopRowTokenGroup:
    """A group of tokens that represent one row in a loop_ construct"""
    def __init__(self, items):
        self.items = items

    def as_mmcif(self):
        return "".join(x.as_mmcif() for x in self.items)


class _SpacedToken:
    """A token with zero or more leading whitespace or newline tokens"""
    def __init__(self, spacers, token):
        self.spacers, self.token = spacers, token

    def as_mmcif(self):
        return ("".join(x.as_mmcif() for x in self.spacers)
                + self.token.as_mmcif())

    def __get_value(self):
        if isinstance(self.token, _OmittedValueToken):
            return None
        elif isinstance(self.token, _UnknownValueToken):
            return ihm.unknown
        else:
            return self.token.txt

    def __set_value(self, val):
        if val is None:
            self.token = _OmittedValueToken()
        elif val is ihm.unknown:
            self.token = _UnknownValueToken()
        elif isinstance(self.token, _TextValueToken):
            self.token.txt = val
        else:
            self.token = _TextValueToken(val, quote=None)

    value = property(__get_value, __set_value)


class Filter:
    """Base class for filters used by :meth:`CifTokenReader.read_file`.

       Typically, a subclass such as :class:`ChangeValueFilter` is used when
       reading an mmCIF file.

       :param str target: the mmCIF data item this filter should act on.
              It can be the full name of the data item (including category)
              such as ``_entity.type``; or just the attribute or keyword name
              such as ``.type_symbol`` which would match any category
              (e.g. ``_atom_site.type_symbol``).
    """
    def __init__(self, target):
        parts = target.lower().split('.')
        if len(parts) == 1 or not parts[0]:
            # Keyword-only target, e.g. ".type_symbol": match any category
            self.category = None
        else:
            cat = parts[0]
            # Add the leading underscore if the user left it out
            self.category = cat if cat.startswith('_') else '_' + cat
        self.keyword = parts[-1]

    def _set_category_from_target(self, target):
        # Use the entire target as a category name (no keyword)
        self.category = target if target.startswith('_') else '_' + target
        self.keyword = None

    def match_token_category(self, tok):
        """Return true iff the given token matches the target's category"""
        if self.category is None:
            return True
        return tok.category == self.category

    def match_token_keyword(self, tok):
        """Return true iff the given token matches the target's category
           and keyword"""
        if not self.match_token_category(tok):
            return False
        return tok.keyword == self.keyword

    def filter_category(self, tok):
        """Filter the given category token.

           :return: the original token (which may have been modified),
                    a replacement token, or None if the token should be
                    deleted.
        """
        raise NotImplementedError

    def filter_loop_header(self, tok):
        """Filter the given loop header token.

           :return: the original token (which must not have been modified),
                    a replacement token, or None if the token should be
                    deleted. If the header token is replaced or deleted,
                    all of the original loop rows will also be deleted.
        """
        return tok

    def get_loop_filter(self, tok):
        """Given a loop header token, potentially return a handler for each
           loop row token. This function is also permitted to alter the
           header in place (but not replace or remove it). Keywords should
           not be removed from the header (as that may confuse other filters)
           but can be replaced with null tokens.

           :return: a callable which will be called for each loop row token
                    (and acts like :meth:`filter_category`), or None if no
                    filtering is needed for this loop.
        """
        raise NotImplementedError


class ChangeValueFilter(Filter):
    """Change any token that sets a data item to ``old`` to be ``new``.

       For example, this could be used to rename certain chains, or change
       all residues of a certain type.

       :param str old: The existing value of the data item.
       :param str new: The new value of the data item.

       See :class:`Filter` for a description of the ``target`` parameter.
    """
    def __init__(self, target, old, new):
        super().__init__(target)
        self.old = old
        self.new = new

    def filter_category(self, tok):
        if self.match_token_keyword(tok) and tok.value == self.old:
            tok.value = self.new
        return tok

    def get_loop_filter(self, tok):
        if not self.match_token_category(tok):
            return None
        try:
            col = tok.keyword_index(self.keyword)
        except ValueError:
            # This loop doesn't contain our keyword; nothing to filter
            return None

        def loop_filter(t):
            item = t.items[col]
            if item.value == self.old:
                item.value = self.new
            return t
        return loop_filter


class ChangeFuncValueFilter(Filter):
    """Rewrite any token that sets a data item to x so it sets f(x).

       This can be used, for example, to perform a search and replace on
       a string, or to match against a regex.

       :param callable func: A function that is given the existing value
              of the data item, the category name (e.g. ``_atom_site``),
              and the keyword name (e.g. ``auth_seq_id``), and should return
              the new value of the data item (perhaps unchanged).

       See :class:`Filter` for a description of the ``target`` parameter.
    """
    def __init__(self, target, func):
        super().__init__(target)
        self.func = func

    def filter_category(self, tok):
        if self.match_token_keyword(tok):
            tok.value = self.func(tok.value, tok.category, tok.keyword)
        return tok

    def get_loop_filter(self, tok):
        if not self.match_token_category(tok):
            return None
        try:
            idx = tok.keyword_index(self.keyword)
        except ValueError:
            # Our keyword is not present in this loop; nothing to do
            return None

        def _row_filter(row):
            item = row.items[idx]
            item.value = self.func(item.value, tok.category, self.keyword)
            return row
        return _row_filter


class RemoveItemFilter(Filter):
    """Drop every token in the file that sets the given data item.

       See :class:`Filter` for a description of the ``target`` parameter.
    """
    def filter_category(self, tok):
        return None if self.match_token_keyword(tok) else tok

    def get_loop_filter(self, tok):
        if not self.match_token_category(tok):
            return None
        try:
            idx = tok.keyword_index(self.keyword)
        except ValueError:
            # Our keyword is not present in this loop; nothing to do
            return None
        # Blank out the keyword in the loop header
        kw = tok.keywords[idx]
        kw.spacers = []
        kw.token = _NullToken()

        def _row_filter(row):
            # Remove item from loop row (we don't want to pop from
            # row.items as other filters may reference later indexes)
            item = row.items[idx]
            spc = item.spacers
            if spc and isinstance(spc[0], _EndOfLineToken):
                del spc[1:]
            else:
                item.spacers = []
            item.token = _NullToken()
            return row
        return _row_filter


class ChangeKeywordFilter(Filter):
    """Rename the keyword in any applicable token to ``new``.

       :param str new: The new keyword.

       See :class:`Filter` for a description of the ``target`` parameter.
    """
    def __init__(self, target, new):
        super().__init__(target)
        self.new = new

    def filter_category(self, tok):
        if self.match_token_keyword(tok):
            tok.vartoken.keyword = self.new
        return tok

    def get_loop_filter(self, tok):
        if not self.match_token_category(tok):
            return None
        try:
            idx = tok.keyword_index(self.keyword)
        except ValueError:
            return None
        # Rename the keyword directly in the loop header; the data rows
        # carry only values, so no per-row filter is needed
        tok.keywords[idx].token.keyword = self.new


class ReplaceCategoryFilter(Filter):
    """Replace any token from the file that sets the given category.

       If neither ``raw_cif`` nor a ``dumper``/``system`` pair is given,
       the category is simply removed from the output.

       :param str target: the mmCIF category name this filter should act on,
              such as ``_entity``.
       :param str raw_cif: if given, text in mmCIF format which should replace
              the first instance of the category.
       :param dumper: if given, a dumper object that should generate mmCIF
              output to replace the first instance of the category.
       :type dumper: :class:`ihm.dumper.Dumper`
       :param system: the System that the given dumper will work on.
       :type system: :class:`ihm.System`
    """

    class _RawCifToken(_Token):
        # Pseudo-token that emits a preformatted chunk of mmCIF text
        __slots__ = ['txt']
        category = keyword = None

        def __init__(self, txt):
            self.txt = txt

        def as_mmcif(self):
            return self.txt

    def __init__(self, target, raw_cif=None, dumper=None, system=None):
        self._set_category_from_target(target)
        self.raw_cif = raw_cif
        self.dumper = dumper
        self.system = system
        #: The number of times the category was found in the mmCIF file
        self.num_matches = 0

    def _get_replacement_token(self):
        # Only the first occurrence of the category gets the replacement
        # text; any later occurrences are simply dropped
        if self.num_matches > 1:
            return None
        if self.raw_cif:
            return self._RawCifToken(self.raw_cif)
        if self.dumper and self.system:
            buf = StringIO()
            writer = CifWriter(buf)
            self.dumper.finalize(self.system)
            self.dumper.dump(self.system, writer)
            return self._RawCifToken(buf.getvalue())

    def filter_category(self, tok):
        if not self.match_token_category(tok):
            return tok
        self.num_matches += 1
        return self._get_replacement_token()

    def filter_loop_header(self, tok):
        # Loops of this category are treated like single-value categories
        return self.filter_category(tok)

    def get_loop_filter(self, tok):
        # No per-row filtering needed; if the header matched, all of the
        # loop's rows are removed together with it
        return None


class CifTokenReader(_PreservingCifTokenizer):
    """Read an mmCIF file and break it into tokens.

       Unlike :class:`CifReader` which extracts selected data from an mmCIF
       file, this class operates on the file at a lower level, splitting
       it into tokens, and preserving data such as comments and whitespace.
       This can be used for various housekeeping tasks directly on an mmCIF
       file, such as changing chain IDs or renaming categories or data items.

       Use :meth:`read_file` to actually read the file.

       :param file fh: Open handle to the mmCIF file
    """
    def __init__(self, fh):
        super().__init__(fh)

    def read_file(self, filters=None):
        """Read the file and yield tokens and/or token groups. The exact type
           of the tokens is subject to change and is not currently documented;
           however, each token or group object has an ``as_mmcif`` method
           which returns the corresponding text in mmCIF format. Thus, the
           file can be reconstructed by concatenating the result of
           ``as_mmcif`` for all tokens.

           :exc:`CifParserError` will be raised if the file cannot be parsed.

           :param filters: if a list of :class:`Filter` objects is provided,
                  the read tokens will be modified or removed by each of these
                  filters before being returned.
           :type filters: sequence of :class:`Filter`

           :return: tokens and/or token groups.
        """
        if filters is None:
            return self._read_file_internal()
        else:
            return self._read_file_with_filters(filters)

    def _read_file_with_filters(self, filters):
        """Yield all tokens in the file after applying each of `filters`.

           A token that any filter maps to None is dropped entirely."""
        # Per-row callables (from get_loop_filter) for the current loop
        loop_filters = None
        # Set when a filter replaced or removed a loop header; all of that
        # loop's data rows must then be dropped as well
        remove_all_loop_rows = False
        for tok in self._read_file_internal():
            if isinstance(tok, _CategoryTokenGroup):
                tok = self._filter_category(tok, filters)
            elif isinstance(tok, ihm.format._LoopHeaderTokenGroup):
                new_tok = self._filter_loop_header(tok, filters)
                if new_tok is not tok:
                    tok = new_tok
                    remove_all_loop_rows = True
                else:
                    remove_all_loop_rows = False
                    loop_filters = [f.get_loop_filter(tok) for f in filters]
                    loop_filters = [f for f in loop_filters if f is not None]
                    # Did filters remove all keywords from the loop?
                    if all(isinstance(k.token, _NullToken)
                           for k in tok.keywords):
                        tok = None
                        remove_all_loop_rows = True
            elif isinstance(tok, ihm.format._LoopRowTokenGroup):
                if remove_all_loop_rows:
                    tok = None
                elif loop_filters:
                    tok = self._filter_loop(tok, loop_filters)
            if tok is not None:
                yield tok

    def _filter_category(self, tok, filters):
        """Pass a category token group through each filter in turn.

           Returns None (drop the token) as soon as any filter does."""
        for f in filters:
            tok = f.filter_category(tok)
            if tok is None:
                return
        return tok

    def _filter_loop_header(self, tok, filters):
        """Pass a loop header token group through each filter in turn.

           Stops at the first filter that replaces or removes the header
           (i.e. returns something other than the original token)."""
        orig_tok = tok
        for f in filters:
            tok = f.filter_loop_header(tok)
            if tok is not orig_tok:
                break
        return tok

    def _filter_loop(self, tok, filters):
        """Pass a loop row through each per-row loop filter in turn.

           Returns None (drop the row) as soon as any filter does."""
        for f in filters:
            tok = f(tok)
            if tok is None:
                return
        return tok

    def _read_file_internal(self):
        """Yield every token or token group in the file, unfiltered."""
        while True:
            token = self._get_token()
            if token is None:
                # End of file
                break
            if isinstance(token, _VariableToken):
                yield self._read_value(token)
            elif isinstance(token, _LoopToken):
                for tok in self._read_loop(token):
                    yield tok
                # Did we hit the end of the file?
                if self._token_index < 0:
                    break
            else:
                yield token

    def _get_spaced_token(self):
        """Get the next token plus any number of leading space/EOL tokens"""
        spacers = []
        while True:
            token = self._get_token()
            if isinstance(token, (_EndOfLineToken, _WhitespaceToken)):
                spacers.append(token)
            else:
                return _SpacedToken(spacers, token)

    def _read_value(self, vartoken):
        """Read a line that sets a single value, e.g. "_entry.id   1YTI"""
        spval = self._get_spaced_token()
        if not isinstance(spval.token, _ValueToken):
            raise CifParserError(
                "No valid value found for %s.%s on line %d"
                % (vartoken.category, vartoken.keyword, self._linenum))
        eoltok = self._get_token()
        if not isinstance(eoltok, _EndOfLineToken):
            raise CifParserError(
                "No end of line after %s.%s on line %d"
                % (vartoken.category, vartoken.keyword, self._linenum))
        return _CategoryTokenGroup(vartoken, spval)

    def _read_loop(self, looptoken):
        """Handle a loop_ construct; yield the header then each data row."""
        header = self._read_loop_header(looptoken)
        # Record original number of keywords, in case the header token
        # is filtered
        num_keywords = len(header.keywords)
        yield header
        for line in self._read_loop_data(num_keywords):
            yield line

    def _read_loop_header(self, looptoken):
        """Read the set of keywords for a loop_ construct"""
        category = None
        keywords = []
        while True:
            spt = self._get_spaced_token()
            if isinstance(spt.token, _VariableToken):
                if category is None:
                    category = spt.token.category
                elif category != spt.token.category:
                    raise CifParserError(
                        "mmCIF files cannot contain multiple "
                        "categories within a single loop at line %d"
                        % self._linenum)
                keywords.append(spt)
            elif isinstance(spt.token, _ValueToken):
                # OK, end of keywords; proceed on to values
                self._unget_token()
                return _LoopHeaderTokenGroup(looptoken, category, keywords,
                                             spt.spacers)
            else:
                raise CifParserError("Was expecting a keyword or value for "
                                     "loop at line %d" % self._linenum)

    def _read_loop_data(self, num_keywords):
        """Read the data for a loop_ construct, yielding one row group
           (of num_keywords values) at a time."""
        while True:
            items = []
            for i in range(num_keywords):
                spt = self._get_spaced_token()
                if isinstance(spt.token, _ValueToken):
                    items.append(spt)
                elif i == 0:
                    # OK, end of the loop
                    for s in spt.spacers:
                        yield s
                    if spt.token is not None:
                        self._unget_token()
                    return
                else:
                    raise CifParserError(
                        "Wrong number of data values in loop "
                        "(should be an exact multiple of the number "
                        "of keys) at line %d" % self._linenum)
            yield _LoopRowTokenGroup(items)


def _int_type_handler(txt, linenum):
    try:
        return int(txt)
    except ValueError as exc:
        raise ValueError("%s at line %d" % (str(exc), linenum))


def _float_type_handler(txt, linenum):
    try:
        return float(txt)
    except ValueError as exc:
        raise ValueError("%s at line %d" % (str(exc), linenum))


class _BoolTypeHandler:
    _bool_map = {'YES': True, 'NO': False}

    def __init__(self, omitted):
        self.omitted = omitted

    def __call__(self, txt, linenum):
        return self._bool_map.get(txt.upper(), self.omitted)


def _str_type_handler(txt, linenum):
    return txt


class CifReader(_Reader, _CifTokenizer):
    """Class to read an mmCIF file and extract some or all of its data.

       Use :meth:`read_file` to actually read the file.

       See also :class:`CifTokenReader` for a class that operates on the
       lower-level structure of an mmCIF file, preserving data such as
       comments and whitespace.

       :param file fh: Open handle to the mmCIF file
       :param dict category_handler: A dict to handle data
              extracted from the file. Keys are category names
              (e.g. "_entry") and values are objects that have a `__call__`
              method and `not_in_file`, `omitted`, and `unknown` attributes.
              The names of the arguments to this `__call__` method
              are mmCIF keywords that are extracted from the file (for the
              keywords tr_vector[N] and rot_matrix[N][M] simply omit the [
              and ] characters, since these are not valid for Python
              identifiers). The object will be called with the data from
              the file as a set of strings, or `not_in_file`, `omitted` or
              `unknown` for any keyword that is not present in the file,
              the mmCIF omitted value (.), or mmCIF unknown value (?)
              respectively. (mmCIF keywords are case insensitive, so this
              class always treats them as lowercase regardless of the
              file contents.)
       :param unknown_category_handler: A callable (or `None`) that is called
              for each category in the file that isn't handled; it is given
              two arguments: the name of the category, and the line in the
              file at which the category was encountered (if known, otherwise
              None).
       :param unknown_keyword_handler: A callable (or `None`) that is called
              for each keyword in the file that isn't handled (within a
              category that is handled); it is given three arguments:
              the names of the category and keyword, and the line in the
              file at which the keyword was encountered (if known,
              otherwise None).
    """
    def __init__(self, fh, category_handler, unknown_category_handler=None,
                 unknown_keyword_handler=None):
        # Set up the C-accelerated reader if the extension module is
        # available; read_file() will then prefer it over the (much slower)
        # pure Python tokenizer
        if _format is not None:
            c_fh = _format.ihm_file_new_from_python(fh, False)
            self._c_format = _format.ihm_reader_new(c_fh, False)
        self.category_handler = category_handler
        self.unknown_category_handler = unknown_category_handler
        self.unknown_keyword_handler = unknown_keyword_handler
        # Accumulates non-loop category data until the end of the data block
        self._category_data = {}
        _CifTokenizer.__init__(self, fh)

    def __del__(self):
        # Free the C-level reader, but only if __init__ ever created one
        # (the _format extension module may not be available)
        if hasattr(self, '_c_format'):
            _format.ihm_reader_free(self._c_format)

    def _read_value(self, vartoken):
        """Read a line that sets a single value, e.g. "_entry.id   1YTI"""
        # Only consume the value token if we're interested in this
        # category and keyword
        ch = self.category_handler.get(vartoken.category)
        if ch is None:
            if self.unknown_category_handler is not None:
                self.unknown_category_handler(vartoken.category,
                                              self._linenum)
            return
        if vartoken.keyword not in ch._keys:
            if self.unknown_keyword_handler is not None:
                self.unknown_keyword_handler(vartoken.category,
                                             vartoken.keyword, self._linenum)
            return
        valtoken = self._get_token()
        if not isinstance(valtoken, _ValueToken):
            raise CifParserError(
                "No valid value found for %s.%s on line %d"
                % (vartoken.category, vartoken.keyword, self._linenum))
        cat_data = self._category_data.setdefault(vartoken.category, {})
        if isinstance(valtoken, _OmittedValueToken):
            val = ch.omitted
        elif isinstance(valtoken, _UnknownValueToken):
            val = ch.unknown
        else:
            convert = self._get_type_handler(ch, vartoken.keyword)
            val = convert(valtoken.txt, self._linenum)
        cat_data[vartoken.keyword] = val

    def _read_loop_keywords(self):
        """Read the set of keywords for a loop_ construct.

           :return: tuple of (category, keywords, keyword line numbers,
                    line number of the first keyword)."""
        category = first_line = None
        keywords = []
        keyword_lines = []
        while True:
            tok = self._get_token()
            if isinstance(tok, _VariableToken):
                if category is None:
                    # First keyword fixes the category for the whole loop
                    category = tok.category
                    first_line = self._linenum
                elif category != tok.category:
                    raise CifParserError(
                        "mmCIF files cannot contain multiple "
                        "categories within a single loop at line %d"
                        % self._linenum)
                keywords.append(tok.keyword)
                keyword_lines.append(self._linenum)
            elif isinstance(tok, _ValueToken):
                # End of the header; put the value back for the data reader
                self._unget_token()
                return category, keywords, keyword_lines, first_line
            else:
                raise CifParserError("Was expecting a keyword or value for "
                                     "loop at line %d" % self._linenum)

    def _read_loop_data(self, handler, num_wanted_keys, keyword_indices,
                        type_handlers):
        """Read the data for a loop_ construct, calling `handler` once
           per row. `keyword_indices` maps each file column to a position
           in the handler's argument list (-1 for unwanted columns)."""
        # Keys not present in the file keep not_in_file for every row;
        # all present keys are overwritten on each row
        data = [handler.not_in_file] * num_wanted_keys
        while True:
            for pos, dest in enumerate(keyword_indices):
                tok = self._get_token()
                if not isinstance(tok, _ValueToken):
                    if pos == 0:
                        # Clean end of the loop
                        self._unget_token()
                        return
                    raise CifParserError(
                        "Wrong number of data values in loop "
                        "(should be an exact multiple of the number "
                        "of keys) at line %d" % self._linenum)
                if dest < 0:
                    # Column not wanted by the handler
                    continue
                if isinstance(tok, _OmittedValueToken):
                    data[dest] = handler.omitted
                elif isinstance(tok, _UnknownValueToken):
                    data[dest] = handler.unknown
                else:
                    data[dest] = type_handlers[dest](tok.txt, self._linenum)
            handler(*data)

    def _get_type_handler(self, category_handler, keyword):
        """Return a function that converts keyword string into desired type"""
        # Check int before bool before float, matching the precedence the
        # category handler declares; anything unlisted stays a string
        if keyword in category_handler._int_keys:
            return _int_type_handler
        if keyword in category_handler._bool_keys:
            return _BoolTypeHandler(category_handler.omitted)
        if keyword in category_handler._float_keys:
            return _float_type_handler
        return _str_type_handler

    def _read_loop(self):
        """Handle a loop_ construct"""
        (category, keywords,
            keyword_lines, first_line) = self._read_loop_keywords()
        ch = self.category_handler.get(category)
        if ch is None:
            # No handler wants this category; just note it if requested
            # (the unhandled data tokens are skipped by the caller)
            if self.unknown_category_handler is not None:
                self.unknown_category_handler(category, first_line)
            return
        handlers = [self._get_type_handler(ch, key) for key in ch._keys]
        # Map each file column to its position in the handler's key list,
        # or -1 if the handler doesn't want that column
        index_of = {key: i for i, key in enumerate(ch._keys)}
        indices = [index_of.get(key, -1) for key in keywords]
        if self.unknown_keyword_handler is not None:
            for key, idx, line in zip(keywords, indices, keyword_lines):
                if idx < 0:
                    self.unknown_keyword_handler(category, key, line)
        self._read_loop_data(ch, len(ch._keys), indices, handlers)

    def read_file(self):
        """Read the file and extract data.
           Category handlers will be called as data becomes available -
           for ``loop_`` constructs, this will be once for each row in the
           loop; for categories (e.g. ``_entry.id model``), this will be once
           at the very end of the file.

           If the C-accelerated _format module is available, then it is used
           instead of the (much slower) Python tokenizer.

           :exc:`CifParserError` will be raised if the file cannot be parsed.

           :return: True iff more data blocks are available to be read.
        """
        self._add_category_keys()
        if hasattr(self, '_c_format'):
            # Use the C-accelerated reader if it was set up in __init__
            return self._read_file_c()

        def call_all_categories():
            # Flush accumulated non-loop category data to the handlers
            for cat, data in self._category_data.items():
                ch = self.category_handler[cat]
                ch(*[data.get(k, ch.not_in_file) for k in ch._keys])
            # Clear category data for next call to read_file()
            self._category_data = {}
        ndata = 0  # number of data_ blocks encountered so far
        in_save = False  # inside a save_ frame?
        while True:
            token = self._get_token(ignore_multiline=True)
            if token is None:
                # End of file
                break
            if isinstance(token, _VariableToken):
                self._read_value(token)
            elif isinstance(token, _DataToken):
                ndata += 1
                # Only read the first data block
                if ndata > 1:
                    # Allow reading the next data block
                    self._unget_token()
                    break
            elif isinstance(token, _LoopToken):
                self._read_loop()
                # Did we hit the end of the file?
                if self._token_index < 0:
                    break
            elif isinstance(token, _SaveToken):
                # save_ tokens toggle save-frame state; flush handlers
                # when a frame closes
                in_save = not in_save
                if not in_save:
                    call_all_categories()
                    for handler in self.category_handler.values():
                        handler.end_save_frame()
        call_all_categories()
        return ndata > 1

    def _read_file_c(self):
        """Read the file using the C parser"""
        _format.ihm_reader_remove_all_categories(self._c_format)
        for category, handler in self.category_handler.items():
            # A handler can supply its own C registration function;
            # fall back to the generic one otherwise
            register = getattr(handler, '_add_c_handler', None) \
                or _format.add_category_handler
            register(self._c_format, category, handler._keys,
                     frozenset(handler._int_keys),
                     frozenset(handler._float_keys),
                     frozenset(handler._bool_keys), handler)
        if self.unknown_category_handler is not None:
            _format.add_unknown_category_handler(self._c_format,
                                                 self.unknown_category_handler)
        if self.unknown_keyword_handler is not None:
            _format.add_unknown_keyword_handler(self._c_format,
                                                self.unknown_keyword_handler)
        try:
            ret_ok, more_data = _format.ihm_read_file(self._c_format)
        except _format.FileFormatError as exc:
            # Convert to the same exception used by the Python code
            raise CifParserError(str(exc))
        return more_data