File: update_pickles.py

package info (click to toggle)
python-cobra 0.29.1-3
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 11,512 kB
  • sloc: python: 14,703; xml: 12,841; makefile: 137; sh: 32
file content (139 lines) | stat: -rwxr-xr-x 4,369 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
#!/usr/bin/env python


"""
Regenerate model pickle files.

This should be performed after updating core classes in order to prevent subtle
bugs.

"""


import importlib.resources
from collections import OrderedDict
from json import dump as json_dump
from pickle import dump, load

import cobra
from cobra.io import (
    load_matlab_model,
    load_model,
    save_json_model,
    save_matlab_model,
    save_yaml_model,
    write_sbml_model,
)


# Pin the solver so the regenerated fixtures are reproducible regardless of
# which solvers happen to be installed locally.
config = cobra.Configuration()
config.solver = "glpk"


if __name__ == "__main__":

    def _rounded_fva(fva_frame):
        """Round FVA values to 5 decimals, keyed in sorted column order.

        Rounding keeps the JSON fixtures stable across solver runs.
        """
        return OrderedDict(
            (key, {k: round(v, 5) for k, v in fva_frame[key].items()})
            for key in sorted(fva_frame)
        )

    # --- ecoli ----------------------------------------------------------
    # Load fresh (cache=False) so the pickle reflects the current classes.
    ecoli_model = load_model("iJO1366", cache=False)
    with open("iJO1366.pickle", "wb") as outfile:
        # Protocol 2 keeps the pickles loadable by older Python versions.
        dump(ecoli_model, outfile, protocol=2)

    # --- salmonella -----------------------------------------------------
    # Attach the pickled media compositions before re-pickling the model.
    salmonella = load_model("salmonella", cache=False)
    with open("salmonella.media", "rb") as infile:
        salmonella.media_compositions = load(infile)
    with open("salmonella.pickle", "wb") as outfile:
        dump(salmonella, outfile, protocol=2)

    # --- create mini model from textbook --------------------------------
    textbook = load_model("textbook", cache=False)
    mini = cobra.Model("mini_textbook")
    mini.compartments = textbook.compartments

    # Glycolysis reactions plus the exchanges/transports needed to carry flux.
    mini_reaction_ids = frozenset(
        (
            "GLCpts",
            "PGI",
            "PFK",
            "FBA",
            "TPI",
            "GAPD",
            "PGK",
            "PGM",
            "ENO",
            "PYK",
            "EX_glc__D_e",
            "EX_h_e",
            "H2Ot",
            "ATPM",
            "PIt2r",
        )
    )
    for reaction in textbook.reactions:
        if reaction.id in mini_reaction_ids:
            # Model.add_reaction() was deprecated and removed from cobrapy;
            # add_reactions() (taking a list) is the supported API.
            mini.add_reactions([reaction.copy()])
    mini.reactions.ATPM.upper_bound = mini.reactions.PGI.upper_bound
    mini.objective = ["PFK", "ATPM"]  # No biomass, 2 reactions

    # add in some information from iJO1366
    mini.add_reactions([ecoli_model.reactions.LDH_D.copy()])
    mini.add_reactions([ecoli_model.reactions.EX_lac__D_e.copy()])
    r = cobra.Reaction("D_LACt2")  # deliberately empty stub reaction
    mini.add_reactions([r])
    mini.reactions.GLCpts.gene_reaction_rule = (
        ecoli_model.reactions.GLCptspp.gene_reaction_rule
    )

    # adjust bounds: reuse PGI's bounds as the model-wide default magnitudes
    for i in ["ATPM", "D_LACt2", "EX_lac__D_e", "LDH_D"]:
        mini.reactions.get_by_id(i).upper_bound = mini.reactions.PGI.upper_bound
    for i in ["D_LACt2", "LDH_D"]:
        mini.reactions.get_by_id(i).lower_bound = mini.reactions.PGI.lower_bound
    # set names and annotation from the textbook model where available
    for g in mini.genes:
        try:
            tg = textbook.genes.get_by_id(g.id)
        except KeyError:
            # Gene came from iJO1366, not the textbook model; keep defaults.
            continue
        g.name = tg.name
        g.annotation = tg.annotation
    # Sort for deterministic serialization output.
    mini.reactions.sort()
    mini.genes.sort()
    mini.metabolites.sort()

    # output to various formats
    # Hoist the package-data directory lookup instead of repeating it.
    # NOTE(review): assumes `cobra.data` is importable as an attribute of the
    # already-imported `cobra` package — confirm against the installed layout.
    data_dir = importlib.resources.files(cobra.data)
    with open("mini.pickle", "wb") as outfile:
        dump(mini, outfile, protocol=2)
    save_matlab_model(mini, data_dir.joinpath("mini.mat"))
    save_json_model(mini, data_dir.joinpath("mini.json"), pretty=True)
    save_yaml_model(mini, data_dir.joinpath("mini.yml"))
    write_sbml_model(mini, "mini_fbc2.xml")
    write_sbml_model(mini, "mini_fbc2.xml.bz2")  # compressed variants for I/O tests
    write_sbml_model(mini, "mini_fbc2.xml.gz")
    write_sbml_model(mini, data_dir.joinpath("mini_cobra.xml"))

    # Round-trip a MATLAB-format (RAVEN) model into a pickle fixture.
    raven = load_matlab_model("raven.mat")
    with open("raven.pickle", "wb") as outfile:
        dump(raven, outfile, protocol=2)

    # TODO: these need reference solutions rather than circular solution checking!

    # fva results
    fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
    with open("textbook_fva.json", "w") as outfile:
        json_dump(_rounded_fva(fva_result), outfile)

    # fva with pfba constraint
    fva_result = cobra.flux_analysis.flux_variability_analysis(
        textbook, pfba_factor=1.1
    )
    with open("textbook_pfba_fva.json", "w") as outfile:
        json_dump(_rounded_fva(fva_result), outfile)

    # textbook solution (parsimonious FBA)
    solution = cobra.flux_analysis.parsimonious.pfba(textbook)
    with open("textbook_solution.pickle", "wb") as f:
        dump(solution, f, protocol=2)