File: datasets_converter.py

package: spades 3.13.1+dfsg-2 (area: main; suites: bullseye, sid)
#!/usr/bin/python3

############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################


import os
import sys

# make the shared pipeline helpers (load_config_from_info_file, bool_to_str) importable
sys.path.append(os.path.join(sys.path[0], "../../spades_pipeline/"))
from process_cfg import *

########################################################################

# for pretty-printing
canonical_order       = ["paired_reads", "single_reads",
                         "jumping_first", "jumping_second",
                         "jumping_single_first", "jumping_single_second",
                         "RL", "IS", "delta", "jump_is", "jump_rl",
                         "single_cell", "is_var", "reference_genome"]
max_property_name_len = len(max(canonical_order, key=len))
tabulation            = "    "

########################################################################
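# Conversion overview: every dataset section from the old-style file (except
# the "common" section) becomes its own <dataset_name>.info file; read paths
# are rewritten relative to the output folder, and paired/single read files
# are merged into quoted, space-separated lists.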

if len(sys.argv) != 4:
    print("Splits an old-style datasets.info file into a separate file per dataset (new format)\n")
    print("Usage: " + sys.argv[0] + " old_style_datasets.info folder_for_separate_datasets folder_with_reads (for relative paths in new-format datasets)")
    sys.exit(0)

output_folder = sys.argv[2]
reads_folder  = sys.argv[3]

if not os.path.exists(output_folder):
    os.makedirs(output_folder)

old_datasets = load_config_from_info_file(sys.argv[1])
for (key, dataset) in old_datasets.items():
    if key != "common" and key != "OPPA":  # oops, hard-code (for datasets_archive)
        cur_dataset = open(os.path.join(output_folder, key + ".info"), 'w')
        cur_ds_dict = dict()
        paired_reads = '"'
        single_reads = '"'
        for (prop, value) in dataset.__dict__.items():
            if prop in ["RL", "IS", "delta", "jump_is", "jump_rl", "is_var"]:
                cur_ds_dict[prop] = str(value)                
            elif prop == "single_cell":
                cur_ds_dict[prop] = bool_to_str(value)
            else:                
                value = os.path.relpath(os.path.join(reads_folder, value), output_folder)
                if prop in ["first", "second"]:
                    paired_reads += value + ' '
                elif prop in ["single_first", "single_second"]:
                    single_reads += value + ' '
                else:
                    cur_ds_dict[prop] = value                

        paired_reads += '"'
        single_reads += '"'
        if paired_reads != '""':
            cur_ds_dict["paired_reads"] = paired_reads
        if single_reads != '""':
            cur_ds_dict["single_reads"] = single_reads
        
        # pretty-printing
        for prop in canonical_order:
            if prop in cur_ds_dict:
                cur_dataset.write(prop.ljust(max_property_name_len) + tabulation + cur_ds_dict[prop] + "\n")

        cur_dataset.close()
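
########################################################################

# Example run (hypothetical file names and values, for illustration only):
#
#     ./datasets_converter.py datasets.info converted reads
#
# Each resulting converted/<name>.info file lists the recognised properties
# in canonical order, left-aligned to the longest property name
# ("jumping_single_second", 21 characters) plus four spaces of tabulation:
#
#     paired_reads             "../reads/lib_1.fastq ../reads/lib_2.fastq "
#     RL                       100
#     IS                       215
#     single_cell              false
#
# Note the space before each closing quote: the script appends ' ' after
# every read path before sealing the list with '"'.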