import os
from os.path import basename, splitext, join, isfile, dirname, abspath, exists, getsize
import glob
import re
import json
import getpass
import subprocess
import sys
import datetime
from jinja2 import *
from collections import OrderedDict
import numpy
import yaml
import gzip
import input_utils
from pandas import (read_csv, Series, DataFrame,
                    concat, to_numeric, MultiIndex, melt)
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import base64

try:
    plt.style.use('ggplot')
except Exception:
    pass

try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO        # Python 3
#wildcard_constraints: sample = "[^\/]+"
qcumber_path = os.path.abspath(workflow.basedir)
#import pdb; pdb.set_trace()
main_path = "QCResults"
log_path = main_path + "/_logfiles"
data_path = main_path + "/_data"
sav_path = main_path + "/SAV"
fastqc_path = main_path + "/FastQC"
trimming_path = main_path + "/Trimmed"
trimbetter_path = main_path + "/trimBetter"  # temp folder
mapping_path = main_path + "/Mapping"
classification_path = main_path + "/Classification"
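# For orientation, the directory layout these variables describe is roughly
# the following (folder names are taken from the definitions above; which
# folders actually appear depends on the enabled steps):
#
#   QCResults/
#       _logfiles/        # log_path
#       _data/            # data_path
#       SAV/              # sav_path
#       FastQC/           # fastqc_path
#       Trimmed/          # trimming_path
#       trimBetter/       # trimbetter_path (temporary)
#       Mapping/          # mapping_path
#       Classification/   # classification_path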
try:
    with open('samples.yaml', 'r') as sample_h:
        sample_info_new = yaml.safe_load(sample_h)
    sample_info_new_complex = dict((x, y)
                                   for x, y in sample_info_new.items()
                                   if isinstance(y, list))
    sample_info_new_simple = dict((x, y)
                                  for x, y in sample_info_new.items()
                                  if not isinstance(y, list))
except Exception:
    pass
# max threads for all rules
max_threads = 10

with open(os.path.join(data_path, 'general_information.json'), 'r') as info:
    geninfo_config = json.load(info)
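# Rough shape of general_information.json, reconstructed from the keys that
# are accessed in this Snakefile (all values below are illustrative, not real
# output):
#
#   {
#     "QCumber_path": "/path/to/QCumber",
#     "Sample information": {
#       "type": "PE",                       # or "SE"
#       "samples":    {"sampleA_L001": ["sampleA_L001_R1.fastq.gz", "sampleA_L001_R2.fastq.gz"]},
#       "rename":     {"sampleA_L001_R1": "sampleA_L001_R1"},
#       "join_lanes": {"sampleA": ["sampleA_L001", "sampleA_L002"]},
#       "join_reads": {"QCResults/tmp/join_reads/<merged file>": ["<files to concatenate>"]}
#     }
#   }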
onsuccess:
    if len(geninfo_config["Sample information"]["join_reads"]) != 0:
        try:
            shell("rm -r QCResults/tmp")
        except Exception:
            pass
    if cmd_input["trimBetter"]:
        try:
            shell("rm -r {trimbetter_path}".format(trimbetter_path=trimbetter_path))
        except Exception:
            print("Could not remove %s" % trimbetter_path)
#---------------------------------------------< Functions >------------------------------------------------------------#

cmap = matplotlib.cm.get_cmap('Set3')


# convert images to base64
def to_base64(file):
    with open(file, "rb") as imgfile:
        imgstring = base64.b64encode(imgfile.read())
        return 'data:image/png;base64,' + imgstring.decode("utf-8")
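# Hypothetical usage sketch (the file name is made up): embed a plot directly
# into the HTML report rendered with jinja2 instead of referencing it on disk.
#
#   img_src = to_base64("QCResults/_data/example_boxplot.png")
#   # -> "data:image/png;base64,iVBORw0KG..."  (usable as an <img src=...> value)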
def bam_to_fastq(bamfile):
    return ["{path}/tmp/bam_to_fastq/{sample}.fastq".format(path=main_path, sample=get_name(bamfile))]


def get_name(abs_name):
    new_name = basename(abs_name)
    while splitext(new_name)[-1] != "":
        new_name = splitext(new_name)[0]
    return new_name
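# get_name() strips *all* extensions, not just the last one, so compressed
# FASTQ names collapse to the bare sample name (file names below are made up):
#
#   get_name("/data/run1/sampleA_L001_R1.fastq.gz")  # -> "sampleA_L001_R1"
#   get_name("sampleB.bam")                          # -> "sampleB"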
def get_all_reads(wildcards, raw=False):
    if config["notrimming"]:
        if any([x.endswith(".bam") for x in unique_samples[wildcards.sample]]):
            # array of bam files should always be of length 1
            return bam_to_fastq(unique_samples[wildcards.sample][0])
        else:  # geninfo_config["Sample information"]["samples"][wildcards.sample]
            return unique_samples[wildcards.sample]
    elif raw:  # get raw reads
        if any([x.endswith(".bam") for x in geninfo_config["Sample information"]["samples"][wildcards.sample]]):
            return bam_to_fastq(geninfo_config["Sample information"]["samples"][wildcards.sample][0])
        else:
            return geninfo_config["Sample information"]["samples"][wildcards.sample]
    # get trimmed reads
    elif wildcards.sample in geninfo_config["Sample information"]["join_lanes"]:
        if geninfo_config["Sample information"]["type"] == "PE":
            return expand("{path}/{sample}.{read}.fastq.gz", read=["1P", "1U", "2P", "2U"],
                          sample=geninfo_config["Sample information"]["join_lanes"][wildcards.sample],
                          path=trimming_path)
        else:
            return expand("{path}/{sample}.fastq.gz",
                          sample=geninfo_config["Sample information"]["join_lanes"][wildcards.sample],
                          path=trimming_path)
    else:
        if geninfo_config["Sample information"]["type"] == "PE":
            return expand("{path}/{sample}.{read}.fastq.gz", read=["1P", "1U", "2P", "2U"],
                          sample=wildcards.sample,
                          path=trimming_path)
        elif geninfo_config["Sample information"]["type"] == "SE":
            return expand("{path}/{sample}.fastq.gz",
                          sample=wildcards.sample,
                          path=trimming_path)
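# Sketch of what get_all_reads() resolves to for a hypothetical paired-end
# sample "sampleA" (trimming enabled, no BAM input, no joined lanes):
#
#   raw=False -> ["QCResults/Trimmed/sampleA.1P.fastq.gz",
#                 "QCResults/Trimmed/sampleA.1U.fastq.gz",
#                 "QCResults/Trimmed/sampleA.2P.fastq.gz",
#                 "QCResults/Trimmed/sampleA.2U.fastq.gz"]
#   raw=True  -> the untouched input files listed under
#                geninfo_config["Sample information"]["samples"]["sampleA"]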
def get_total_number(filename):
    try:
        with open(filename, "r") as fastqc_data:
            for line in fastqc_data.readlines():
                if line.startswith("Total Sequences"):
                    return int(re.search(r"Total Sequences\s+(?P<reads>\d+)", line).group("reads"))
    except Exception:
        return 0
    return 0
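# get_total_number() parses FastQC's fastqc_data.txt; the line it looks for
# has this form (the number is an example):
#
#   Total Sequences    123456
#
# in which case it returns 123456, and 0 if the file is missing, unreadable or
# the line is absent.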
# Plot BOXPLOTS
boxplots = {
    "Per_sequence_quality_scores": {
        "title": "Per sequence quality scores",
        "ylab": "Mean Sequence Quality (Phred Score)",
        "xlab": "Sample"},
    "Sequence_Length_Distribution": {
        "title": "Sequence Length Distribution",
        "ylab": "Sequence Length (bp)",
        "xlab": "Sample"},
    "Per_sequence_GC_content": {
        "title": "Per sequence GC content",
        "ylab": "Mean GC content (%)",
        "xlab": "Sample"}}
def plot_summary(csv, outfile):
    df = read_csv(csv, header=None, sep=",")
    df.columns = ["Sample", "Type", "Read", "Value", "Count"]
    # workaround for weighted boxplots: repeat every row "Count" times
    # (df.ix and DataFrame.append no longer exist in current pandas; iloc and
    # concat are their equivalents here)
    new_df = DataFrame()
    for i in range(len(df.index)):
        if int(df.iloc[i]["Count"]) != 0:
            new_df = concat([new_df, DataFrame([df.iloc[i, :-1]] * int(df.iloc[i]["Count"]))],
                            ignore_index=True)
    # FacetGrid's former "size" argument is called "height" in current seaborn
    g = sns.FacetGrid(new_df, col="Type", height=4, aspect=.7)
    (g.map(sns.boxplot, "Sample", "Value", "Read")
     .despine(left=True)
     .add_legend(title="Read"))
    plt.savefig(outfile)
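# plot_summary() expects a headerless CSV with the columns
# Sample,Type,Read,Value,Count, one row per histogram bin, e.g. (illustrative
# values only):
#
#   sampleA,Per sequence GC content,R1,41,15230
#   sampleA,Per sequence GC content,R2,42,14987
#
# Each row is repeated "Count" times so the boxplots are weighted by the bin
# counts.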
#------------------------------------------< make config files >-------------------------------------------------------#
parameter = yaml.safe_load(open(os.path.join(geninfo_config["QCumber_path"], "config", "parameter.txt"), "r"))
sample_dict = dict([(geninfo_config["Sample information"]["rename"][get_name(x)], x)
                    for x in sum(geninfo_config["Sample information"]["samples"].values(), [])])
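# sample_dict maps the (possibly renamed) sample name back to the original
# input file it came from, e.g. (hypothetical entry):
#
#   {"sampleA_L001_R1": "/data/run1/sampleA_L001_R1.fastq.gz"}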
if any([x for x in sum(geninfo_config["Sample information"]["samples"].values(), []) if x.endswith(".bam")]):
    rule bam_to_fastq:
        input:
            lambda wildcards: sample_dict[wildcards.sample]
        output:
            temp(main_path + "/tmp/{sample}.fastq")
        message:
            "Convert bam to fastq"
        run:
            shell("samtools bam2fq {input} > {output}")
joined_samples = dict((x, sum([geninfo_config["Sample information"]["samples"][val]
                               for val in geninfo_config["Sample information"]["join_lanes"][x]], []))
                      for x in geninfo_config["Sample information"]["join_lanes"].keys())
unique_samples = dict(joined_samples, **dict(
    (x, geninfo_config["Sample information"]["samples"][x])
    for x in geninfo_config["Sample information"]["samples"].keys()
    if x not in sum(geninfo_config["Sample information"]["join_lanes"].values(), [])))
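# Example of how lane joining is resolved (names are made up): with
# join_lanes = {"sampleA": ["sampleA_L001", "sampleA_L002"]}, joined_samples
# collects every read file of both lanes under the merged name:
#
#   {"sampleA": ["sampleA_L001_R1.fastq.gz", "sampleA_L001_R2.fastq.gz",
#                "sampleA_L002_R1.fastq.gz", "sampleA_L002_R2.fastq.gz"]}
#
# unique_samples then contains these merged entries plus every sample that is
# not part of any lane group.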
cmd_input = yaml.safe_load(open("config.yaml", "r"))

if config["reference"] or config["index"]:
    config["nomapping"] = False
else:
    config["nomapping"] = True
rule preprocess_join_readfiles:
    input:
        lambda wildcards: geninfo_config["Sample information"]["join_reads"]["QCResults/tmp/join_reads/" + wildcards.sample]
    output:
        temp("QCResults/tmp/join_reads/{sample}")
    shell:
        "cat {input} > {output}"