#!/bin/bash
echo ' ________ ________ ________ _________ _______ ________ ________ ________ ________ _____ ______ _______ ________ '
echo '|\ __ \|\ __ \|\ __ \|\___ ___\\ ___ \ |\ __ \|\ _____\\ __ \|\ __ \|\ _ \ _ \|\ ___ \ |\ __ \ '
echo '\ \ \|\ \ \ \|\ \ \ \|\ \|___ \ \_\ \ __/|\ \ \|\ \ \ \__/\ \ \|\ \ \ \|\ \ \ \\\__\ \ \ \ __/|\ \ \|\ \ '
echo ' \ \ ____\ \ _ _\ \ \\\ \ \ \ \ \ \ \_|/_\ \ \\\ \ \ __\\ \ \\\ \ \ _ _\ \ \\|__| \ \ \ \_|/_\ \ _ _\ '
echo ' \ \ \___|\ \ \\ \\ \ \\\ \ \ \ \ \ \ \_|\ \ \ \\\ \ \ \_| \ \ \\\ \ \ \\ \\ \ \ \ \ \ \ \_|\ \ \ \\ \| '
echo ' \ \__\ \ \__\\ _\\ \_______\ \ \__\ \ \_______\ \_______\ \__\ \ \_______\ \__\\ _\\ \__\ \ \__\ \_______\ \__\\ _\ '
echo ' \|__| \|__|\|__|\|_______| \|__| \|_______|\|_______|\|__| \|_______|\|__|\|__|\|__| \|__|\|_______|\|__|\|__|'
echo -e "\n\n"
## GLOBAL VARIABLES
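# Each entry maps a sample ID (used to name the per-sample output folder) to its raw FASTQ(.gz) file.
# To add a sample, append an entry in the same format; the path below is only a hypothetical example:
#   datasets["OHMX20200619_008"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/example_R1_001.fastq.gz"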
declare -A datasets=(["OHMX20200619_005"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/20191030_Ribo1_MHU-L-041709_S177_L008_R1_001.fastq.gz" ["OHMX20200619_006"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/20191030_Ribo2_MHU-L-061019_S178_L008_R1_001.fastq.gz" ["OHMX20200619_007"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/20191030_Ribo3_MHU-L-091307_S179_L008_R1_001.fastq.gz" ["OHMX20200619_001"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/a20190013_001_FKDL202574961-1a-AK2178-AK1031_HK7NYDRXX_L1.fq.gz" ["OHMX20200619_004"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/a20190013_004_FKDL202574961-1a-AK2180-AK10751_HK7NYDRXX_L1.fq.gz" ["OHMX20200619_002"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/a20190013_002_FKDL202574961-1a-AK870-AK10750_HK7NYDRXX_L1.fq.gz" ["OHMX20200619_003"]="/data/gerbenm/20200619_Novo_GST_Frame/Novo/a20190013_003_FKDL202574961-1a-AK1958-AK2941_HK7NYDRXX_L1.fq.gz")
##Input arguments##
echo "INPUT ARUGMENTS:"
ENSEMBL_ANNOT="100"
SPECIES="human"
SPECIES_SHORT="hsa"
ORF="plastid" # Mostly standard or plastid
PRICE="Y"
CORES="20"
UNIQUEMAPPING="Y"
CLIPPER="trimmomatic" # Mostly trimmomatic or fastx
ADAPTORSEQ="TGGAATTCTCGGGTGCCAAGG"
PHIX="Y" # Y or N
RRNA="Y" # Y or N
SNORNA="Y" # Y or N
TRNA="Y" # Y or N
IGENOMESROOT="/data/igenomes/"
ENSEMBLDICT="/share/steven/Ensembl/"
ENSEMBLDB="ENS_${SPECIES_SHORT}_${ENSEMBL_ANNOT}.db"
COMPLOGO='ohmx' # ohmx or biobix
SCRIPTDIR="/home/gerben/scripts/"
BASEDIR="/data/gerbenm/20200619_Novo_GST_Frame/Novo"
echo "basedir = $BASEDIR"
echo -e "scriptdir = $SCRIPTDIR\n"
echo "Ensembl annotation = $ENSEMBL_ANNOT"
echo "Species = $SPECIES"
echo "Offset calling method = $ORF"
echo "PRICE-adapted mapping files = $PRICE"
echo "Cores = $CORES"
echo "Unique mapping = $UNIQUEMAPPING"
echo "Adaptor sequence = $ADAPTORSEQ"
echo "Clipper = $CLIPPER"
echo "Phix filtering = $PHIX"
echo "rRNA filtering = $RRNA"
echo "tRNA filtering = $TRNA"
echo "sn(o)RNA filtering = $SNORNA"
echo "iGenomes root folder = $IGENOMESROOT"
echo "Ensembl DB location = ${ENSEMBLDICT}${ENSEMBLDB}"
echo -e "\n\n"
##Activate PROTEOFORMER Conda Environment##
source activate proteoformer
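# Note: 'source activate' relies on the legacy conda setup scripts; on conda >= 4.4 the equivalent is
# 'conda activate proteoformer' (after 'conda init' has been run for this shell).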
##Reference information##
#echo -e "Download reference info\n\n"
#python $SCRIPTDIR/proteoformer/Additional_tools/ENS_db.py -v $ENSEMBL_ANNOT -s $SPECIES
#chmod 755 $ENSEMBLDB
#mv $ENSEMBLDB $ENSEMBLDICT
#python $SCRIPTDIR/proteoformer/Additional_tools/get_igenomes.py -v $ENSEMBL_ANNOT -s $SPECIES -d $IGENOMESROOT -c 15
##Internal directory structure##
mkdir $BASEDIR/fastqc_raw
mkdir $BASEDIR/fastqc_mapped
mkdir $BASEDIR/statistics
mkdir $BASEDIR/mqc
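# Output layout created under $BASEDIR:
#   fastqc_raw/    - FastQC reports on the raw FASTQ files
#   fastqc_mapped/ - FastQC reports on the mapped BAM files plus the copied STAR Log.final.out files
#   statistics/    - per-sample and aggregated mapping statistics (CSV)
#   mqc/           - mappingQC (mQC) reports (HTML + ZIP) per sample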
# Cat files for statistics
TOTAL_STATS_FILE=${BASEDIR}/statistics/total_stats.csv
TEMP_TOTAL_STATS=${BASEDIR}/statistics/tmp_total_stats.csv
rm -rf ${TOTAL_STATS_FILE}
touch ${TOTAL_STATS_FILE}
##Start loop
for i in "${!datasets[@]}"
do
ID=$i
FILE=${datasets[$i]}
echo -e "START NEW LOOP:\n\t${ID}\n\t${FILE}\n"
##gunzip if necessary
if [[ "$FILE" =~ .*\.gz$ ]]
then
pigz -p $CORES -d $FILE
#gunzip $FILE
UNZIPFILE="${FILE%.*}"
echo "File $FILE unzipped"
else
UNZIPFILE=$FILE
fi
echo -e "Unzipped file: $UNZIPFILE \n"
mkdir -p $BASEDIR/$ID
cd $BASEDIR/$ID
echo -e "1) FastQC raw file $ID \n"
fastqc $UNZIPFILE -o $BASEDIR/fastqc_raw -t $CORES
echo -e "FastQC raw file $ID done \n"
echo -e "2) Mapping $ID \n"
perl $SCRIPTDIR/proteoformer/1_mapping/mapping.pl --inputfile1 $UNZIPFILE --readtype ribo_untr --name $ID --species $SPECIES --ensembl $ENSEMBL_ANNOT --cores $CORES --unique $UNIQUEMAPPING --igenomes_root $IGENOMESROOT --clipper $CLIPPER --adaptor $ADAPTORSEQ --phix $PHIX --rRNA $RRNA --snRNA $SNORNA --tRNA $TRNA --rpf_split N --price_files $PRICE --suite $ORF --suite_tools_loc $SCRIPTDIR/proteoformer/1_mapping/ > $BASEDIR/$ID/mapping_$ID.txt 2>&1
#Copy stats for multiQC
cp STAR/fastq1/Log.final.out $BASEDIR/fastqc_mapped/${ID}_fastqc.Log.final.out
echo -e "Mapping done for $ID \n"
ln -s $BASEDIR/$ID/STAR/fastq1/untreat.bam $BASEDIR/$ID/$ID.bam
echo -e "3) FastQC mapped file $ID \n"
fastqc $BASEDIR/$ID/$ID.bam -o $BASEDIR/fastqc_mapped -t $CORES
echo -e "FastQC on the mapped file done for $ID\n"
sqlite3 SQLite/results.db <<END_SQL
.timeout 2000
.header on
.mode csv
.output ${BASEDIR}/statistics/${ID}_stats.csv
select * from statistics order by total desc;
END_SQL
echo -e "Statistics written for $ID \n"
mkdir $BASEDIR/mqc/$ID
echo -e "4) mQC $ID \n"
perl $SCRIPTDIR/proteoformer/2_mappingQC/mappingQC.pl --samfile $BASEDIR/$ID/STAR/fastq1/untreat.sam --treated untreated --cores $CORES --result_db $BASEDIR/$ID/SQLite/results.db --unique $UNIQUEMAPPING --ens_db $ENSEMBLDICT/$ENSEMBLDB --offset $ORF --offset_img $BASEDIR/$ID/plastid/${ID}_untreated_p_offsets.png --tool_dir $SCRIPTDIR/proteoformer/2_mappingQC/mqc_tools/ --plotrpftool pyplot3D --output_folder $BASEDIR/mqc/$ID --html $BASEDIR/mqc/mqc_$ID.html --zip $BASEDIR/mqc/mqc_$ID.zip --tmp $BASEDIR/$ID/tmp/ --comp_logo $COMPLOGO > $BASEDIR/$ID/mQC_$ID.txt 2>&1
rm -rf $BASEDIR/$ID/tmp/mappingqc_untreated
echo -e "mQC performed for $ID \n"
echo -e "\n"
rm -rf fastq/fastq1_nophix.fq fastq/nophix/ fastq/fastq1_clip* fastq/Unmapped.out.mate1 fastq/Log.* fastq/SJ.out.tab fastq/Aligned.out.sam fastq/fastq1_norrna.fq fastq/fastq1_norrna_nosnrna.fq fastq/fastq1_norrna_nosnrna_notrna.fq
echo -e "5) Transcript calling $ID \n"
perl $SCRIPTDIR/proteoformer/3_tr_calling/Rule-based/ribo_translation.pl --in_sqlite $BASEDIR/$ID/SQLite/results.db --out_sqlite $BASEDIR/$ID/SQLite/results.db --ens_db $ENSEMBLDICT/$ENSEMBLDB > $BASEDIR/$ID/tr_translation_$ID.txt 2>&1
echo -e "Transcript calling performed on $ID \n"
##To run price, another ENV needs to be loaded.
conda deactivate
source activate price
echo -e "6) PRICE $ID \n"
python $SCRIPTDIR/proteoformer/4_ORF_calling/using_PRICE/PRICE.py -r $BASEDIR/$ID/SQLite/results.db > $BASEDIR/$ID/PRICE_orf_$ID.txt 2>&1
echo -e "PRICE ORF calling performed on $ID \n"
conda deactivate
source activate proteoformer
##To run spectre, another ENV needs to be loaded.
conda deactivate
source activate spectre
echo -e "7) SPECTRE $ID \n"
python $SCRIPTDIR/proteoformer/4_ORF_calling/using_SPECtre/SPECtre.py -r $BASEDIR/$ID/SQLite/results.db -o 28:12,29:12,30:12 -c 60 -x 3 > $BASEDIR/$ID/SPECtre_orf_$ID.txt 2>&1
echo -e "SPECTRE ORF calling performed on $ID \n"
conda deactivate
source activate proteoformer
##Go back to the BaseDir
cd $BASEDIR
pigz -p $CORES $UNZIPFILE
#gzip $UNZIPFILE
echo "File $UNZIPFILE zipped"
echo -e "\n\n"
done
#End loop
cd $BASEDIR/fastqc_raw
multiqc -i "Raw Read Data" .
cd $BASEDIR
cd $BASEDIR/fastqc_mapped
multiqc -i "Mapped Read Data" .
cd $BASEDIR
echo -e "MultiQC on all raw/mapped data performed\n"
##Cat all stats files
#Do this in a sorted ID manner
readarray -t sorted_datasets < <(for a in "${!datasets[@]}"; do echo "$a"; done | sort)
for i in "${sorted_datasets[@]}"
do
ID=$i
#Strip the CSV header line from each per-sample stats file
tail -n +2 "${BASEDIR}/statistics/${ID}_stats.csv" > ${BASEDIR}/statistics/tmp_stats.csv
cat ${TOTAL_STATS_FILE} ${BASEDIR}/statistics/tmp_stats.csv >> ${TEMP_TOTAL_STATS}
rm -rf ${BASEDIR}/statistics/tmp_stats.csv
mv ${TEMP_TOTAL_STATS} ${TOTAL_STATS_FILE}
done
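# total_stats.csv now contains the statistics rows of all samples in sorted sample-ID order, with the CSV
# header stripped from every per-sample file.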
##Deactivate conda environment
conda deactivate