Commit 9ff804a1 authored by Jerome Mariette's avatar Jerome Mariette
Browse files

No commit message

No commit message
parent 433bb153
......@@ -71,7 +71,7 @@ $(function () {
var index = $(this).attr("id").split("_")[2];
var c_table = new Array();
for (var i=min; i<=max; i += step) {
c_table.push(0);
c_table.push(1);
}
var index_array = $("#contigs_length_"+index).val().split(","),
value_array = $("#contigs_length_count_"+index).val().split(",");
......@@ -84,6 +84,7 @@ $(function () {
}
}
}
console.log(c_table)
c_table = c_table.slice(0, -1);
y_table.push({
name: $("#sample_id_"+index).html(),
......@@ -91,6 +92,8 @@ $(function () {
});
});
console.log(y_table)
chart = new Highcharts.Chart({
chart: {
renderTo: 'highcharts_container',
......@@ -114,7 +117,18 @@ $(function () {
title: {
text: "Number of sequences"
},
min: 0,
min: 1,
labels: {
formatter: function() {
if (this.value ==1) {
return this.value-1;
} else {
return this.value;
}
}
},
type: 'logarithmic',
minorTickInterval: 0.1,
plotLines: [{
value: 0,
width: 1,
......@@ -123,8 +137,9 @@ $(function () {
},
tooltip: {
formatter: function() {
return '<b>'+ this.series.name +'</b><br/>'+
this.x +'bp : '+ this.y +'sequences';
var real_nb_seq = parseInt(this.y) -1;
return '<b>'+ this.series.name +'</b><br/>'+
this.x +'bp : '+ real_nb_seq +'sequences';
}
},
credits: { enabled: false },
......
#
# Copyright (C) 2012 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from ng6.ng6workflow import NG6Workflow
from ng6.utils import Utils
class MiSeqDiversity (NG6Workflow):
    """Illumina MiSeq diversity-analysis workflow (Python 2 syntax: uses
    `raise Exception, msg`).

    Builds an NG6 component chain: optional Illumina pass-filter, FastQC,
    Mothur contig assembly, dereplication, PCR-trimmed reference alignment,
    column filtering, pre-clustering, UCHIME chimera removal, taxonomic
    classification, and an OTU analysis (dist/cluster/summary).
    """
    def process(self):
        """Assemble the workflow graph, one add_component call per step.

        Input selection (mutually exclusive, validated below):
          * --casava-directory + --lane-number, or
          * explicit --read-1 (and optionally --read-2) file lists.

        Raises:
            ValueError: neither input mode is fully specified, or
                lane-number is missing with casava-directory.
            IOError: a --read-1/--read-2 path does not exist.
        """
        # Run names may contain spaces; normalize to underscores so they are
        # safe to use inside archive file names below.
        run_name = "_".join(self.runobj.name.split())
        # manage the sequences files
        group_prefix = None
        if self.args['casava_directory'] is not None :
            # Casava mode: discover read files (and MID descriptions) from the
            # Casava output directory for the requested lane.
            if self.args['lane_number'] is None :
                raise ValueError, "lane-number must be specified with casava-directory."
            mids_desc_array, self.read1_files, self.read2_files = Utils.filesFromCasava( self.args['casava_directory'], self.project.get_name(), self.args['lane_number'] )
            group_prefix = (Utils.get_group_basenames(self.read1_files+self.read2_files, "read")).keys()
            self.runobj.add_mids_description(mids_desc_array)
        elif (self.args['read_1'] is not None) and (len(self.args['read_1']) > 0) :
            # Explicit-files mode: validate every provided path up front.
            self.read1_files = []
            self.read2_files = []
            for file in self.args["read_1"]:
                if os.path.isfile(file):
                    self.read1_files.append(file)
                else:
                    raise IOError, file + " file does not exists."
            if self.args["read_2"]:
                for file in self.args["read_2"]:
                    if os.path.isfile(file):
                        self.read2_files.append(file)
                    else:
                        raise IOError, file + " file does not exists."
        else:
            raise ValueError, "[casava-directory and lane-number] OR [read(s)] must be specified."
        # Paired-end iff at least one read-2 file was collected.
        is_paired_end = len(self.read2_files) > 0
        if self.args['casava_directory'] is not None and self.args["keep_reads"] != "all" :
            # fastq illumina filter: keep only reads that pass (or fail) the
            # Illumina chastity filter, per the keep_reads option.
            fastqilluminafilter = self.add_component("FastqIlluminaFilter", [self.read1_files+self.read2_files, self.args["keep_reads"], group_prefix, run_name+"_fastqilluminafilter.tar.gz"])
            # list filtered files
            if is_paired_end :
                # split read 1 and read 2 from filtered files list
                [filtered_read1_files, filtered_read2_files] = Utils.split_pair(fastqilluminafilter.fastq_files_filtered, (group_prefix is not None))
            else:
                filtered_read1_files = fastqilluminafilter.fastq_files_filtered
                filtered_read2_files = []
            # Sort so read-1/read-2 lists stay pairwise aligned downstream.
            filtered_read1_files = sorted(filtered_read1_files)
            filtered_read2_files = sorted(filtered_read2_files)
        else:
            # No filtering requested (or no casava directory): pass raw files through.
            fastqilluminafilter = None
            filtered_read1_files = self.read1_files
            filtered_read2_files = self.read2_files
        # archive the files
        saved_files = filtered_read1_files + filtered_read2_files
        reads_prefixes = None
        if group_prefix is not None :
            # concatenate fastq: merge files that share a group basename
            # before archiving, so each group is stored as a single file.
            reads_prefixes = (Utils.get_group_basenames(saved_files, "read")).keys()
            concatenatefastq = self.add_component("ConcatenateFilesGroups", [saved_files, reads_prefixes])
            saved_files = concatenatefastq.concat_files
        addrawfiles = self.add_component("AddRawFiles", [self.runobj, saved_files, self.args["compression"]])
        # make some statistics on raw file
        fastqc = self.add_component("FastQC", [filtered_read1_files+filtered_read2_files, (group_prefix is not None), True, run_name+"_fastqc.tar.gz"], parent = fastqilluminafilter)
        # list concatenated files
        if is_paired_end and (group_prefix is not None):
            # split read 1 and read 2 from filtered files list
            [concat_read1_files, concat_read2_files] = Utils.split_pair(concatenatefastq.concat_files, (group_prefix is not None))
        elif group_prefix is not None:
            concat_read1_files = concatenatefastq.concat_files
            concat_read2_files = []
        else:
            concat_read1_files = filtered_read1_files
            concat_read2_files = filtered_read2_files
        concat_read1_files = sorted(concat_read1_files)
        concat_read2_files = sorted(concat_read2_files)
        # check if files are compressed (decides whether a gunzip step is
        # inserted before Mothur make.contigs)
        fileFormat = '.gz'
        # NOTE(review): this reads self.args['read_1'][0], which is None in
        # casava-directory mode and would raise a TypeError here; presumably
        # self.read1_files[0] (or concat_read1_files[0]) was intended — verify.
        fileExtension = os.path.splitext(self.args['read_1'][0])[1]
        if fileExtension == fileFormat:
            gunzip = self.add_component("GunZipFiles",[concat_read1_files,concat_read2_files])
            makecontigs = self.add_component("MothurMakeContigs", kwargs={'read1_files':gunzip.fastq_R1,'read2_files':gunzip.fastq_R2,\
                'sample_name': self.args["sample_name"],'maxambig':'0','maxlength':self.args["max_contigs_length"]}, component_prefix="test", parent=fastqilluminafilter)
        else:
            makecontigs = self.add_component("MothurMakeContigs", kwargs={'read1_files':concat_read1_files,'read2_files':concat_read2_files,\
                'sample_name': self.args["sample_name"],'maxambig':'0','maxlength':self.args["max_contigs_length"]}, parent=fastqilluminafilter)
        # Dereplicate assembled contigs, then build the per-sample count table.
        uniqueseqs = self.add_component("MothurUniqueSeqs", [makecontigs.good_fasta_files])
        countseqs = self.add_component("MothurCountSeqs", [uniqueseqs.unique_names_files,makecontigs.good_groups_files])
        # Trim the reference alignment to the amplified region defined by the
        # PCR primers, then align the unique sequences against it.
        pcrseqs = self.add_component("MothurPcrSeqs", kwargs={'fasta_files':self.args["reference_alignment"], 'forward_primer':self.args["forward_primer"],\
            'reverse_primer':self.args["reverse_primer"]})
        alignseqs = self.add_component("MothurAlign", kwargs={'fasta_files':uniqueseqs.unique_fasta_files,'reference_alignment_files':pcrseqs.pcr_fasta_files,\
            'count_table_files':countseqs.count_table_files, 'maxhomop':8}, parent=makecontigs)
        # Remove alignment columns that are all gaps, re-dereplicate, then
        # pre-cluster to absorb sequencing errors before chimera detection.
        filterseqs = self.add_component("MothurFilterSeqs",[alignseqs.good_fasta_files])
        uniqueseqs_filter = self.add_component("MothurUniqueSeqs", kwargs={'fasta_files':filterseqs.filtered_fasta_files, \
            'count_table_files':alignseqs.good_count_table_files},component_prefix="after_filter")
        precluster = self.add_component("MothurPreCluster",kwargs={'fasta_files':uniqueseqs_filter.unique_fasta_files,\
            'count_table_files':uniqueseqs_filter.unique_count_table_files})
        chimerauchime = self.add_component("MothurChimeraUchime",kwargs={'fasta_files':precluster.precluster_fasta_files,\
            'count_table_files':precluster.precluster_count_table_files},parent=alignseqs)
        # classify reads with the provided taxonomy
        classifyseqs = self.add_component("MothurClassifySeqs",kwargs={'fasta_files':chimerauchime.pick_fasta_files,'template_files':self.args["classify_template"],\
            'taxonomy_files':self.args["classify_taxonomy"],'count_table_files':chimerauchime.good_count_table_files}, parent= chimerauchime)
        # OTUs approach
        distseqs = self.add_component("MothurDistSeqs", [chimerauchime.pick_fasta_files])
        cluster = self.add_component("MothurCluster", kwargs={'dist_files':distseqs.dist_files,'count_table_files':chimerauchime.good_count_table_files})
        otuanalysis = self.add_component("MothurOTUAnalysis", kwargs={'an_list_files':cluster.an_list_files,'count_table_files':chimerauchime.good_count_table_files,\
            'taxonomy_files':classifyseqs.taxonomy_files,'label':self.args["labels"],'tree_label':self.args["labels"]},parent=chimerauchime)
\ No newline at end of file
#
# Copyright (C) 2012 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Copyright (C) 2012 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
[global]
name = miseq_diversity
description = illumina MiSeq pipeline to assess diversity
#
# Parameter section
# param.name: the parameter display name
# .flag: the command line flag to use the argument
# .help: a brief description of what the parameter does
# .default [None]: the value produced if the parameter is not provided
# .type [str]: the parameter type that should be tested (str|int|date|file|bool)
# .choices [None]: a container of the allowable values for the parameter
# .required [False]: whether or not the command-line option may be omitted
# .action [store]: the basic type of action to be taken (store|append)
#
[parameters]
casava_directory.name = casava_directory
casava_directory.flag = --casava-directory
casava_directory.help = Where are stored casava results (see also lane-number)
casava_directory.required = False
lane_number.name = lane_number
lane_number.flag = --lane-number
lane_number.help = Which lane should be processed (mandatory with casava-directory)
lane_number.required = False
read1_files.name = read_1
read1_files.flag = --read-1
read1_files.help = Read1
read1_files.action = append
read2_files.name = read_2
read2_files.flag = --read-2
read2_files.help = Read2
read2_files.action = append
keep_reads.name = keep_reads
keep_reads.flag = --keep
keep_reads.help = Keep reads which pass the Illumina filters, or keep reads which do not pass the Illumina filters (pass_illumina_filters|not_pass_illumina_filters|all). With values other than "all", the headers of reads must be '@<instrument>:<run number>:<flowcell ID>:<lane>:<tile>:<x-pos>:<y-pos> <read>:<is filtered>:<control number>:<index sequence>'
keep_reads.default = pass_illumina_filters
keep_reads.choices = pass_illumina_filters|not_pass_illumina_filters|all
sample_name.name = sample_name
sample_name.flag = --sample-name
sample_name.help = Sample name
sample_name.action = append
compression.name = compression
compression.flag = --compression
compression.help = How should data be compressed once archived (none|gz|bz2)
compression.default = none
compression.choices = none|gz|bz2
max_contigs_length.name = max_contigs_length
max_contigs_length.flag = --max-contigs-length
max_contigs_length.help = Maximum length of sequences after make contigs
reference_alignment.name = reference_alignment
reference_alignment.flag = --reference-alignment
reference_alignment.help = Where is stored the reference alignment
reference_alignment.required = True
forward_primer.name = forward_primer
forward_primer.flag = --forward-primer
forward_primer.help = Which forward primer has been used in the pcr : "primerString"
forward_primer.action = append
reverse_primer.name = reverse_primer
reverse_primer.flag = --reverse-primer
reverse_primer.help = Which reverse primer has been used in the pcr : "primerString"
reverse_primer.action = append
region_start.name = region_start
region_start.flag = --region-start
region_start.type = int
region_start.help = Provide a starting position to trim to
region_end.name = region_end
region_end.flag = --region-end
region_end.type = int
region_end.help = Provide an ending position to trim from
classify_template.name = classify_template
classify_template.flag = --classify-template
classify_template.help = Which template file should be used to classify reads
classify_template.required = True
classify_taxonomy.name = classify_taxonomy
classify_taxonomy.flag = --classify-taxonomy
classify_taxonomy.help = Which taxonomy file should be used to classify reads
classify_taxonomy.required = True
labels.name = labels
labels.flag = --labels
labels.type = str
labels.help = unique-0.03-0.05-0.10
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment