[Video]: The 5 minutes IFB Core Cluster tutorial
mkdir -p optional
cd optional
module load fastqc/0.11.9
mkdir -p Fastqc logs/fastQC
sbatch -J FastQC_SRR1262731_R1 -o logs/fastQC/%x.out -e logs/fastQC/%x.err --cpus-per-task=2 --wrap=" \
fastqc --threads 2 --outdir Fastqc ../fastq/SRR1262731_extract_R1.fq.gz"
sbatch -J FastQC_SRR1262731_R2 -o logs/fastQC/%x.out -e logs/fastQC/%x.err --cpus-per-task=2 --wrap=" \
fastqc --threads 2 --outdir Fastqc ../fastq/SRR1262731_extract_R2.fq.gz"
This tool can also be used to remove primers/adapters: https://cutadapt.readthedocs.io/en/stable/guide.html (an adapter-removal variant is sketched right after the command below)
module load cutadapt/2.10
mkdir -p Cutadapt logs/Cutadapt
sbatch -J Cutadapt_SRR1262731 -o logs/Cutadapt/%x.out -e logs/Cutadapt/%x.err --cpus-per-task=2 --wrap=" \
cutadapt --cores 2 --trim-n --max-n 0.3 --error-rate 0.1 -q 30,30 --minimum-length 50 --pair-filter both \
--paired-output Cutadapt/SRR1262731_extract_R2.trimmed.fq \
--output Cutadapt/SRR1262731_extract_R1.trimmed.fq \
../fastq/SRR1262731_extract_R1.fq.gz \
../fastq/SRR1262731_extract_R2.fq.gz \
> Cutadapt/SRR1262731_extract_trimming_stats.txt"
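The command above only trims on quality and Ns. If adapter read-through is present, the same call can be extended with cutadapt's -a/-A options; a minimal sketch assuming standard Illumina TruSeq adapter sequences (the adapter sequences, job name and output file names are illustrative, adapt them to your library):
sbatch -J Cutadapt_SRR1262731_adapters -o logs/Cutadapt/%x.out -e logs/Cutadapt/%x.err --cpus-per-task=2 --wrap=" \
cutadapt --cores 2 -a AGATCGGAAGAGCACACGTCTGAACTCCAGTCA -A AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT \
--trim-n --max-n 0.3 --error-rate 0.1 -q 30,30 --minimum-length 50 --pair-filter both \
--paired-output Cutadapt/SRR1262731_extract_R2.adapt.trimmed.fq \
--output Cutadapt/SRR1262731_extract_R1.adapt.trimmed.fq \
../fastq/SRR1262731_extract_R1.fq.gz \
../fastq/SRR1262731_extract_R2.fq.gz \
> Cutadapt/SRR1262731_extract_adapter_trimming_stats.txt"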
# Index the reference genome with bwa and create the .fai and .dict indexes
module load bwa/0.7.17 ; module load samtools/1.10 ; module load gatk4/4.1.7.0
mkdir -p logs/genomeIndexing
sbatch -J BWA_index -o logs/genomeIndexing/%x.out -e logs/genomeIndexing/%x.err --wrap="bwa index ../genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa"
sbatch -J samtools_index -o logs/genomeIndexing/%x.out -e logs/genomeIndexing/%x.err --wrap="samtools faidx ../genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa"
sbatch -J GATK_index -o logs/genomeIndexing/%x.out -e logs/genomeIndexing/%x.err --wrap=" \
gatk CreateSequenceDictionary --REFERENCE ../genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa --OUTPUT ../genome/Bos_taurus.UMD3.1.dna.toplevel.6.dict"
# Align the raw data (this can also be done on the cutadapt-trimmed reads)
# The read groups can be added at the same time as the BWA alignment
mkdir -p alignment_bwa logs/alignment_bwa
sbatch -J SRR1262731_mapping -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --cpus-per-task=4 --mem=16G --wrap=" \
bwa mem -t 4 -R \"@RG\tID:1\tPL:Illumina\tPU:PU\tLB:LB\tSM:SRR1262731\" \
../genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa ../fastq/SRR1262731_extract_R1.fq.gz ../fastq/SRR1262731_extract_R2.fq.gz \
| samtools view -bh -o alignment_bwa/SRR1262731_extract.bam -"
samtools view -h alignment_bwa/SRR1262731_extract.bam | less -S
# Sort the BAM file (the index is written alongside thanks to --write-index)
sbatch -J SRR1262731_mappingSort -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --cpus-per-task=4 --mem=16G --wrap=" \
samtools sort -@ 4 --write-index -o alignment_bwa/SRR1262731_extract.sort.bam alignment_bwa/SRR1262731_extract.bam "
# Alignment statistics
sbatch -J SRR1262731_flagstat -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
samtools flagstat alignment_bwa/SRR1262731_extract.sort.bam > alignment_bwa/SRR1262731.flagstat.txt"
cat alignment_bwa/SRR1262731.flagstat.txt
## 2265873 + 0 in total (QC-passed reads + QC-failed reads)
## 0 + 0 secondary
## 46487 + 0 supplementary
## 0 + 0 duplicates
## 1700879 + 0 mapped (75.07% : N/A)
## 2219386 + 0 paired in sequencing
## 1109693 + 0 read1
## 1109693 + 0 read2
## 621472 + 0 properly paired (28.00% : N/A)
## 1229358 + 0 with itself and mate mapped
## 425034 + 0 singletons (19.15% : N/A)
## 0 + 0 with mate mapped to a different chr
## 0 + 0 with mate mapped to a different chr (mapQ>=5)
# remove the intermediate alignment file (only the sorted version is kept)
rm alignment_bwa/SRR1262731_extract.bam
# Quality control of the aligned data with qualimap
module load qualimap/2.2.2b
sbatch -J SRR1262731_mappingQC -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --cpus-per-task=4 --mem=6G --wrap=" \
unset DISPLAY; qualimap bamqc -nt 4 -outdir alignment_bwa/SRR1262731_extract_qualimap_report --java-mem-size=4G -bam alignment_bwa/SRR1262731_extract.sort.bam"
You must be in your tp_variant directory. What is the distribution of the mapping qualities (MAPQ)? And what percentage of reads have a MAPQ >= 30?
## Extract the mapping qualities stored in column 5
samtools view alignment_bwa/SRR1262731_extract.sort.bam | cut -f 5 > alignment_bwa/SRR1262731_extract.sort.mapping_qualities.txt
# load the most recently installed version of R
module load r
# /!\ if you rely on specific packages, specify the R version you want to use,
# e.g. r/3.6.3
R # open an interactive R session
## Load the mapping qualities into a data frame
mapq <- data.frame(read.table("alignment_bwa/SRR1262731_extract.sort.mapping_qualities.txt"))
head(mapq)
## V1
## 1 7
## 2 3
## 3 2
## 4 0
## 5 1
## 6 0
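The histogram object printed below can be computed with hist(); a sketch of the call, assuming 2-wide bins from 0 to 60 as suggested by the $breaks output:
## compute the histogram of the mapping qualities and store it in h
h <- hist(mapq[,"V1"], breaks=seq(0,60,by=2))
## print the histogram object (breaks, counts, density...)
h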
## $breaks
## [1] 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38 40 42 44 46 48
## [26] 50 52 54 56 58 60
##
## $counts
## [1] 1416971 75453 72162 61703 49081 33868 27891 26590 29708
## [10] 32737 29416 24800 17347 15971 12385 9575 9596 8942
## [19] 6140 44154 3529 4303 3731 4696 4229 1748 2828
## [28] 1895 845 233579
##
## $density
## [1] 0.3126766152 0.0166498740 0.0159236639 0.0136157234 0.0108304834
## [6] 0.0074734992 0.0061545815 0.0058674957 0.0065555307 0.0072239265
## [11] 0.0064910964 0.0054725044 0.0038278844 0.0035242487 0.0027329422
## [16] 0.0021128722 0.0021175061 0.0019731909 0.0013548862 0.0097432645
## [21] 0.0007787286 0.0009495236 0.0008233030 0.0010362452 0.0009331944
## [26] 0.0003857233 0.0006240420 0.0004181611 0.0001864623 0.0515428270
##
## $mids
## [1] 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39 41 43 45 47 49
## [26] 51 53 55 57 59
##
## $xname
## [1] "mapq[, \"V1\"]"
##
## $equidist
## [1] TRUE
##
## attr(,"class")
## [1] "histogram"
### display the histogram with an axis label, a title and a colour
png("alignment_bwa/mapq_hist.png")
plot(h,xlab="Mapping Qualities", main="SRR1262731" , col = "lightgray")
invisible(dev.off())
Open your png file, or rerun only the plot() command.
## Build a contingency table summarising the number of reads with MAPQ >= 30
table(mapq[,1]>=30)
##
## FALSE TRUE
## 1919170 346703
## Build a data frame with the percentages of unmapped reads, and of reads mapped with MAPQ >= 30 or MAPQ < 30
## the values come from alignment_bwa/SRR1262731.flagstat.txt and from the table above
total <- 2265873
mapped <- 1700879
mapped_q30 <- 346703
pc_unmapped <- round((total-mapped)*100/total)
pc_mapq30 <- round(mapped_q30*100/total)
pc_below_mapq30 <- 100-pc_unmapped-pc_mapq30
df <- data.frame( Group = c("Unmapped", "Mapped mapq>=30", "Mapped mapq<30"),
Value = c(pc_unmapped,pc_mapq30,pc_below_mapq30)
)
df
## Group Value
## 1 Unmapped 25
## 2 Mapped mapq>=30 15
## 3 Mapped mapq<30 60
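These proportions can then be visualised directly in R, for instance as a pie chart; a minimal sketch (labels, colours and title are arbitrary choices):
## optional: pie chart of the mapping proportions
pie(df$Value, labels = paste0(df$Group, " (", df$Value, "%)"),
    col = c("firebrick", "steelblue", "lightgray"), main = "SRR1262731")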
# Add the read groups if they are not already present (see the BWA command above)
sbatch -J SRR1262731_addRG -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
gatk AddOrReplaceReadGroups -I alignment_bwa/SRR1262731_extract.sort.bam -O alignment_bwa/SRR1262731_extract.sort.rg.bam \
--RGID 1 --RGPL Illumina --RGPU PU --RGSM SRR1262731 --RGLB LB"
sbatch -J SRR1262731_markDup -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --mem=8G --wrap=" \
gatk MarkDuplicates --java-options '-Xmx8G' -I alignment_bwa/SRR1262731_extract.sort.rg.bam --VALIDATION_STRINGENCY SILENT \
-O alignment_bwa/SRR1262731_extract.sort.rg.md.bam -M alignment_bwa/SRR1262731_extract_metrics_md.txt"
sbatch -J SRR1262731_md_flagstat -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
samtools flagstat alignment_bwa/SRR1262731_extract.sort.rg.md.bam > \
alignment_bwa/SRR1262731_extract.md.flagstat.txt"
Removal of unmapped reads and of reads aligned with a mapping quality below 30
samtools view -bh -F 4 -q 30 alignment_bwa/SRR1262731_extract.sort.rg.md.bam > alignment_bwa/SRR1262731_extract.sort.rg.md.filt.bam
sbatch -J SRR1262731_filt_flagstat -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
samtools flagstat alignment_bwa/SRR1262731_extract.sort.rg.md.filt.bam > \
alignment_bwa/SRR1262731_extract.filt.flagstat.txt"
module load bedtools/2.29.2
sbatch -J SRR1262731_interBed -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
bedtools intersect -a alignment_bwa/SRR1262731_extract.sort.rg.md.filt.bam -b additionnal_data/QTL_BT6.bed \
> alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam"
sbatch -J SRR1262731_bamIdx -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
samtools index alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam"
What is the mean coverage of the properly aligned reads (MAPQ >= 30) over the target regions?
# Compute the coverage
sbatch -J SRR1262731_bamDepth -o logs/alignment_bwa/%x.out -e logs/alignment_bwa/%x.err --wrap=" \
samtools depth -b additionnal_data/QTL_BT6.bed alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam \
> alignment_bwa/SRR1262731_extract.onTarget.depth.txt"
R # open an interactive R session
# Load the file created with samtools depth
cov <- read.table("alignment_bwa/SRR1262731_extract.onTarget.depth.txt",header=FALSE,sep="\t")
head(cov)
## V1 V2 V3
## 1 6 37913111 3
## 2 6 37913112 3
## 3 6 37913113 3
## 4 6 37913114 3
## 5 6 37913115 3
## 6 6 37913116 3
# Rename the columns of the data frame
colnames(cov) <- c("chr","position","depth")
head(cov)
## chr position depth
## 1 6 37913111 3
## 2 6 37913112 3
## 3 6 37913113 3
## 4 6 37913114 3
## 5 6 37913115 3
## 6 6 37913116 3
# Compute summary statistics (minimum, maximum, mean, median...) of the "depth" column
summary(cov[,"depth"])
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 4.000 6.000 6.792 8.000 122.000
# Compute the mean depth and store it in a variable
mean_cov <- mean(cov[,"depth"])
mean_cov
## [1] 6.792332
## Create the pdf file that will contain the plot
pdf("alignment_bwa/SRR1262731_extract.onTarget.depth.pdf",width=10,height=6)
## Plot the depth at each position of the target regions as vertical bars (type="h")
plot(cov$position,cov$depth,type="h",col="steelblue",xlab="Position", ylab="Coverage at MAPQ>=30",main="SRR1262731 on QTL_BT6.bed")
## Add a red horizontal line showing the mean depth
abline(h=mean_cov,col="red")
## Write the plot to the pdf file and close the graphics device
invisible(dev.off())
Open your pdf file, or rerun only the plot() and abline() lines.
mkdir -p GATK/vcf logs/GATK
sbatch -J SRR1262731_HC_to_VCF -o logs/GATK/%x.out -e logs/GATK/%x.err --mem=8G --wrap=" \
gatk HaplotypeCaller --java-options '-Xmx8G' --input alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam \
--reference genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa --intervals additionnal_data/QTL_BT6.bed \
--min-base-quality-score 18 --minimum-mapping-quality 30 --emit-ref-confidence \"NONE\" \
--output GATK/vcf/SRR1262731_extract_GATK.vcf "
mkdir -p GATK/gvcf
sbatch -J SRR1262731_HC_to_gVCF -o logs/GATK/%x.out -e logs/GATK/%x.err --mem=8G --wrap=" \
gatk HaplotypeCaller --java-options '-Xmx8G' --input alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam \
--reference genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa --intervals additionnal_data/QTL_BT6.bed \
--min-base-quality-score 18 --minimum-mapping-quality 30 --emit-ref-confidence "GVCF" \
--output GATK/gvcf/SRR1262731_extract_GATK.g.vcf "
# Merge the gVCF files into a single gVCF
sbatch -J CombineGVCFs -o logs/GATK/%x.out -e logs/GATK/%x.err --mem=8G --wrap=" \
gatk CombineGVCFs --java-options '-Xmx8G' \
--variant GATK/gvcf/SRR1262731_extract_GATK.g.vcf \
--variant additionnal_data/SRR1205992_extract_GATK.g.vcf \
--variant additionnal_data/SRR1205973_extract_GATK.g.vcf \
--reference genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa --intervals additionnal_data/QTL_BT6.bed \
--output GATK/gvcf/pool_GATK.g.vcf"
# Joint variant calling on the 3 samples of the gVCF
sbatch -J GenotypeGVCFs -o logs/GATK/%x.out -e logs/GATK/%x.err --mem=8G --wrap=" \
gatk GenotypeGVCFs --java-options '-Xmx8G' --variant GATK/gvcf/pool_GATK.g.vcf --reference genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
--output GATK/vcf/pool_GATK.vcf"
module load varscan/2.4.4
# Create a new directory
mkdir -p Varscan logs/Varscan
# Convert the bam alignment file to mpileup format
# the -A option keeps anomalous read pairs
sbatch -J SRR1262731_mpileup -o logs/Varscan/%x.out -e logs/Varscan/%x.err --mem=8G --wrap=" \
samtools mpileup -q 30 -B -A -d 10000 -f genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
alignment_bwa/SRR1262731_extract.sort.rg.md.filt.onTarget.bam > Varscan/SRR1262731_extract.mpileup"
# Variant calling with Varscan
sbatch -J SRR1262731_mpileup2cns -o logs/Varscan/%x.out -e logs/Varscan/%x.err --mem=8G --wrap=" \
varscan mpileup2cns Varscan/SRR1262731_extract.mpileup --output-vcf --variants --min-avg-qual 18 > Varscan/SRR1262731_extract_Varscan.vcf"
module load bcftools/1.10.2
# Rename the sample in the VCF
sed -i 's|Sample1|SRR1262731.Varscan|g' Varscan/SRR1262731_extract_Varscan.vcf
# Compress and index the vcf file
bgzip Varscan/SRR1262731_extract_Varscan.vcf
tabix -p vcf Varscan/SRR1262731_extract_Varscan.vcf.gz
# Merge the three samples called with Varscan
sbatch -J merge_varscan -o logs/Varscan/%x.out -e logs/Varscan/%x.err --mem=8G --wrap=" \
bcftools merge Varscan/SRR1262731_extract_Varscan.vcf.gz additionnal_data/SRR1205992_extract_Varscan.vcf.gz \
additionnal_data/SRR1205973_extract_Varscan.vcf.gz > Varscan/pool_Varscan.vcf"
# Fix the header (check the contig lines with: grep contig pool_Varscan.vcf)
sbatch -J merged_varscan_updateDict -o logs/Varscan/%x.out -e logs/Varscan/%x.err --mem=8G --wrap=" \
gatk UpdateVcfSequenceDictionary -I Varscan/pool_Varscan.vcf -O Varscan/pool_Varscan_dict.vcf -SD genome/Bos_taurus.UMD3.1.dna.toplevel.6.dict"
# Prepare a new results directory
mkdir -p filter_and_annot logs/filter
# Extract the SNVs into a separate file for GATK
sbatch -J GATK_SNP -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
gatk SelectVariants --java-options '-Xmx8G' -R genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
-V GATK/vcf/pool_GATK.vcf --select-type SNP -O filter_and_annot/pool_GATK.SNP.vcf"
# Extract the SNVs into a separate file for Varscan
sbatch -J Varscan_SNP -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
gatk SelectVariants --java-options '-Xmx8G' -R genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
-V Varscan/pool_Varscan_dict.vcf --select-type SNP -O filter_and_annot/pool_Varscan.SNP.vcf"
# Filter the SNVs with the hard filters recommended by GATK
sbatch -J GATK_SNP_filter -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
gatk VariantFiltration --java-options '-Xmx8G' -R genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
-V filter_and_annot/pool_GATK.SNP.vcf -O filter_and_annot/pool_GATK.SNP.prefilt.vcf \
-filter 'QD < 2.0' --filter-name 'QD2' -filter 'SOR > 3.0' --filter-name 'SOR3' \
-filter 'FS > 60.0' --filter-name 'FS60' -filter 'MQ < 40.0' --filter-name 'MQ40' \
-filter 'MQRankSum < -12.5' --filter-name 'MQRankSum-12.5' \
-filter 'ReadPosRankSum < -8.0' --filter-name 'ReadPosRankSum-8'"
# Select the variants passing these filters
sbatch -J GATK_SNP_PASS -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
gatk SelectVariants --java-options '-Xmx8G' -R genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa \
-V filter_and_annot/pool_GATK.SNP.prefilt.vcf --exclude-filtered -O filter_and_annot/pool_GATK.SNP.filtered.vcf"
# Compress and index the vcf files
bgzip filter_and_annot/pool_GATK.SNP.filtered.vcf
tabix -p vcf filter_and_annot/pool_GATK.SNP.filtered.vcf.gz
bgzip filter_and_annot/pool_Varscan.SNP.vcf
tabix -p vcf filter_and_annot/pool_Varscan.SNP.vcf.gz
sbatch -J GATK_varscan_isec -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
bcftools isec -f PASS -n +2 -w 1 -O v filter_and_annot/pool_GATK.SNP.filtered.vcf.gz filter_and_annot/pool_Varscan.SNP.vcf.gz \
> filter_and_annot/GATK_varscan_inter.vcf "
# Build the SnpEff database
module load snpeff/4.3.1t
snpEff -version # print the version (v4.3t)
echo "BosTaurus.genome : BosTaurus" >> snpeff.config # format: <genome_name>.genome : <description>
mkdir -p BosTaurus
cp genome/Bos_taurus.UMD3.1.dna.toplevel.6.fa BosTaurus/sequences.fa
cp genome/Bos_taurus.UMD3.1.93.chromosome.6.gff3 BosTaurus/genes.gff
echo -e "BosTaurus\nSnpEff4.1" > BosTaurus.db
sbatch -J snpeffBuild -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap="snpEff build -c snpeff.config -gff3 -v BosTaurus -dataDir ."
# Annotate with our database
sbatch -J snpeffAnnot -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
snpEff eff -c snpeff.config -dataDir . BosTaurus -s filter_and_annot/snpeff_res.html filter_and_annot/GATK_varscan_inter.vcf \
> filter_and_annot/GATK_varscan_inter.annot.vcf"
module load snpsift/4.3.1t
# Keep coding variants that are not synonymous
sbatch -J snpsift1 -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
cat filter_and_annot/GATK_varscan_inter.annot.vcf | SnpSift filter -Xmx8G \
\"(ANN[*].EFFECT != 'synonymous_variant') && (ANN[*].BIOTYPE = 'protein_coding')\" > filter_and_annot/GATK_varscan_inter.annot.coding.nosyn.vcf"
# Select our variant of interest among the heterozygous variants with an impact (missense)
sbatch -J snpsift2 -o logs/filter/%x.out -e logs/filter/%x.err --mem=8G --wrap=" \
cat filter_and_annot/GATK_varscan_inter.annot.coding.nosyn.vcf | SnpSift filter -Xmx8G \
\"ANN[*].EFFECT = 'missense_variant' & isHet( GEN[2] ) & isVariant( GEN[2] ) & isRef( GEN[0] ) & isRef( GEN[1] ) \" \
> filter_and_annot/GATK_varscan_inter.annot.coding.nosyn.filtered.vcf"
mkdir -p delly logs/delly
module load delly/0.8.3
sbatch -J sample1_delly -o logs/delly/%x.out -e logs/delly/%x.err --mem=8G --wrap=" \
delly call -g Zymoseptoria_tritici.fa -o delly/SV_calling_illumina.bcf mapping_illumina_chr10_500kb.bam"
# Convert to a vcf file
sbatch -J sample1_bcf_to_vcf -o logs/delly/%x.out -e logs/delly/%x.err --wrap=" \
bcftools view delly/SV_calling_illumina.bcf > delly/SV_calling_illumina.vcf"
# Extract the variant start positions
grep -v "^#" delly/SV_calling_illumina.vcf | grep -v "LowQual" | grep "<DEL>" | cut -f1,2 > delly/delly_del_start.txt
# Extract the other information fields
grep -v "^#" delly/SV_calling_illumina.vcf | grep -v "LowQual" | grep "<DEL>" | cut -f8 | cut -d ";" -f1,5,6,14 | sed "s/;/\t/g" > delly/delly_del_info.txt
# Merge the two files
paste -d '\t' delly/delly_del_start.txt delly/delly_del_info.txt > delly/delly_del.txt
# Reformat and clean up
awk '{print $1"\t"$2"\t"$4"\t"$3"\t"$5"\t"$6}' delly/delly_del.txt | sed "s/END=//g" > delly/delly_del.csv
rm delly/delly_del_info.txt delly/delly_del_start.txt delly/delly_del.txt
module load sniffles/1.0.11
mkdir -p sniffles logs/sniffles
sbatch -J sample1_sniffles -o logs/sniffles/%x.out -e logs/sniffles/%x.err --mem=8G --wrap=" \
sniffles -l 100 -m mapping_minion_chr10_500kb.bam -v sniffles/SV_calling_minion.vcf"
# Extract chr/start, the END position (taken from the INFO field) and the PRECISE/IMPRECISE flag
cat sniffles/SV_calling_minion.vcf | grep ^chr_10 | grep "<DEL>" | cut -f 1,2 > sniffles/sniffles_del_start.txt
cat sniffles/SV_calling_minion.vcf | grep ^chr_10 | grep "<DEL>" | cut -f 8 | sed "s/.*;END=\([0-9]*\);.*/\1/" > sniffles/sniffles_del_stop.txt
cat sniffles/SV_calling_minion.vcf | grep ^chr_10 | grep "<DEL>" | cut -f 8 | cut -d ";" -f 1 > sniffles/sniffles_del_infos.txt
paste sniffles/sniffles_del_start.txt sniffles/sniffles_del_stop.txt sniffles/sniffles_del_infos.txt > sniffles/sniffles_del.csv
rm sniffles/sniffles_del_start.txt sniffles/sniffles_del_stop.txt sniffles/sniffles_del_infos.txt
Comparative test of the sizes of the deletions detected by delly vs sniffles (in an R session)
# load the delly positions
delly <- read.table("delly/delly_del.csv", sep="\t")
# add a size column corresponding to end - start
delly$size <- delly$V3-delly$V2
# load the sniffles positions
sniffles <- read.table("sniffles/sniffles_del.csv",sep="\t")
# add a size column corresponding to end - start
sniffles$size <- sniffles$V3-sniffles$V2
# preview of both tables
head(delly) ; head(sniffles)
## V1 V2 V3 V4 V5 V6 size
## 1 chr_10 29522 29580 PRECISE PE=0 SR=20 58
## 2 chr_10 32733 32783 PRECISE PE=0 SR=20 50
## 3 chr_10 57127 57600 PRECISE PE=3 SR=16 473
## 4 chr_10 80015 80622 PRECISE PE=15 SR=20 607
## 5 chr_10 90255 90309 PRECISE PE=0 SR=7 54
## 6 chr_10 90309 101040 IMPRECISE PE=8 10731
## V1 V2 V3 V4 size
## 1 chr_10 57126 57598 IMPRECISE 472
## 2 chr_10 91233 98159 IMPRECISE 6926
## 3 chr_10 111020 111655 PRECISE 635
## 4 chr_10 257001 257165 IMPRECISE 164
## 5 chr_10 343161 343273 PRECISE 112
## 6 chr_10 360638 361061 PRECISE 423
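The Welch two-sample t-test reported below compares the two size distributions; it corresponds to a call of this form:
# compare the sizes of the deletions detected by delly and by sniffles
t.test(delly$size, sniffles$size)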
##
## Welch Two Sample t-test
##
## data: delly$size and sniffles$size
## t = -0.41574, df = 14.769, p-value = 0.6836
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -27195.43 18328.23
## sample estimates:
## mean of x mean of y
## 6168.0 10601.6