Commit 8bc49ae2 authored by matbuoro

first push

# Folders
/GT
/doc
/raw
*.zip
*.7z
# Results
*.pdf
# History files
.Rhistory
.Rapp.history
# Session Data files
.RData
*.Rdata
*.RData
*.Rout
# Example code in package build process
*-Ex.R
# Output files from R CMD build
/*.tar.gz
# Output files from R CMD check
/*.Rcheck/
# RStudio files
.Rproj.user/
*.Rproj
# produced vignettes
vignettes/*.html
vignettes/*.pdf
# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
.httr-oauth
# knitr and R markdown default cache directories
/*_cache/
/cache/
# Temporary files created by R markdown
*.utf8.md
*.knit.md
.Rproj.user
################################################################################
###        Model of CMR data to estimate the spawner population size        ###
###        of Salmo salar in the Bresle river.                               ###
### Sabrina Servanty & Etienne Prévost ###
### January 2015 ###
################################################################################
model {
############################################################################################
## Differences from Sébastien's report (Delmotte et al. 2010):
## - setting the probability of capture at Eu in 2000 and 2001 to be smaller than in the other years (partial trapping)
## - not considering the recapture probability in 1989 and 1993 because the trap at Beauchamps was not working in those years (nor in 2000 and 2001, as in the report)
## - adding a flow effect in pi_Eu, with a different temporal window depending on sea age. Flow data are standardized within the model as (ln(Q[t]) - mean(ln(Q)))/sd(ln(Q)); a minimal R illustration follows the model code. Residuals are standardized and monitored.
###########   - 15 June - 31 August for 1SW
###########   - 15 April - 30 June for MSW
## - adding another flow effect corresponding to the second peak of migration. Flow data are standardized within the model.
###########   Same temporal window for 1SW and MSW: 1 October - 30 November
## - calculating R² (% of the variation in the probability of capture at Eu explained by the two covariates)
###############################################################################################
############################################################################################
## Used indices:
## t: year; 1 to Y - from 1984 to the last year of the series ##
## a: sea age;
## 1-1SW (Grisle),
## 2-MSW (salmon)
#####################################################################################################
#####################################################################################################
## DATA
## --------------
## Y: length of the time series (number of years)
## C_Eu[t,a]: Annual number of fish captured at Eu per sea age category (1SW/MSW)
## Cm_Eu[t,a]: Annual number of fish captured and marked at Eu per sea age category
## Cm_B[t,a]: Annual number of marked fish captured at Beauchamps per sea age category
## Cum_B[t,a]: Annual number of unmarked fish captured at Beauchamps per sea age category
## Q[t,a]: Annual mean flow associated with the sea age category
## Q2pic[t]: Annual mean flow associated with the second peak of migration
######################################################################################
#################################################################################
# PROBABILITIES (p)
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
## HYPER-PARAMETERS
## ------
#################################################################################
### Mean and standard deviation of probabilities depending on sea age.
# Probabilities to be captured at Eu.
for (a in 1:2) {
pi_Eu00[a] ~ dbeta(1,1) # year 2000 is considered to be different (partial trapping)
pi_Eu01[a] ~ dbeta(1,1) # year 2001 is considered to be different (partial trapping)
logit_int_Eu[a] ~ dunif(-10,10) #intercept
logit_flow_Eu[a] ~ dunif(-10,10) #slope for flow data (1SW at Eu: 15 june - 31 august, MSW: 15 april - 30 june)
sigmapi_Eu[a] ~ dunif(0,20)
lflow_fall_Eu[a] ~ dunif(-10,10) # slope for flow data, second peak of migration (same period for both 1SW and MSW)
# Probability to be captured at Beauchamps (after reproduction)
mupi_B[a] ~ dbeta(1,1)
sigmapi_B[a] ~ dunif(0,20)
}
################################################################################
## PROBABILITY DISTRIBUTIONS
## -------------------------
## pi_Eu[t,a]: annual probability to be captured at Eu given sea age
## pi_S[t,a]: annual survival until reproduction given sea age
## pi_B[t,a]: annual probability to be captured at Beauchamps given sea age (after reproduction)
####################################################################################
### Probabilities to be captured at Eu (time and sea age dependent)
for (t in 1:Y) {
lQ2pic[t] <- log(Q2pic[t]) # ln transformation of autumn covariate
stlQ2pic[t] <- (lQ2pic[t] - mean(lQ2pic[]))/sd(lQ2pic[]) # standardized covariate
} #end of loop over years
for (a in 1:2) {
varpi_Eu[a] <- (sigmapi_Eu[a])*(sigmapi_Eu[a])
precpi_Eu[a] <- 1/(varpi_Eu[a]) # precision
for (t in 1:Y) { # logit_pi_Eu drawn hierarchically for all years, with a flow-dependent mean
logQ[t,a] <- log(Q[t,a]) # ln transformation of covariate
stlogQ[t,a] <- (logQ[t,a] - mean(logQ[,a]))/sd(logQ[,a]) # standardized covariate
logit_mupi_Eu[t,a] <- logit_int_Eu[a] + logit_flow_Eu[a] * stlogQ[t,a] + lflow_fall_Eu[a] * stlQ2pic[t]
logit_pi_Eu[t,a] ~ dnorm(logit_mupi_Eu[t,a],precpi_Eu[a])
pi_Eu[t,a] <- exp(logit_pi_Eu[t,a])/(1+exp(logit_pi_Eu[t,a])) # back-transformation on the probability scale
epsilon_Eu[t,a] <- (logit_pi_Eu[t,a] - logit_mupi_Eu[t,a]) / sigmapi_Eu[a] # standardized residuals
eps_Eu[t,a] <- logit_pi_Eu[t,a] - logit_mupi_Eu[t,a] # residuals not standardized
} ## End of loop over years
#Calculating R² = 1 - E(variance of the residuals (/!\ not standardized!)) / E(variance of the capture probabilities on the logit scale)
# See Gelman & Pardoe 2006
sdeps_Eu[a] <- sd(eps_Eu[,a])
vareps_Eu[a] <- sdeps_Eu[a] * sdeps_Eu[a]
sdlpi_Eu[a] <- sd(logit_pi_Eu[,a])
varlpi_Eu[a] <- sdlpi_Eu[a] * sdlpi_Eu[a]
R2[a] <- 1 - (mean(vareps_Eu[a])/mean(varlpi_Eu[a]))
}# end of loop over sea age
test[1] <- step(logit_flow_Eu[1]) # is logit_flow >=0 for 1SW?
test[2] <- step(logit_flow_Eu[2]) # is logit_flow >=0 for MSW?
test[3] <- step(lflow_fall_Eu[1]) # is logit_flow_fall >=0 for 1SW?
test[4] <- step(lflow_fall_Eu[2]) # is logit_flow_fall >=0 for MSW?
diff_flow <- logit_flow_Eu[1] - logit_flow_Eu[2]
test[5] <- step(diff_flow) #is difference in slope >=0 for 1SW? (1SW>MSW)
diff_fall <- lflow_fall_Eu[1] - lflow_fall_Eu[2]
test[6] <- step(diff_fall) # is difference in slope in fall >=0 for 1SW? (1SW>MSW)
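# Note: step(x) equals 1 when x >= 0, so the posterior mean of each test[] element
# estimates the posterior probability that the corresponding slope (or difference) is >= 0.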
### Probabilities to be captured at Beauchamps after reproduction (time and sea age dependent)
for (a in 1:2) {
logit_mupi_B[a] <- log(mupi_B[a]/(1-mupi_B[a])) # logit transformation
varpi_B[a] <- (sigmapi_B[a])*(sigmapi_B[a])
precpi_B[a] <- 1/(varpi_B[a]) # precision
for (t in 1:5) { ## pi_B: Exchangeable from 1984 to 1988
logit_pi_B[t,a] ~ dnorm(logit_mupi_B[a],precpi_B[a])
pi_B[t,a] <- exp(logit_pi_B[t,a])/(1+exp(logit_pi_B[t,a])) # back-transformation on the probability scale
} ## End of loop over years
pi_B[6,a] <- 0 # Trap not working in 1989
for (t in 7:9) { ## pi_B: Exchangeable from 1990 to 1992
logit_pi_B[t,a] ~ dnorm(logit_mupi_B[a],precpi_B[a])
pi_B[t,a] <- exp(logit_pi_B[t,a])/(1+exp(logit_pi_B[t,a])) # back-transformation on the probability scale
} ## End of loop over years
pi_B[10,a] <- 0 # Trap not working in 1993
for (t in 11:16) { ## pi_B: Exchangeable from 1994 to 1999
logit_pi_B[t,a] ~ dnorm(logit_mupi_B[a],precpi_B[a])
pi_B[t,a] <- exp(logit_pi_B[t,a])/(1+exp(logit_pi_B[t,a])) # back-transformation on the probability scale
} ## End of loop over years
pi_B[17,a] <- 0 # Trap not working in 2000
pi_B[18,a] <- 0 # Trap not working in 2001
for (t in 19:Y) { ## pi_B: Exchangeable from 2002 onwards
logit_pi_B[t,a] ~ dnorm(logit_mupi_B[a],precpi_B[a])
pi_B[t,a] <- exp(logit_pi_B[t,a])/(1+exp(logit_pi_B[t,a])) # back-transformation on the probability scale
} ## End of loop over years
} # end of loop over sea age
######################################################################################################################################
######################################################################################################################################
## DATA
## --------------
## C_Eu[t,a]: Annual number of fish captured at Eu per sea age category (1SW/MSW)
## Cm_Eu[t,a]: Annual number of fish captured and marked at Eu per sea age category
## Cm_B[t,a]: Annual number of marked fish captured at Beauchamps per sea age category
## Cum_B[t,a]: Annual number of unmarked fish captured at Beauchamps per sea age category
######################################################################################
## NUMBERS PER GROUP
## |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
## n_tot[t]: annual total number of fish
## n_1SW[t]: annual Number of 1SW
## n_MSW[t]: annual Number of MSW
## We included a hierarchy on breeding category.
####################################################
## PRIORS
## ------
####################################################################################
## Annual returns
for (t in 1:Y) {
n_tot[t] <- sum(n[t,]) # total number
n_1SW[t] <- n[t,1] # Number of males and females 1SW
n_MSW[t] <- n[t,2] # Number of males and females MSW
} ## End of loop over years
# Shape and rate parameter for gamma distribution for negative binomial (see Gelman, 2nd edition, p. 446)
shape_lambda ~ dgamma(0.001,0.001)
rate_lambda ~ dgamma(0.001,0.001)
# To be able to initialize lambda_n for the first MSW year afterwards (lambda_n[1,2])
lambda_tot0 ~ dgamma(shape_lambda,rate_lambda)
Plambda0[1:2] ~ ddirich(s[])
# Hyperprior for lambda_n
for (t in 1:Y) {
lambda_tot[t] ~ dgamma(shape_lambda,rate_lambda)
Plambda[t,1:2] ~ ddirich(s[])
} # end of loop over years
for (a in 1:2) {
s[a] ~ dexp(0.25)T(0.02,)
} # end of loop over sea age
for (t in 1:(Y-1)) {
lambda_n[t,1] <- lambda_tot[t] * Plambda[t,1]
lambda_n[t+1,2] <- lambda_tot[t] * Plambda[t,2]
} # end of loop over years
### lambda_n is calculated by smolt cohort (the MSW proportion and mean population size are taken one year earlier than for 1SW)
## First year for MSW
lambda_n[1,2] <- lambda_tot0 * Plambda0[2]
## Last year for 1SW
lambda_n[Y,1] <- lambda_tot[Y] * Plambda[Y,1]
for (a in 1:2) {
for (t in 1:Y) {
n[t,a] ~ dpois(lambda_n[t,a]) # annual number of fish per sea age
} #end of loop over years
} # end of loop over sea age
#################################
## CAPTURE AT EU
## -------
## n_um[t]: Annual number of fish not captured (not marked) at Eu
##################################
## Likelihood on trapped fish number
for (a in 1:2) {
for (t in 1:16) { # from 1984 to 1999
C_Eu[t,a] ~ dbin(pi_Eu[t,a],n[t,a])
## Fish number avoiding the trap, not marked fish (n_um).
n_um[t,a] <- n[t,a] - C_Eu[t,a]
} ## End of loop over years
#Year 2000
p_Eu00_tot[a] <- pi_Eu[17,a] * pi_Eu00[a] # capture probability in 2000 is reduced relative to the other years (partial trapping)
C_Eu[17,a] ~ dbin(p_Eu00_tot[a],n[17,a]) # year 2000 is treated differently.
n_um[17,a] <- n[17,a] - C_Eu[17,a] ## Fish number avoiding the trap, not marked fish (n_um) in 2000
#Year 2001
p_Eu01_tot[a] <- pi_Eu[18,a] * pi_Eu01[a] # capture probability in 2001 is reduced relative to the other years (partial trapping)
C_Eu[18,a] ~ dbin(p_Eu01_tot[a],n[18,a]) # year 2001 is treated differently.
n_um[18,a] <- n[18,a] - C_Eu[18,a] ## Fish number avoiding the trap, not marked fish (n_um) in 2001
for (t in 19:Y) { # from 2002 to now on
C_Eu[t,a] ~ dbin(pi_Eu[t,a],n[t,a])
## Fish number avoiding the trap, not marked fish (n_um).
n_um[t,a] <- n[t,a] - C_Eu[t,a]
} ## End of loop over years
} # end of loop over sea age
#####################################
## RECAPTURE OF FISH AT BEAUCHAMPS AFTER REPRODUCTION
## -------------------------------------
##
for (a in 1:2) {
for (t in 1:Y) {
## Marked fish
Cm_B[t,a] ~ dbin(pi_B[t,a],Cm_Eu[t,a])
## Unmarked fish
Cum_B[t,a] ~ dbin(pi_B[t,a],n_um[t,a])
} # end of loop over years
} # end of loop over sea age
} # end of the model
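To make the capture-probability submodel above easier to follow, here is a minimal R sketch of the flow standardization and logit back-transformation it uses. It is purely illustrative: Q_sim, b0 and b_flow are made-up values, not objects from the model or data files.

## Minimal R illustration of the standardization and logit link used in the model (illustrative values only)
Q_sim  <- c(2.1, 3.4, 1.8, 4.0, 2.7)                        # hypothetical annual mean flows
stlogQ <- (log(Q_sim) - mean(log(Q_sim))) / sd(log(Q_sim))  # standardized ln(flow), as in the model
b0     <- -1.0                                              # hypothetical intercept (plays the role of logit_int_Eu)
b_flow <-  0.5                                              # hypothetical flow slope (plays the role of logit_flow_Eu)
logit_mu <- b0 + b_flow * stlogQ                            # linear predictor on the logit scale
pi_mu    <- exp(logit_mu) / (1 + exp(logit_mu))             # back-transformation to the probability scale
pi_mu                                                       # expected capture probabilities for the 5 hypothetical years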
rm(list=ls()) # Clear memory
##------------------ R PACKAGES ------------------------------##
library(R2OpenBUGS)
library(rjags) # required to use the "read.bugsdata" function
library(coda)
library(mcmcplots)
# library(dclone)
# library(snow)
# require(ggmcmc)
##-----------------------------INFO ----------------------------------##
year <- "2016"
site <- "Bresle"
stade <- "adult"
## WORKING DIRECTORY:
work.dir<-paste("/home/basp-meco88/Documents/RESEARCH/PROJECTS/ORE/Abundance",site,stade,sep="/")
setwd(work.dir)
##-----------------------------DATA ----------------------------------##
source(paste('data/data_',stade,'.R',sep="")) # creates the Rdata file
load(paste('data/data_',stade,"_",year,'.Rdata',sep="")) # load the data
#----------------------------PARAMETERS---------------------------------##
source(paste('parameters_',stade,'.R',sep="")) # load the parameters
#------------------------INITS----------------------------------##
source(paste('inits/inits_',stade,'.R',sep="")) # create the initial values
load(paste('inits/inits_',stade,'.Rdata',sep="")) # load the initial values
#if(site == "Bresle" && stade == "adult") {inits <- list(read.bugsdata(paste("inits/init-",site,"-",stade,year,".txt",sep="")))}
#if(site == "Nivelle") {inits <- list(read.bugsdata(paste("inits/init-",site,"-",stade,year,".txt",sep="")))}
#------------------------MODEL----------------------------------##
model <- paste("model/",stade,"-",site,".txt",sep="") # path of the model
if(site == "Scorff" && stade == "smolt") {model <- paste("model/",stade,"-",site,"_",year,".R",sep="")} # le modèle Scorrf pour les smolt peut changer tous les ans suivant conditions
model
#---------------------------ANALYSIS-----------------------------##
nChains = length(inits) # Number of chains to run.
adaptSteps = 1000 # Number of steps to "tune" the samplers.
nburnin=1000 # Number of steps to "burn-in" the samplers.
nstore=5000 # Total number of steps in chains to save.
nthin=1 # Number of steps to "thin" (1=keep every step).
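# Note: bugs() below is called with n.iter = nstore + nburnin, so roughly nstore samples
# per chain are retained after discarding the burn-in (with nthin = 1, no extra thinning).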
#nPerChain = ceiling( ( numSavedSteps * thinSteps ) / nChains ) # Steps per chain.
### Start of the run ###
start.time = Sys.time(); cat("Start of the run\n");
######### BUGS ##########
fit <- bugs(
data
,inits
,model.file = model
,parameters
,n.chains = nChains, n.iter = nstore + nburnin, n.burnin = nburnin, n.thin = nthin
,DIC=FALSE
,codaPkg = FALSE, clearWD=TRUE
#,debug=TRUE
,working.directory=work.dir
)
######### JAGS ##########
## Compile & adapt
#Create, initialize, and adapt the model:
# fit <- jags.model(
# model,
# data,inits,
# n.chains=nChains,
# n.adapt = adaptSteps)
# # Run JAGS in parallel. Each chain is sent to a separate core.
# cl <- makeSOCKcluster(nChains) # Requests nChains cores. /!\ Check how many cores your computer has
# fit.mcmc <- jags.parfit(cl,
# data,
# parameters,
# model.dir,
# inits,
# n.chains=nChains,n.adapt=adaptSteps,n.update=nburnin,n.iter=nstore*nthin, thin=nthin
# )
# stopCluster(cl) #### /!\ Really important to do!
# duration of the run
end.time = Sys.time()
elapsed.time = difftime(end.time, start.time, units='mins')
cat("Sample analyzed after ", elapsed.time, ' minutes\n')
## BACKUP
save(fit,file=paste('results/results_',stade,"_",year,'.RData',sep=""))
write.table(fit$summary,file=paste('results/Results_',stade,"_",year,'.csv',sep=""),sep=";")
#------------------------------------------------------------------------------
# EXAMINE THE RESULTS
fit.mcmc <- as.mcmc(fit) # using bugs
# DIAGNOSTICS:
parameterstotest <-parameters # all parameters
# parameterstotest <- c(
# "epsilon_p"
# )
# Start writing to an output file
sink(paste('results/Diagnostics_',stade,"_",year,'.txt',sep=""))
cat("=============================\n")
cat("DIAGNOSTICS\n")
cat("=============================\n")
if (nChains > 1) {
cat("Convergence: gelman-Rubin R test\n")
gelman.diag(fit.mcmc[,which(varnames(fit.mcmc)%in%parameterstotest)])
}
cat("\n---------------------------\n")
cat("Heidelberger and Welch's convergence diagnostic\n")
cat("
heidel.diag is a run length control diagnostic based on a criterion of relative accuracy for the estimate of the mean. The default setting corresponds to a relative accuracy of two significant digits.
heidel.diag also implements a convergence diagnostic, and removes up to half the chain in order to ensure that the means are estimated from a chain that has converged.
\n")
heidel.diag(fit.mcmc[,which(varnames(fit.mcmc)%in%parameterstotest)], eps=0.1, pvalue=0.05)
cat("\n---------------------------\n")
cat("Geweke's convergence diagnostic\n")
cat("
Geweke (1992) proposed a convergence diagnostic for Markov chains based on a test for equality of the means of the first and last part of a Markov chain (by default the first 10% and the last 50%).
If the samples are drawn from the stationary distribution of the chain, the two means are equal and Geweke's statistic has an asymptotically standard normal distribution.
The test statistic is a standard Z-score: the difference between the two sample means divided by its estimated standard error. The standard error is estimated from the spectral density at zero and so takes into account any autocorrelation.
The Z-score is calculated under the assumption that the two parts of the chain are asymptotically independent, which requires that the sum of frac1 and frac2 be strictly less than 1.
\n")
geweke.diag(fit.mcmc[,which(varnames(fit.mcmc)%in%parameterstotest)], frac1 = 0.1, frac2 = 0.5)
cat("\n---------------------------\n")
cat("Raftery and Lewis's diagnostic\n")
raftery.diag(fit.mcmc[,which(varnames(fit.mcmc)%in%parameterstotest)], q=0.025, r=0.005, s=0.95, converge.eps=0.001)
# Stop writing to the file
sink()
## Plot the chains:
pdf(paste('results/Results_',stade,"_",year,'.pdf',sep=""))
#for (i in 1:5){
traplot(fit.mcmc[,which(varnames(fit.mcmc)%in%parameterstotest)])
# caterplot(fit.mcmc,parameters[i])
#}
dev.off()
#------------------------------------------------------------------------------
## SUMMARY
if(site == "Scorff" && stade == "adult") {source("summary_adult.R")}
if(site == "Nivelle" && stade == "tacon") {source("analyse_coda_tacon.R")}
#rm(list=ls()) # Clear memory
library(R2OpenBUGS)
library(rjags)
library(coda)
##-----------------------------INFO ----------------------------------##
year <- 2016
site <- "Bresle"
stade <- "adult"
## WORKING DIRECTORY:
work.dir<-paste('~/Documents/RESEARCH/PROJECTS/ORE/Abundance',site,stade,sep="/")
setwd(work.dir)
##-----------------------------DATA ----------------------------------##
## DATA DIRECTORY:
list <- read.bugsdata(paste("data/","data_list.txt",sep=""))
######################################################################################
## Cm_B[t,a]: Annual number of marked fish captured at Beauchamps per sea age category; 1: 1SW, 2: MSW
## Cum_B[t,a]: Annual number of unmarked fish captured at Beauchamps per sea age category; 1: 1SW, 2: MSW
######################################################################################
fish <- read.table(paste("data/","data_Beauchamps.txt",sep=""),header = TRUE, check.names=FALSE,comment.char = "#",colClasses="character")
fish <- as.matrix(fish);mode(fish)<- "numeric"
Cm_B=as.matrix(fish[,1:2])
Cum_B=as.matrix(fish[,3:4])
#####################################################################################################
## C_Eu[t,a]: Annual number of fish captured at Eu per sea age category; 1:1SW, 2:MSW
## Cm_Eu[t,a]: Annual number of fish captured and marked at Eu per sea age category; 1:1SW, 2:MSW
#####################################################################################################
Eu <- read.table(paste("data/","data_Eu.txt",sep=""),header = TRUE, check.names=FALSE,comment.char = "#",colClasses="character")
Eu <- as.matrix(Eu);mode(Eu)<- "numeric"
C_Eu=as.matrix(Eu[,1:2])
Cm_Eu=as.matrix(Eu[,3:4])
######################################################################################################################################
## Mean flow (l/s): - 15 june - 31 august for 1SW ([,1])
## - 15 april - 30 june for MSW ([,2])
## Covariate is standardized within the BUGS model
######################################################################################################################################
Q <- read.table(paste("data/","data_flow_sea-age.txt",sep=""),header = TRUE, check.names=FALSE,comment.char = "#",colClasses="character")
Q <- as.matrix(Q);mode(Q)<- "numeric"
data <- list(
Y=list$Y, Q2pic=list$Q2pic
, Cm_B=Cm_B,Cum_B=Cum_B,C_Eu=C_Eu, Cm_Eu=Cm_Eu
, Q=Q
)
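# Optional sanity check (a minimal sketch; it only assumes, as documented above, that the
# count matrices have Y rows (years) and one column per sea age)
stopifnot(nrow(C_Eu) == data$Y, ncol(C_Eu) == 2,
          nrow(Cm_B) == data$Y, ncol(Cum_B) == 2)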
save(data,file=paste('data/data_',stade,"_",year,'.Rdata',sep="")) # save the data
#rm(list=ls()) # Clear memory
library(rjags) # required to use the "read.bugsdata" function
##-----------------------------FUNCTIONS ----------------------------------##
invlogit<-function(x) {1/(1+exp(-(x)))}
logit<-function(x) {log(x/(1-x))}
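# Illustrative check (not needed by the script): logit() and invlogit() are inverses of each
# other, so invlogit(logit(0.3)) returns 0.3 up to floating-point error.
stopifnot(abs(invlogit(logit(0.3)) - 0.3) < 1e-12)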
##-----------------------------DATA ----------------------------------##
year <- 2016
site <- "Bresle"
stade <- "adult"