Isabelle Sanchez / kfino / Commits / e92d6e3f

Commit e92d6e3f
Authored 2 years ago by sanchezi
Commit message: MaJ execution de KBO_EM() dans kfino_fit() - par dichotomie
[Update of the KBO_EM() execution in kfino_fit() - by dichotomy (bisection)]
Parent: 8795d855
No related branches, tags, or merge requests found.
Pipeline #59110 failed (stage: build), 2 years ago.

Showing 1 changed file: R/kfino.R (+153 additions, -58 deletions)
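In the diff below, the former grid search over the initial level m0 (a for loop over seq(bornem0[1], bornem0[2], 2), each step running KBO_EM()) is replaced by a dichotomy: the lower and upper quantile bounds on m0 are each run to an EM fixed point, the bracket is then repeatedly halved toward the endpoint with the higher likelihood, and a quantile-based "challenger" is still compared against the result at the end. The sketch that follows only illustrates that bracket-narrowing rule under simplifying assumptions: bisect_m0(), loglik_fn, tol and max_iter are hypothetical names introduced here, and the single loglik_fn() call stands in for the inner loop that iterates KBO_EM() until m0, mm and pp stabilise; it is not the package's actual code.

    # Illustration only (not kfino code): bisection over the initial level m0,
    # guided by a likelihood function supplied by the caller. In kfino_fit(),
    # evaluating a candidate m0 means iterating KBO_EM() to a fixed point;
    # here that whole step is abstracted into one call to loglik_fn().
    bisect_m0 <- function(loglik_fn, m0_low, m0_up, tol = 0.5, max_iter = 20) {
      ll_low <- loglik_fn(m0_low)
      ll_up  <- loglik_fn(m0_up)
      iter <- 0
      while (abs(m0_up - m0_low) > tol && iter < max_iter) {
        m0_med <- (m0_low + m0_up) / 2
        ll_med <- loglik_fn(m0_med)
        # Move the endpoint with the lower likelihood onto the midpoint,
        # mirroring the commit's "if (Vopt_up < Vopt_low)" update.
        if (ll_up < ll_low) {
          m0_up <- m0_med
          ll_up <- ll_med
        } else {
          m0_low <- m0_med
          ll_low <- ll_med
        }
        iter <- iter + 1
      }
      # Return the better of the two remaining endpoints.
      if (ll_low > ll_up) {
        list(m0 = m0_low, loglik = ll_low)
      } else {
        list(m0 = m0_up, loglik = ll_up)
      }
    }

    # Toy usage: a likelihood peaked at the upper end of the bracket.
    bisect_m0(function(m0) -(m0 - 60)^2, m0_low = 20, m0_up = 60)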
...
@@ -279,42 +279,25 @@ kfino_fit<-function(datain,Tvar,Yvar,
                        threshold=threshold, Y=Y, Tps=Tps, N=N, kappa=kappa)
  } else if (N > 50){
    # optimization without sub-sampling
    # optimization without sub-sampling, 2 methods, EM or ML
    if (method == "EM"){
      print("-------:")
      print("Optimisation of initial parameters with EM method - result:")
      print("no sub-sampling performed:")
      bornem0=quantile(Y[1:N/4], probs=c(.2, .8))
      m0opt=quantile(Y[1:N/4], probs=c(.5))
      mmopt=quantile(Y[(3*N/4):N], probs=c(.5))
      cat("range m0: ", bornem0, "\n")
      cat("initial m0opt: ", m0opt, "\n")
      popt=0.5
      # 1er tour (first pass)
      Vopt=KBO_EM(param=list(m0=m0opt, mm=mmopt, pp=popt, aa=aa,
                             expertMin=expertMin, expertMax=expertMax,
                             sigma2_mm=sigma2_mm, sigma2_m0=sigma2_m0,
                             sigma2_pp=sigma2_pp, K=K),
                  Y=Y, Tps=Tps, N=N, dix=dix, kappaOpt=kappaOpt)$likelihood
      # boucle (loop)
      N_etape_EM=10
      t=1
      for (m0_tmp in seq(bornem0[1], bornem0[2], 2)){
        t=t+1
      print("-------:")
      print("Optimisation of initial parameters with EM method - result:")
      print("no sub-sampling performed:")
      bornem0=quantile(Y[1:N/2], probs=c(.2, .8))
      cat("range m0: ", bornem0, "\n")
      #--- par dichotomie (by dichotomy / bisection)
      # borne basse (lower bound)
      N_etape_EM=10
      m0_tmp=m0_low=bornem0[1]
      m_tmp=m0_tmp
      p_tmp=0.5
      diff_m0=diff_mm=diff_p=2
      diff_m0=diff_mm=diff_p=20
      k=1
      #for (k in 1:N_etape_EM){
      while (diff_m0 > 0.5 && diff_p > 0.0001){
      while (diff_m0 > 0.5 && diff_p > 0.0001 && diff_mm > 2){
        print("ici boucle low")
        print(k)
        Res_EM=KBO_EM(param=list(mm=m_tmp, pp=p_tmp, m0=m0_tmp,
...
@@ -328,39 +311,151 @@ kfino_fit<-function(datain,Tvar,Yvar,
                      kappaOpt=kappaOpt, Y=Y, Tps=Tps, N=N, dix=dix)
        diff_m0=abs(m0_tmp - Res_EM$m0[[1]])
        diff_p=abs(p_tmp - Res_EM$pp)
        diff_mm=abs(m_tmp - Res_EM$mm[[1]])
        k <- k+1
        m0_tmp=Res_EM$m0[[1]]
        m_tmp=Res_EM$mm[[1]]
        p_tmp=Res_EM$pp
        print(Res_EM$likelihood)
        if (k == N_etape_EM) break
      }
      V=Res_EM$likelihood
      if (V > Vopt){
        Vopt=V
        m0opt=m0_tmp
        mmopt=m_tmp
        popt=p_tmp
      Vopt_low=Res_EM$likelihood
      m0opt_low <- Res_EM$m0[[1]]
      mmopt_low <- Res_EM$mm[[1]]
      popt_low <- Res_EM$pp
      # borne haute (upper bound)
      N_etape_EM=10
      m0_tmp=m0_up=bornem0[2]
      m_tmp=m0_tmp
      p_tmp=0.5
      diff_m0=diff_mm=diff_p=20
      k=1
      while (diff_m0 > 0.5 && diff_p > 0.0001 && diff_mm > 2){
        print("ici boucle up")
        print(k)
        Res_EM=KBO_EM(param=list(mm=m_tmp, pp=p_tmp, m0=m0_tmp, aa=aa,
                                 expertMin=expertMin, expertMax=expertMax,
                                 sigma2_m0=sigma2_m0, sigma2_mm=sigma2_mm,
                                 sigma2_pp=sigma2_pp, K=K),
                      kappaOpt=kappaOpt, Y=Y, Tps=Tps, N=N, dix=dix)
        diff_m0=abs(m0_tmp - Res_EM$m0[[1]])
        diff_p=abs(p_tmp - Res_EM$pp)
        diff_mm=abs(m_tmp - Res_EM$mm[[1]])
        k <- k+1
        m0_tmp=Res_EM$m0[[1]]
        m_tmp=Res_EM$mm[[1]]
        p_tmp=Res_EM$pp
        print(Res_EM$likelihood)
        if (k == N_etape_EM) break
      }
      }
      print("Optimised parameters with EM method: ")
      cat("Optimized m0: ", m0opt, "\n")
      cat("Optimized pp: ", popt, "\n")
      print("-------:")
      resultat=KBO_known(param=list(mm=mmopt, pp=popt, m0=m0opt, aa=aa,
                                    expertMin=expertMin, expertMax=expertMax,
                                    sigma2_m0=sigma2_m0, sigma2_mm=sigma2_mm,
                                    sigma2_pp=sigma2_pp, K=K),
                         threshold=threshold, Y=Y, Tps=Tps, N=N, kappa=kappa)
      Vopt_up=Res_EM$likelihood
      m0opt_up <- Res_EM$m0[[1]]
      mmopt_up <- Res_EM$mm[[1]]
      popt_up <- Res_EM$pp
      diff_m0range <- abs(m0opt_up - m0opt_low)
      diff_pprange <- abs(popt_up - popt_low)
      diff_mmrange <- abs(mmopt_up - mmopt_low)
      # test quasi equality
      while (diff_m0range > 0.5 && diff_pprange > 0.0001 && diff_mmrange > 2){
        m0_tmp=m0_med=(m0_up + m0_low)/2
        m_tmp=m0_tmp
        p_tmp=0.5
        diff_m0=diff_mm=diff_p=20
        k=1
        while (diff_m0 > 0.5 && diff_p > 0.0001 && diff_mm > 2){
          print(k)
          Res_EM=KBO_EM(param=list(mm=m_tmp, pp=p_tmp, m0=m0_tmp, aa=aa,
                                   expertMin=expertMin, expertMax=expertMax,
                                   sigma2_m0=sigma2_m0, sigma2_mm=sigma2_mm,
                                   sigma2_pp=sigma2_pp, K=K),
                        kappaOpt=kappaOpt, Y=Y, Tps=Tps, N=N, dix=dix)
          diff_m0=abs(m0_tmp - Res_EM$m0[[1]])
          diff_p=abs(p_tmp - Res_EM$pp)
          diff_mm=abs(m_tmp - Res_EM$mm[[1]])
          k <- k+1
          m0_tmp=Res_EM$m0[[1]]
          m_tmp=Res_EM$mm[[1]]
          p_tmp=Res_EM$pp
          if (k == N_etape_EM) break
        }
        if (Vopt_up < Vopt_low){
          Vopt_up <- Res_EM$likelihood
          m0opt_up <- Res_EM$m0[[1]]
          mmopt_up <- Res_EM$mm[[1]]
          popt_up <- Res_EM$pp
          m0_up <- m0_med
        } else {
          Vopt_low <- Res_EM$likelihood
          m0opt_low <- Res_EM$m0[[1]]
          mmopt_low <- Res_EM$mm[[1]]
          popt_low <- Res_EM$pp
          m0_low <- m0_med
        }
        diff_m0range <- abs(m0opt_up - m0opt_low)
        diff_pprange <- abs(popt_up - popt_low)
      }
      # challenger
      m0opt=quantile(Y[1:N/4], probs=c(.5))
      mmopt=quantile(Y[(3*N/4):N], probs=c(.5))
      popt=0.5
      Vopt=KBO_EM(param=list(m0=m0opt, mm=mmopt, pp=popt, aa=aa,
                             expertMin=expertMin, expertMax=expertMax,
                             sigma2_mm=sigma2_mm, sigma2_m0=sigma2_m0,
                             sigma2_pp=sigma2_pp, K=K),
                  Y=Y, Tps=Tps, N=N, dix=dix, kappaOpt=kappaOpt)$likelihood
      if (Vopt_low > Vopt){
        m0opt <- m0opt_low
        mmopt <- mmopt_low
        popt <- popt_low
      }
      print("Optimised parameters with EM method: ")
      cat("Optimized m0: ", m0opt, "\n")
      cat("Optimized mm: ", mmopt, "\n")
      cat("Optimized pp: ", popt, "\n")
      print("-------:")
      resultat=KBO_known(param=list(mm=mmopt, pp=popt, m0=m0opt, aa=aa,
                                    expertMin=expertMin, expertMax=expertMax,
                                    sigma2_m0=sigma2_m0, sigma2_mm=sigma2_mm,
                                    sigma2_pp=sigma2_pp, K=K),
                         threshold=threshold, Y=Y, Tps=Tps, N=N, kappa=kappa)
    } else if (method == "ML"){
      print("-------:")
      print("Optimisation of initial parameters with ML method - result:")
...