...
@@ -6,8 +6,8 @@ Date: 2014-05-01
Author: Aaron Taudt, David Porubsky
Maintainer: Aaron Taudt <a.s.taudt@umcg.nl>, David Porubsky <d.porubsky@umcg.nl>
Description: not yet
-Depends: R (>= 3.1.0), GenomicRanges, IRanges, ggplot2, reshape2
-Imports: Rsamtools, Biostrings, GenomicAlignments
+Depends: R (>= 3.1.0), GenomicRanges, IRanges, ggplot2, reshape2, biovizBase
+Imports: Rsamtools, Biostrings, GenomicAlignments, doParallel, foreach, grid
Suggests:
License: GPL
LazyLoad: yes
...
@@ -36,5 +36,5 @@ mixedorder

# Import all packages listed as Imports or Depends
import(
- GenomicRanges, IRanges, ggplot2, reshape2, Rsamtools, Biostrings, GenomicAlignments
+ GenomicRanges, IRanges, ggplot2, reshape2, Rsamtools, Biostrings, GenomicAlignments, doParallel, foreach, grid, biovizBase
)
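
Note (annotation, not part of the diff): the changes above move every package that was previously loaded with library() inside function bodies into Depends/Imports and the NAMESPACE import() call, which is the standard R packaging idiom. A minimal sketch of the resulting style, using a hypothetical helper (meltStates is not a function of this package):

    ## Packages declared in Imports/NAMESPACE are available without library();
    ## namespace-qualified calls keep the dependency explicit.
    meltStates <- function(majorstate.f, state.labels) {
      df <- reshape2::melt(majorstate.f, varnames = c("model", "chromosome"),
                           value.name = "state")
      df$state <- factor(df$state, levels = state.labels)
      df
    }
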
...
@@ -38,8 +38,6 @@ get.state.table <- function(hmm.list, numCPU=1) {
rownames(majorstate.f) <- rownames(majorstate)

## Plot the percentages of aneuploidies/states per chromosome
- library(ggplot2)
- library(reshape2)
df <- melt(majorstate.f, varnames=c("model","chromosome"), value.name="state")
df$state <- factor(df$state, levels=state.labels)
ggplt <- ggplot(df) + geom_bar(aes(x=chromosome, fill=state, y=..count..)) + theme_bw() + ylab('number of samples') + scale_fill_manual(values=state.colors)
...
@@ -109,7 +107,6 @@ get.reproducibility.one2many <- function(query, num.query.samples=1, subjects, n

## Overlap each subject's states with query
cat('overlapping each subject\'s states with query\n')
- library(doParallel)
cl <- makeCluster(numCPU)
registerDoParallel(cl)
constates <- foreach (subject.gr = subjects.grl, .packages='GenomicRanges', .combine='cbind') %dopar% {
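
Note (annotation, not part of the diff): doParallel and foreach are now package imports, so the explicit library(doParallel) call is dropped. A minimal, self-contained sketch of the cluster/foreach pattern used above, with toy inputs standing in for subjects.grl:

    library(doParallel)
    library(foreach)
    numCPU <- 2
    cl <- makeCluster(numCPU)        # start a local cluster
    registerDoParallel(cl)           # register it as the %dopar% backend
    constates <- foreach (i = 1:4, .combine = 'cbind') %dopar% {
      rnorm(10)                      # stand-in for the per-subject overlap computation
    }
    stopCluster(cl)                  # release the workers when finished
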
...
@@ -142,8 +139,6 @@ get.reproducibility.one2many <- function(query, num.query.samples=1, subjects, n
means[itest,] <- meanlevel
}
## Plot the results
- library(ggplot2)
- library(reshape2)
df <- melt(data.frame(state=mcols(query.gr)$state, meanstate=meanconstates))
ggplt <- ggplot(df) + geom_boxplot(aes(x=state, y=value, fill=variable)) + theme_bw() + coord_cartesian(ylim=c(1,length(levels(mcols(query.gr)$state)))) + scale_y_continuous(labels=levels(mcols(query.gr)$state)) + xlab(paste0(num.query.samples,' cell sample')) + ylab(paste0('single cell samples'))

...
@@ -214,8 +209,6 @@ get.reproducibility.many2many <- function(samples, num.tests=10, size.test=10, n
means <- cbind(as.data.frame(means), width=width(consensus))

## Plot the results
- library(ggplot2)
- library(reshape2)
states <- levels(mcols(samples.grl[[1]])$state)
ggplt1 <- ggplot(as.data.frame(means)) + geom_point(aes(x=V1, y=V2, alpha=width)) + theme_bw() + coord_cartesian(ylim=c(1,length(states))) + scale_x_continuous(breaks=1:length(states), labels=states) + scale_y_continuous(breaks=1:length(states), labels=states) + xlab(paste0(size.test,' single cell samples')) + ylab(paste0(size.test,' single cell samples'))

...
@@ -268,7 +268,6 @@ align2binned <- function(file, format, index=file, pairedEndReads=FALSE, chrom.l

### GC correction ###
if (GC.correction) {
- library(Biostrings)
# Correct seqnames 1->chr1 if necessary
if (!grepl('chr',chromosome)) {
chrom <- paste0('chr',chromosome)
...
@@ -276,8 +275,8 @@ align2binned <- function(file, format, index=file, pairedEndReads=FALSE, chrom.l
chrom <- chromosome
}
## Calculating GC for whole bins
- view.chr <- Views(GC.correction.bsgenome[[chrom]], ranges(i.binned.data))
- freq <- alphabetFrequency(view.chr, as.prob = T, baseOnly=T)
+ view.chr <- Biostrings::Views(GC.correction.bsgenome[[chrom]], ranges(i.binned.data))
+ freq <- Biostrings::alphabetFrequency(view.chr, as.prob = T, baseOnly=T)
if (nrow(freq) > 1) {
GC.bin <- rowSums(freq[, c("G","C")])
} else {
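
Note (annotation, not part of the diff): the namespace-qualified Views()/alphabetFrequency() calls above compute the GC fraction of every bin from the BSgenome sequence. A minimal sketch of the same computation on a toy sequence (a real run would use GC.correction.bsgenome[[chrom]] and the binned ranges):

    library(Biostrings)
    chrom.seq <- DNAString("ACGTGGCCAATTACGGCCGGTTAA")   # toy stand-in for a chromosome
    bins <- IRanges::IRanges(start = c(1, 9, 17), width = 8)
    view.chr <- Biostrings::Views(chrom.seq, bins)
    freq <- Biostrings::alphabetFrequency(view.chr, as.prob = TRUE, baseOnly = TRUE)
    GC.bin <- rowSums(freq[, c("G", "C")])               # GC fraction per bin
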
...
@@ -14,7 +14,6 @@ ptm <- proc.time()
## Cluster the samples
cat("clustering the samples ...")
# Find states along the consensus template
- library(foreach)
constates <- foreach (gr = grlred, .packages='GenomicRanges', .combine='cbind') %do% {
splt <- split(gr, mcols(gr)$state)
mind <- as.matrix(findOverlaps(consensus, splt))
...
@@ -6,7 +6,6 @@ hmmList2GRangesList <- function(hmm.list, reduce=TRUE, numCPU=1, consensus=FALSE
## Transform to GRanges
cat('transforming to GRanges ...')
if (numCPU > 1) {
- suppressMessages( library(doParallel) )
cl <- makeCluster(numCPU)
registerDoParallel(cl)
cfun <- function(...) { GRangesList(...) }
...
@@ -53,7 +52,6 @@ hmmList2GRangesList <- function(hmm.list, reduce=TRUE, numCPU=1, consensus=FALSE

binned2GRanges <- function(binned.data, chrom.length.file=NULL, offset=0) {

- library(GenomicRanges)
gr <- GenomicRanges::GRanges(
seqnames = Rle(binned.data$chrom),
ranges = IRanges(start=binned.data$start+offset, end=binned.data$end+offset),
...
@@ -73,7 +71,6 @@ binned2GRanges <- function(binned.data, chrom.length.file=NULL, offset=0) {

hmm2GRanges <- function(hmm, reduce=TRUE) {

-# library(GenomicRanges)
### Check user input ###
if (check.univariate.model(hmm)!=0 & check.multivariate.model(hmm)!=0) stop("argument 'hmm' expects a univariate or multivariate hmm object (type ?hmm for help)")
if (check.logical(reduce)!=0) stop("argument 'reduce' expects TRUE or FALSE")
...
@@ -87,7 +84,7 @@ hmm2GRanges <- function(hmm, reduce=TRUE) {
)
seqlengths(gr) <- hmm$seqlengths[names(seqlengths(gr))]
# Reorder seqlevels
- gr <- GenomicRanges::keepSeqlevels(gr, names(hmm$seqlengths))
+ gr <- keepSeqlevels(gr, names(hmm$seqlengths))

if (reduce) {
# Reduce state by state
...
@@ -23,6 +23,9 @@ findCNVs <- function(binned.data, ID, method='univariate', eps=0.001, init="stan

univariate.findCNVs <- function(binned.data, ID, eps=0.001, init="standard", max.time=-1, max.iter=-1, num.trials=1, eps.try=NULL, num.threads=1, read.cutoff.quantile=0.999, use.gc.corrected.reads=TRUE, strand='*') {

+ ### Define cleanup behaviour ###
+ on.exit(.C("R_univariate_cleanup"))
+
## Intercept user input
IDcheck <- ID #trigger error if not defined
if (class(binned.data) != 'GRanges') {
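
Note (annotation, not part of the diff): on.exit() registers the cleanup call at function entry, so the static C-side objects are freed even when the Baum-Welch fit is interrupted or errors out. A minimal sketch of the pattern (the body is elided; only the registration is shown):

    run_univariate_fit <- function(...) {
      ## runs on normal return, on error, and on user interrupt
      on.exit(.C("R_univariate_cleanup"))
      ## ... long-running .C("R_univariate_hmm", ...) call would go here ...
      invisible(NULL)
    }
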
...
@@ -151,7 +154,6 @@ univariate.findCNVs <- function(binned.data, ID, eps=0.001, init="standard", max
loglik = double(length=1), # double* loglik
weights = double(length=numstates), # double* weights
distr.type = as.integer(state.distributions), # int* distr_type
- ini.proc = as.integer(iniproc), # int* iniproc
size.initial = as.vector(size.initial), # double* initial_size
prob.initial = as.vector(prob.initial), # double* initial_prob
lambda.initial = as.vector(lambda.initial), # double* initial_lambda
...
@@ -211,7 +213,6 @@ univariate.findCNVs <- function(binned.data, ID, eps=0.001, init="standard", max
loglik = double(length=1), # double* loglik
weights = double(length=numstates), # double* weights
distr.type = as.integer(state.distributions), # int* distr_type
- ini.proc = as.integer(iniproc), # int* iniproc
size.initial = as.vector(hmm$size), # double* initial_size
prob.initial = as.vector(hmm$prob), # double* initial_prob
lambda.initial = as.vector(hmm$lambda), # double* initial_lambda
...
@@ -497,6 +498,8 @@ bivariate.findCNVs <- function(binned.data, ID, eps=0.001, init="standard", max.
}
message(" done\n", appendLF=F)

+ ### Define cleanup behaviour ###
+ on.exit(.C("R_multivariate_cleanup", as.integer(num.comb.states)))

### Run the multivariate HMM
# Call the C function
...
@@ -4,10 +4,6 @@
# ============================================================
plot.distribution <- function(model, state=NULL, chrom=NULL, start=NULL, end=NULL) {

- ## Load libraries
- library(ggplot2)
- library(reshape2)
-
# -----------------------------------------
# Get right x limit
get_rightxlim <- function(histdata, reads) {
...
@@ -158,8 +154,6 @@ plot.chromosomes.univariate <- function(model, file=NULL) {
}

## Setup page
- library(grid)
- library(ggplot2)
nrows <- 2 # rows for plotting chromosomes
ncols <- ceiling(num.chroms/nrows)
if (!is.null(file)) {
...
@@ -250,8 +244,6 @@ plot.chromosomes.bivariate <- function(model, file=NULL) {
custom.xlim <- model$distributions[[1]]['monosomy','mu'] * 10

## Setup page
- library(grid)
- library(ggplot2)
nrows <- 2 # rows for plotting chromosomes
ncols <- ceiling(num.chroms/nrows)
if (!is.null(file)) {
...
@@ -349,8 +341,6 @@ plot.genome.overview <- function(hmm.list, file, bp.per.cm=5e7, chromosome=NULL)
uni.hmm.grl <- lapply(hmm.list, '[[', 'bins')

## Setup page
- library(grid)
- library(ggplot2)
nrows <- length(uni.hmm.grl) # rows for plotting genomes
ncols <- 1
total.length.bp <- sum(as.numeric(seqlengths(uni.hmm.grl[[1]])))
...
@@ -442,8 +432,6 @@ plot.genome.summary <- function(hmm.list, file='aneufinder_genome_overview') {
flattened_gr_srt <- sort(flattened_gr)

## Setup page
- library(grid)
- library(ggplot2)
nrows <- length(levels(uni.hmm.grl[[1]]$state)) # rows for plotting genomes
ncols <- 1
pdf(file=paste0(file, '.pdf'), width=ncols*24, height=nrows*2)
...
@@ -497,7 +485,7 @@ plot.genome.summary <- function(hmm.list, file='aneufinder_genome_overview') {
strand = Rle(strand("*")), cov=cov
)

- trans_gr <- transformToGenome(gr, space.skip = 0)
+ trans_gr <- biovizBase::transformToGenome(gr, space.skip = 0)

dfplot <- as.data.frame(trans_gr)

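
Note (annotation, not part of the diff): biovizBase::transformToGenome() lays the per-chromosome coordinates end to end on a single genome-wide axis for the overview plot. A minimal sketch on a toy GRanges (hypothetical ranges, not the package's data):

    library(GenomicRanges)
    gr <- GRanges(seqnames = c("chr1", "chr2"),
                  ranges = IRanges(start = c(1, 1), end = c(100, 100)),
                  cov = c(5, 7))
    seqlengths(gr) <- c(chr1 = 200, chr2 = 150)
    trans_gr <- biovizBase::transformToGenome(gr, space.skip = 0)
    dfplot <- as.data.frame(trans_gr)   # gains genome-scale coordinates for plotting
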
deleted file mode 100644
...
@@ -1,138 +0,0 @@
-simulate.univariate = function(coordinates, transition, emission, initial=1) {
-
- # Calculate some variables
- numstates = ncol(transition)
- if (numstates!=3) {
- stop("The transition matrix is expected to have 3 columns and 3 rows")
- }
- numbins = nrow(coordinates)
-
- ## Make state vector from transition matrix
- # Generate sample of random numbers
- rsample = runif(numbins,0,1)
- # Integrate transition matrix by row and add -1 in front
- cumtransition = cbind(rep(-1,numstates), t(apply(transition, 1, cumsum)))
- # Generate the state vector by going through each state
- cat("Generating states from transition matrix...")
- states = matrix(rep(NA,numstates*numbins), ncol=numstates)
- for (irow in 1:numstates) {
- states[,irow] = findInterval(rsample, cumtransition[irow,], rightmost.closed=TRUE)
- }
- statevec = rep(NA,numbins)
- statevec[1] = initial
- for (ibin in 2:numbins) {
- statevec[ibin] = states[ibin,statevec[ibin-1]]
- }
- cat(" done\n")
-
- ## Make reads from state vector and distributions
- # Generate the read counts by drawing from distribution
- cat("Generating reads from emission parameters and states...")
- reads = rep(NA, numbins)
- numbins.in.state = aggregate(rep(1,length(statevec)), list(state=statevec), sum)
- reads[statevec==1] = 0
- if (!is.na(numbins.in.state[2,'x'])) {
- reads[statevec==2] = rnbinom(numbins.in.state[2,'x'], size=emission[2,'r'], prob=emission[2,'p'])
- }
- if (!is.na(numbins.in.state[3,'x'])) {
- reads[statevec==3] = rnbinom(numbins.in.state[3,'x'], size=emission[3,'r'], prob=emission[3,'p'])
- }
- cat(" done\n")
-
- # Return the output
- out = list(coordinates = coordinates,
- states = statevec,
- reads = reads,
- transition = transition,
- emission = emission
- )
- return(out)
-
-}
-
-
-
-simulate.multivariate = function(coordinates, transition, emissions, weights, sigma, use.states, initial=1) {
-
- lib = require(mvtnorm)
- if (lib == FALSE) {
- install.packages("mvtnorm")
- library(mvtnorm)
- }
-
- # Calculate some variables
- numstates = ncol(transition)
- numbins = nrow(coordinates)
- nummod = length(emissions)
-
- ## Make state vector from transition matrix
- # Generate sample of random numbers
- rsample = runif(numbins,0,1)
- # Integrate transition matrix by row and add -1 in front
- cumtransition = cbind(rep(-1,numstates), t(apply(transition, 1, cumsum)))
- # Generate the state vector by going through each state
- cat("Generating states from transition matrix...")
- states = matrix(rep(NA,numstates*numbins), ncol=numstates)
- for (irow in 1:numstates) {
- states[,irow] = findInterval(rsample, cumtransition[irow,], rightmost.closed=TRUE)
- }
- statevec = rep(NA,numbins)
- statevec[1] = initial
- for (ibin in 2:numbins) {
- statevec[ibin] = states[ibin,statevec[ibin-1]]
- }
- # Replace the states by combinatorial states
- statevec = use.states[statevec]
- cat(" done\n")
-
- ## Make reads from state vector and emission distributions
- reads = matrix(rep(NA, nummod*numbins), ncol=nummod)
-
- rs = unlist(lapply(emissions, "[", 2:3, 'r'))
- ps = unlist(lapply(emissions, "[", 2:3, 'p'))
- ws = unlist(lapply(weights, "[", 1))
- for (istate in use.states) {
- cat("Generating reads for state ",istate,"\n")
- i = which(use.states==istate)
-
- # Convert istate to binary representation
- binary_state = rev(as.integer(intToBits(istate))[1:nummod])
- binary_stateTF = unlist(list(c(TRUE,FALSE), c(FALSE,TRUE))[binary_state+1])
-
- # Draw from the multivariate normal
- n = length(which(statevec==istate))
- if (n == 0) next
- cat("drawing from multivariate normal... \r")
- z = matrix(rmvnorm(n, mean=rep(0,nummod), sigma=sigma[,,i]), ncol=nummod)
- # Transform to uniform space
- cat("transforming to uniform space... \r")
- u = matrix(apply(z, 2, pnorm), ncol=nummod)
- # Transform to count space using marginals
- cat("transform to count space... \r")
- irs = rs[binary_stateTF]
- ips = ps[binary_stateTF]
- for (imod in 1:nummod) {
- mask = statevec==istate
- if (binary_state[imod] == 0) {
- reads[mask,imod] = qzinbinom(u[,imod], w=ws[imod], size=irs[imod], prob=ips[imod])
- } else {
- reads[mask,imod] = qnbinom(u[,imod], size=irs[imod], prob=ips[imod])
- }
- }
- cat(" \r")
-
- }
-
- # Return the output
- out = list(coordinates = coordinates,
- states = statevec,
- reads = reads,
- emissions = emissions,
- transition = transition,
- sigma = sigma,
- use.states = use.states,
- weights = weights
- )
- return(out)
-
-}
...
@@ -1,59 +1,63 @@
#include "utility.h"
#include "scalehmm.h"
#include "loghmm.h"
-// #include <omp.h> // parallelization options
+#include <omp.h> // parallelization options
+#include <string> // strcmp
+
+static ScaleHMM* hmm; // declare as static outside the function because we only need one and this enables memory-cleanup on R_CheckUserInterrupt()
+static double** multiD;

// ===================================================================================================================================================
// This function takes parameters from R, creates a univariate HMM object, creates the distributions, runs the Baum-Welch and returns the result to R.
// ===================================================================================================================================================
extern "C" {
-void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, double* prob, double* lambda, int* maxiter, int* maxtime, double* eps, int* states, double* A, double* proba, double* loglik, double* weights, int* distr_type, int* iniproc, double* initial_size, double* initial_prob, double* initial_lambda, double* initial_A, double* initial_proba, bool* use_initial_params, int* num_threads, int* error, int* read_cutoff)
+void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, double* prob, double* lambda, int* maxiter, int* maxtime, double* eps, int* states, double* A, double* proba, double* loglik, double* weights, int* distr_type, double* initial_size, double* initial_prob, double* initial_lambda, double* initial_A, double* initial_proba, bool* use_initial_params, int* num_threads, int* error, int* read_cutoff)
{

// Define logging level
// FILE* pFile = fopen("chromStar.log", "w");
// Output2FILE::Stream() = pFile;
- FILELog::ReportingLevel() = FILELog::FromString("NONE");
+// FILELog::ReportingLevel() = FILELog::FromString("NONE");
// FILELog::ReportingLevel() = FILELog::FromString("DEBUG2");

// // Parallelization settings
-// omp_set_num_threads(*num_threads);
+ omp_set_num_threads(*num_threads);

// Print some information
- FILE_LOG(logINFO) << "number of states = " << *N;
+ //FILE_LOG(logINFO) << "number of states = " << *N;
Rprintf("number of states = %d\n", *N);
- FILE_LOG(logINFO) << "number of bins = " << *T;
+ //FILE_LOG(logINFO) << "number of bins = " << *T;
Rprintf("number of bins = %d\n", *T);
if (*maxiter < 0)
{
- FILE_LOG(logINFO) << "maximum number of iterations = none";
+ //FILE_LOG(logINFO) << "maximum number of iterations = none";
Rprintf("maximum number of iterations = none\n");
} else {
- FILE_LOG(logINFO) << "maximum number of iterations = " << *maxiter;
+ //FILE_LOG(logINFO) << "maximum number of iterations = " << *maxiter;
Rprintf("maximum number of iterations = %d\n", *maxiter);
}
if (*maxtime < 0)
{
- FILE_LOG(logINFO) << "maximum running time = none";
+ //FILE_LOG(logINFO) << "maximum running time = none";
Rprintf("maximum running time = none\n");
} else {
- FILE_LOG(logINFO) << "maximum running time = " << *maxtime << " sec";
+ //FILE_LOG(logINFO) << "maximum running time = " << *maxtime << " sec";
Rprintf("maximum running time = %d sec\n", *maxtime);
}
- FILE_LOG(logINFO) << "epsilon = " << *eps;
+ //FILE_LOG(logINFO) << "epsilon = " << *eps;
Rprintf("epsilon = %g\n", *eps);

- FILE_LOG(logDEBUG3) << "observation vector";
+ //FILE_LOG(logDEBUG3) << "observation vector";
for (int t=0; t<50; t++) {
- FILE_LOG(logDEBUG3) << "O["<<t<<"] = " << O[t];
+ //FILE_LOG(logDEBUG3) << "O["<<t<<"] = " << O[t];
}

// Flush Rprintf statements to console
R_FlushConsole();

// Create the HMM
- FILE_LOG(logDEBUG1) << "Creating a univariate HMM";
- ScaleHMM* hmm = new ScaleHMM(*T, *N);
+ //FILE_LOG(logDEBUG1) << "Creating a univariate HMM";
+ hmm = new ScaleHMM(*T, *N);
// LogHMM* hmm = new LogHMM(*T, *N);
hmm->set_cutoff(*read_cutoff);
// Initialize the transition probabilities and proba
...
@@ -72,7 +76,7 @@ void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, d
variance+= pow(O[t] - mean, 2);
}
variance = variance / *T;
- FILE_LOG(logINFO) << "data mean = " << mean << ", data variance = " << variance;
+ //FILE_LOG(logINFO) << "data mean = " << mean << ", data variance = " << variance;
Rprintf("data mean = %g, data variance = %g\n", mean, variance);

// Create the emission densities and initialize
...
@@ -80,37 +84,37 @@ void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, d
{
if (distr_type[i_state] == 1)
{
- FILE_LOG(logDEBUG1) << "Using delta distribution for state " << i_state;
+ //FILE_LOG(logDEBUG1) << "Using delta distribution for state " << i_state;
ZeroInflation *d = new ZeroInflation(O, *T); // delete is done inside ~ScaleHMM()
hmm->densityFunctions.push_back(d);
}
else if (distr_type[i_state] == 2)
{
- FILE_LOG(logDEBUG1) << "Using geometric distribution for state " << i_state;
+ //FILE_LOG(logDEBUG1) << "Using geometric distribution for state " << i_state;
Geometric *d = new Geometric(O, *T, initial_prob[i_state]); // delete is done inside ~ScaleHMM()
hmm->densityFunctions.push_back(d);
}
else if (distr_type[i_state] == 3)
{
- FILE_LOG(logDEBUG1) << "Using negative binomial for state " << i_state;
+ //FILE_LOG(logDEBUG1) << "Using negative binomial for state " << i_state;
NegativeBinomial *d = new NegativeBinomial(O, *T, initial_size[i_state], initial_prob[i_state]); // delete is done inside ~ScaleHMM()
hmm->densityFunctions.push_back(d);
}
else if (distr_type[i_state] == 4)
{
- FILE_LOG(logDEBUG1) << "Using poisson for state " << i_state;
+ //FILE_LOG(logDEBUG1) << "Using poisson for state " << i_state;
Poisson *d = new Poisson(O, *T, initial_lambda[i_state]); // delete is done inside ~ScaleHMM()
hmm->densityFunctions.push_back(d);
}
else if (distr_type[i_state] == 5)
{
- FILE_LOG(logDEBUG1) << "Using binomial for state " << i_state;
+ //FILE_LOG(logDEBUG1) << "Using binomial for state " << i_state;
NegativeBinomial *d = new NegativeBinomial(O, *T, initial_size[i_state], initial_prob[i_state]); // delete is done inside ~ScaleHMM()
hmm->densityFunctions.push_back(d);
}
else
{
- FILE_LOG(logWARNING) << "Density not specified, using default negative binomial for state " << i_state;
+ //FILE_LOG(logWARNING) << "Density not specified, using default negative binomial for state " << i_state;
NegativeBinomial *d = new NegativeBinomial(O, *T, initial_size[i_state], initial_prob[i_state]);
hmm->densityFunctions.push_back(d);
}
...
@@ -120,23 +124,23 @@ void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, d
R_FlushConsole();

// Do the Baum-Welch to estimate the parameters
- FILE_LOG(logDEBUG1) << "Starting Baum-Welch estimation";
+ //FILE_LOG(logDEBUG1) << "Starting Baum-Welch estimation";
try
{
hmm->baumWelch(maxiter, maxtime, eps);
}
catch (std::exception& e)
{
- FILE_LOG(logERROR) << "Error in Baum-Welch: " << e.what();
+ //FILE_LOG(logERROR) << "Error in Baum-Welch: " << e.what();
Rprintf("Error in Baum-Welch: %s\n", e.what());
- if (e.what()=="nan detected") { *error = 1; }
+ if (strcmp(e.what(),"nan detected")==0) { *error = 1; }
else { *error = 2; }
}

- FILE_LOG(logDEBUG1) << "Finished with Baum-Welch estimation";
+ //FILE_LOG(logDEBUG1) << "Finished with Baum-Welch estimation";

// // Compute the posteriors and save results directly to the R pointer
-// FILE_LOG(logDEBUG1) << "Recode posteriors into column representation";
+// //FILE_LOG(logDEBUG1) << "Recode posteriors into column representation";
// #pragma omp parallel for
// for (int iN=0; iN<*N; iN++)
// {
...
@@ -147,18 +151,20 @@ void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, d
// }

// Compute the states from posteriors
- FILE_LOG(logDEBUG1) << "Computing states from posteriors";
- double posterior_per_t [*N];
+ //FILE_LOG(logDEBUG1) << "Computing states from posteriors";
+ int ind_max;
+ std::vector<double> posterior_per_t(*N);
for (int t=0; t<*T; t++)
{
for (int iN=0; iN<*N; iN++)
{
posterior_per_t[iN] = hmm->get_posterior(iN, t);
}
- states[t] = state_labels[argMax(posterior_per_t, *N)];
+ ind_max = std::distance(posterior_per_t.begin(), std::max_element(posterior_per_t.begin(), posterior_per_t.end()));
+ states[t] = state_labels[ind_max];
}

- FILE_LOG(logDEBUG1) << "Return parameters";
+ //FILE_LOG(logDEBUG1) << "Return parameters";
// also return the estimated transition matrix and the initial probs
for (int i=0; i<*N; i++)
{
...
@@ -205,8 +211,9 @@ void R_univariate_hmm(int* O, int* T, int* N, int* state_labels, double* size, d
*loglik = hmm->get_logP();
hmm->calc_weights(weights);

- FILE_LOG(logDEBUG1) << "Deleting the hmm";
+ //FILE_LOG(logDEBUG1) << "Deleting the hmm";
delete hmm;
+ hmm = NULL; // assign NULL to defuse the additional delete in on.exit() call
}
} // extern C

...
@@ -223,35 +230,35 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,
// Output2FILE::Stream() = pFile;
// FILELog::ReportingLevel() = FILELog::FromString("ITERATION");
// FILELog::ReportingLevel() = FILELog::FromString("DEBUG2");
- FILELog::ReportingLevel() = FILELog::FromString("ERROR");
+// FILELog::ReportingLevel() = FILELog::FromString("ERROR");

// Parallelization settings
-// omp_set_num_threads(*num_threads);
+ omp_set_num_threads(*num_threads);

// Print some information
- FILE_LOG(logINFO) << "number of states = " << *N;
+ //FILE_LOG(logINFO) << "number of states = " << *N;
Rprintf("number of states = %d\n", *N);
- FILE_LOG(logINFO) << "number of bins = " << *T;
+ //FILE_LOG(logINFO) << "number of bins = " << *T;
Rprintf("number of bins = %d\n", *T);
if (*maxiter < 0)
{
- FILE_LOG(logINFO) << "maximum number of iterations = none";
+ //FILE_LOG(logINFO) << "maximum number of iterations = none";
Rprintf("maximum number of iterations = none\n");
} else {
- FILE_LOG(logINFO) << "maximum number of iterations = " << *maxiter;
+ //FILE_LOG(logINFO) << "maximum number of iterations = " << *maxiter;
Rprintf("maximum number of iterations = %d\n", *maxiter);
}
if (*maxtime < 0)
{
- FILE_LOG(logINFO) << "maximum running time = none";
+ //FILE_LOG(logINFO) << "maximum running time = none";
Rprintf("maximum running time = none\n");
} else {
- FILE_LOG(logINFO) << "maximum running time = " << *maxtime << " sec";
+ //FILE_LOG(logINFO) << "maximum running time = " << *maxtime << " sec";
Rprintf("maximum running time = %d sec\n", *maxtime);
}
- FILE_LOG(logINFO) << "epsilon = " << *eps;
+ //FILE_LOG(logINFO) << "epsilon = " << *eps;
Rprintf("epsilon = %g\n", *eps);
- FILE_LOG(logINFO) << "number of modifications = " << *Nmod;
+ //FILE_LOG(logINFO) << "number of modifications = " << *Nmod;
Rprintf("number of modifications = %d\n", *Nmod);

// Flush Rprintf statements to console
...
@@ -259,7 +266,7 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,

// Recode the densities vector to matrix representation
// clock_t clocktime = clock(), dtime;
- double** multiD = allocDoubleMatrix(*N, *T);
+ multiD = CallocDoubleMatrix(*N, *T);
for (int iN=0; iN<*N; iN++)
{
for (int t=0; t<*T; t++)
...
@@ -268,11 +275,11 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,
}
}
// dtime = clock() - clocktime;
-// FILE_LOG(logDEBUG1) << "recoding densities vector to matrix representation: " << dtime << " clicks";
+// //FILE_LOG(logDEBUG1) << "recoding densities vector to matrix representation: " << dtime << " clicks";

// Create the HMM
- FILE_LOG(logDEBUG1) << "Creating the multivariate HMM";
- ScaleHMM* hmm = new ScaleHMM(*T, *N, *Nmod, multiD);
+ //FILE_LOG(logDEBUG1) << "Creating the multivariate HMM";
+ hmm = new ScaleHMM(*T, *N, *Nmod, multiD);
// Initialize the transition probabilities and proba
hmm->initialize_transition_probs(initial_A, *use_initial_params);
hmm->initialize_proba(initial_proba, *use_initial_params);
...
@@ -280,30 +287,30 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,
// Print logproba and A
// for (int iN=0; iN<*N; iN++)
// {
-// FILE_LOG(logDEBUG) << "proba["<<iN<<"] = " <<exp(hmm->logproba[iN]);
+// //FILE_LOG(logDEBUG) << "proba["<<iN<<"] = " <<exp(hmm->logproba[iN]);
// for (int jN=0; jN<*N; jN++)
// {
-// FILE_LOG(logDEBUG) << "A["<<iN<<"]["<<jN<<"] = " << hmm->A[iN][jN];
+// //FILE_LOG(logDEBUG) << "A["<<iN<<"]["<<jN<<"] = " << hmm->A[iN][jN];
// }
// }

// Estimate the parameters
- FILE_LOG(logDEBUG1) << "Starting Baum-Welch estimation";
+ //FILE_LOG(logDEBUG1) << "Starting Baum-Welch estimation";
try
{
hmm->baumWelch(maxiter, maxtime, eps);
}
catch (std::exception& e)
{
- FILE_LOG(logERROR) << "Error in Baum-Welch: " << e.what();
+ //FILE_LOG(logERROR) << "Error in Baum-Welch: " << e.what();
Rprintf("Error in Baum-Welch: %s\n", e.what());
- if (e.what()=="nan detected") { *error = 1; }
+ if (strcmp(e.what(),"nan detected")==0) { *error = 1; }
else { *error = 2; }
}
- FILE_LOG(logDEBUG1) << "Finished with Baum-Welch estimation";
+ //FILE_LOG(logDEBUG1) << "Finished with Baum-Welch estimation";

// // Compute the posteriors and save results directly to the R pointer
-// FILE_LOG(logDEBUG1) << "Recode posteriors into column representation";
+// //FILE_LOG(logDEBUG1) << "Recode posteriors into column representation";
// for (int iN=0; iN<*N; iN++)
// {
// for (int t=0; t<*T; t++)
...
@@ -313,18 +320,20 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,
// }

// Compute the states from posteriors
- FILE_LOG(logDEBUG1) << "Computing states from posteriors";
- double posterior_per_t [*N];
+ //FILE_LOG(logDEBUG1) << "Computing states from posteriors";
+ int ind_max;
+ std::vector<double> posterior_per_t(*N);
for (int t=0; t<*T; t++)
{
for (int iN=0; iN<*N; iN++)
{
posterior_per_t[iN] = hmm->get_posterior(iN, t);
}
- states[t] = comb_states[argMax(posterior_per_t, *N)];
+ ind_max = std::distance(posterior_per_t.begin(), std::max_element(posterior_per_t.begin(), posterior_per_t.end()));
+ states[t] = comb_states[ind_max];
}

- FILE_LOG(logDEBUG1) << "Return parameters";
+ //FILE_LOG(logDEBUG1) << "Return parameters";
// also return the estimated transition matrix and the initial probs
for (int i=0; i<*N; i++)
{
...
@@ -336,9 +345,30 @@ void R_multivariate_hmm(double* D, int* T, int* N, int *Nmod, int* comb_states,
}
*loglik = hmm->get_logP();

- FILE_LOG(logDEBUG1) << "Deleting the hmm";
+ //FILE_LOG(logDEBUG1) << "Deleting the hmm";
delete hmm;
- freeDoubleMatrix(multiD, *N);
+ hmm = NULL; // assign NULL to defuse the additional delete in on.exit() call
+// FreeDoubleMatrix(multiD, *N);
}
} // extern C

+
+// =======================================================
+// This function make a cleanup if anything was left over
+// =======================================================
+extern "C" {
+void R_univariate_cleanup()
+{
+// //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; // This message will be shown if interrupt happens before start of C-code
+ delete hmm;
+}
+}
+
+extern "C" {
+void R_multivariate_cleanup(int* N)
+{
+ delete hmm;
+ FreeDoubleMatrix(multiD, *N);
+}
+}
+
...
7 | 7 |
// Constructor and Destructor --------------------------------- |
8 | 8 |
Normal::Normal(int* observations, int T, double mean, double variance) |
9 | 9 |
{ |
10 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
10 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
11 | 11 |
this->obs = observations; |
12 | 12 |
this->T = T; |
13 | 13 |
this->mean = mean; |
... | ... |
@@ -17,13 +17,13 @@ Normal::Normal(int* observations, int T, double mean, double variance) |
17 | 17 |
|
18 | 18 |
Normal::~Normal() |
19 | 19 |
{ |
20 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
20 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
21 | 21 |
} |
22 | 22 |
|
23 | 23 |
// Methods ---------------------------------------------------- |
24 | 24 |
void Normal::calc_densities(double* density) |
25 | 25 |
{ |
26 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
26 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
27 | 27 |
for (int t=0; t<this->T; t++) |
28 | 28 |
{ |
29 | 29 |
density[t] = dnorm(this->obs[t], this->mean, this->sd, 0); |
... | ... |
@@ -32,7 +32,7 @@ void Normal::calc_densities(double* density) |
32 | 32 |
|
33 | 33 |
void Normal::calc_logdensities(double* logdensity) |
34 | 34 |
{ |
35 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
35 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
36 | 36 |
for (int t=0; t<this->T; t++) |
37 | 37 |
{ |
38 | 38 |
logdensity[t] = dnorm(this->obs[t], this->mean, this->sd, 1); |
... | ... |
@@ -41,51 +41,52 @@ void Normal::calc_logdensities(double* logdensity) |
41 | 41 |
|
42 | 42 |
void Normal::update(double* weights) |
43 | 43 |
{ |
44 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
44 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
45 |
+ (void)weights; |
|
45 | 46 |
// TODO |
46 | 47 |
} |
47 | 48 |
|
48 | 49 |
// Getter and Setter ------------------------------------------ |
49 | 50 |
DensityName Normal::get_name() |
50 | 51 |
{ |
51 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
52 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
52 | 53 |
return(NORMAL); |
53 | 54 |
} |
54 | 55 |
|
55 | 56 |
double Normal::get_mean() |
56 | 57 |
{ |
57 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
58 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
58 | 59 |
return(this->mean); |
59 | 60 |
} |
60 | 61 |
|
61 | 62 |
void Normal::set_mean(double mean) |
62 | 63 |
{ |
63 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
64 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
64 | 65 |
this->mean = mean; |
65 | 66 |
} |
66 | 67 |
|
67 | 68 |
double Normal::get_variance() |
68 | 69 |
{ |
69 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
70 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
70 | 71 |
return(this->variance); |
71 | 72 |
} |
72 | 73 |
|
73 | 74 |
void Normal::set_variance(double variance) |
74 | 75 |
{ |
75 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
76 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
76 | 77 |
this->variance = variance; |
77 | 78 |
this->sd = sqrt(variance); |
78 | 79 |
} |
79 | 80 |
|
80 | 81 |
double Normal::get_stdev() |
81 | 82 |
{ |
82 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
83 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
83 | 84 |
return(this->sd); |
84 | 85 |
} |
85 | 86 |
|
86 | 87 |
void Normal::set_stdev(double stdev) |
87 | 88 |
{ |
88 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
89 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
89 | 90 |
this->sd = stdev; |
90 | 91 |
this->variance = stdev*stdev; |
91 | 92 |
} |
... | ... |
@@ -98,7 +99,7 @@ void Normal::set_stdev(double stdev) |
98 | 99 |
// Constructor and Destructor --------------------------------- |
99 | 100 |
Poisson::Poisson(int* observations, int T, double lambda) |
100 | 101 |
{ |
101 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
102 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
102 | 103 |
this->obs = observations; |
103 | 104 |
this->T = T; |
104 | 105 |
this->lambda = lambda; |
... | ... |
@@ -107,8 +108,8 @@ Poisson::Poisson(int* observations, int T, double lambda) |
107 | 108 |
if (this->obs != NULL) |
108 | 109 |
{ |
109 | 110 |
this->max_obs = intMax(observations, T); |
110 |
- this->lxfactorials = (double*) calloc(max_obs+1, sizeof(double)); |
|
111 |
- this->lxfactorials[0] = 0.0; // Not necessary, already 0 because of calloc |
|
111 |
+ this->lxfactorials = (double*) Calloc(max_obs+1, double); |
|
112 |
+ this->lxfactorials[0] = 0.0; // Not necessary, already 0 because of Calloc |
|
112 | 113 |
this->lxfactorials[1] = 0.0; |
113 | 114 |
for (int j=2; j<=max_obs; j++) |
114 | 115 |
{ |
... | ... |
@@ -119,25 +120,25 @@ Poisson::Poisson(int* observations, int T, double lambda) |
119 | 120 |
|
120 | 121 |
Poisson::~Poisson() |
121 | 122 |
{ |
122 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
123 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
123 | 124 |
if (this->lxfactorials != NULL) |
124 | 125 |
{ |
125 |
- free(this->lxfactorials); |
|
126 |
+ Free(this->lxfactorials); |
|
126 | 127 |
} |
127 | 128 |
} |
128 | 129 |
|
129 | 130 |
// Methods ---------------------------------------------------- |
130 | 131 |
void Poisson::calc_logdensities(double* logdens) |
131 | 132 |
{ |
132 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
133 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
133 | 134 |
double logl = log(this->lambda); |
134 | 135 |
double l = this->lambda; |
135 | 136 |
double lxfactorial; |
136 | 137 |
// Select strategy for computing densities |
137 | 138 |
if (this->max_obs <= this->T) |
138 | 139 |
{ |
139 |
- FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
140 |
- double logdens_per_read [this->max_obs+1]; |
|
140 |
+ //FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
141 |
+ std::vector<double> logdens_per_read(this->max_obs+1); |
|
141 | 142 |
for (int j=0; j<=this->max_obs; j++) |
142 | 143 |
{ |
143 | 144 |
logdens_per_read[j] = j*logl - l - this->lxfactorials[j]; |
... | ... |
@@ -145,27 +146,27 @@ void Poisson::calc_logdensities(double* logdens) |
145 | 146 |
for (int t=0; t<this->T; t++) |
146 | 147 |
{ |
147 | 148 |
logdens[t] = logdens_per_read[(int) this->obs[t]]; |
148 |
- FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
149 |
+ //FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
149 | 150 |
if (isnan(logdens[t])) |
150 | 151 |
{ |
151 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
152 |
- FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
152 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
153 |
+ //FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
153 | 154 |
throw nan_detected; |
154 | 155 |
} |
155 | 156 |
} |
156 | 157 |
} |
157 | 158 |
else |
158 | 159 |
{ |
159 |
- FILE_LOG(logDEBUG2) << "Computing densities in " << __func__ << " for every t, because max(O)>T"; |
|
160 |
+ //FILE_LOG(logDEBUG2) << "Computing densities in " << __func__ << " for every t, because max(O)>T"; |
|
160 | 161 |
for (int t=0; t<this->T; t++) |
161 | 162 |
{ |
162 | 163 |
lxfactorial = this->lxfactorials[(int) this->obs[t]]; |
163 | 164 |
logdens[t] = this->obs[t]*logl - l - lxfactorial; |
164 |
- FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
165 |
+ //FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
165 | 166 |
if (isnan(logdens[t])) |
166 | 167 |
{ |
167 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
168 |
- FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
168 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
169 |
+ //FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
169 | 170 |
throw nan_detected; |
170 | 171 |
} |
171 | 172 |
} |
... | ... |
@@ -174,15 +175,15 @@ void Poisson::calc_logdensities(double* logdens) |
174 | 175 |
|
175 | 176 |
void Poisson::calc_densities(double* dens) |
176 | 177 |
{ |
177 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
178 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
178 | 179 |
double logl = log(this->lambda); |
179 | 180 |
double l = this->lambda; |
180 | 181 |
double lxfactorial; |
181 | 182 |
// Select strategy for computing densities |
182 | 183 |
if (this->max_obs <= this->T) |
183 | 184 |
{ |
184 |
- FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
185 |
- double dens_per_read [this->max_obs+1]; |
|
185 |
+ //FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
186 |
+ std::vector<double> dens_per_read(this->max_obs+1); |
|
186 | 187 |
for (int j=0; j<=this->max_obs; j++) |
187 | 188 |
{ |
188 | 189 |
dens_per_read[j] = exp( j*logl - l - this->lxfactorials[j] ); |
... | ... |
@@ -190,27 +191,27 @@ void Poisson::calc_densities(double* dens) |
190 | 191 |
for (int t=0; t<this->T; t++) |
191 | 192 |
{ |
192 | 193 |
dens[t] = dens_per_read[(int) this->obs[t]]; |
193 |
- FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
194 |
+ //FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
194 | 195 |
if (isnan(dens[t])) |
195 | 196 |
{ |
196 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
197 |
- FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
197 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
198 |
+ //FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
198 | 199 |
throw nan_detected; |
199 | 200 |
} |
200 | 201 |
} |
201 | 202 |
} |
202 | 203 |
else |
203 | 204 |
{ |
204 |
- FILE_LOG(logDEBUG2) << "Computing densities in " << __func__ << " for every t, because max(O)>T"; |
|
205 |
+ //FILE_LOG(logDEBUG2) << "Computing densities in " << __func__ << " for every t, because max(O)>T"; |
|
205 | 206 |
for (int t=0; t<this->T; t++) |
206 | 207 |
{ |
207 | 208 |
lxfactorial = this->lxfactorials[(int) this->obs[t]]; |
208 | 209 |
dens[t] = exp( this->obs[t]*logl - l - lxfactorial ); |
209 |
- FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
210 |
+ //FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
210 | 211 |
if (isnan(dens[t])) |
211 | 212 |
{ |
212 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
213 |
- FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
213 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
214 |
+ //FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
214 | 215 |
throw nan_detected; |
215 | 216 |
} |
216 | 217 |
} |
... | ... |
@@ -219,7 +220,7 @@ void Poisson::calc_densities(double* dens) |
219 | 220 |
|
220 | 221 |
void Poisson::update(double* weights) |
221 | 222 |
{ |
222 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
223 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
223 | 224 |
double numerator, denominator; |
224 | 225 |
// Update lambda |
225 | 226 |
numerator=denominator=0.0; |
... | ... |
@@ -232,15 +233,15 @@ void Poisson::update(double* weights) |
232 | 233 |
} |
233 | 234 |
this->lambda = numerator/denominator; // Update of size is now done with updated lambda |
234 | 235 |
// dtime = clock() - time; |
235 |
-// FILE_LOG(logDEBUG1) << "updateL(): "<<dtime<< " clicks"; |
|
236 |
- FILE_LOG(logDEBUG1) << "l = " << this->lambda; |
|
236 |
+// //FILE_LOG(logDEBUG1) << "updateL(): "<<dtime<< " clicks"; |
|
237 |
+ //FILE_LOG(logDEBUG1) << "l = " << this->lambda; |
|
237 | 238 |
|
238 | 239 |
} |
239 | 240 |
|
240 | 241 |
void Poisson::update_constrained(double** weights, int fromState, int toState) |
241 | 242 |
{ |
242 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
243 |
- FILE_LOG(logDEBUG1) << "l = "<<this->lambda; |
|
243 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
244 |
+ //FILE_LOG(logDEBUG1) << "l = "<<this->lambda; |
|
244 | 245 |
double numerator, denominator; |
245 | 246 |
// Update lambda |
246 | 247 |
numerator=denominator=0.0; |
... | ... |
@@ -256,45 +257,45 @@ void Poisson::update_constrained(double** weights, int fromState, int toState) |
256 | 257 |
} |
257 | 258 |
this->lambda = numerator/denominator; // Update of size is now done with old lambda |
258 | 259 |
// dtime = clock() - time; |
259 |
-// FILE_LOG(logDEBUG1) << "updateL(): "<<dtime<< " clicks"; |
|
260 |
- FILE_LOG(logDEBUG1) << "l = "<<this->lambda; |
|
260 |
+// //FILE_LOG(logDEBUG1) << "updateL(): "<<dtime<< " clicks"; |
|
261 |
+ //FILE_LOG(logDEBUG1) << "l = "<<this->lambda; |
|
261 | 262 |
|
262 | 263 |
} |
263 | 264 |
|
264 | 265 |
// Getter and Setter ------------------------------------------ |
265 | 266 |
double Poisson::get_mean() |
266 | 267 |
{ |
267 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
268 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
268 | 269 |
return( this->lambda ); |
269 | 270 |
} |
270 | 271 |
|
271 | 272 |
void Poisson::set_mean(double mean) |
272 | 273 |
{ |
273 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
274 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
274 | 275 |
this->lambda = mean; |
275 | 276 |
} |
276 | 277 |
|
277 | 278 |
double Poisson::get_variance() |
278 | 279 |
{ |
279 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
280 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
280 | 281 |
return( this->lambda ); |
281 | 282 |
} |
282 | 283 |
|
283 | 284 |
void Poisson::set_variance(double variance) |
284 | 285 |
{ |
285 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
286 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
286 | 287 |
this->lambda = variance; |
287 | 288 |
} |
288 | 289 |
|
289 | 290 |
DensityName Poisson::get_name() |
290 | 291 |
{ |
291 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
292 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
292 | 293 |
return(POISSON); |
293 | 294 |
} |
294 | 295 |
|
295 | 296 |
double Poisson::get_lambda() |
296 | 297 |
{ |
297 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
298 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
298 | 299 |
return(this->lambda); |
299 | 300 |
} |
300 | 301 |
|
... | ... |
@@ -306,7 +307,7 @@ double Poisson::get_lambda() |
306 | 307 |
// Constructor and Destructor --------------------------------- |
307 | 308 |
NegativeBinomial::NegativeBinomial(int* observations, int T, double size, double prob) |
308 | 309 |
{ |
309 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
310 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
310 | 311 |
this->obs = observations; |
311 | 312 |
this->T = T; |
312 | 313 |
this->size = size; |
... | ... |
@@ -316,8 +317,8 @@ NegativeBinomial::NegativeBinomial(int* observations, int T, double size, double |
316 | 317 |
if (this->obs != NULL) |
317 | 318 |
{ |
318 | 319 |
this->max_obs = intMax(observations, T); |
319 |
- this->lxfactorials = (double*) calloc(max_obs+1, sizeof(double)); |
|
320 |
- this->lxfactorials[0] = 0.0; // Not necessary, already 0 because of calloc |
|
320 |
+ this->lxfactorials = (double*) Calloc(max_obs+1, double); |
|
321 |
+ this->lxfactorials[0] = 0.0; // Not necessary, already 0 because of Calloc |
|
321 | 322 |
this->lxfactorials[1] = 0.0; |
322 | 323 |
for (int j=2; j<=max_obs; j++) |
323 | 324 |
{ |
... | ... |
@@ -328,17 +329,17 @@ NegativeBinomial::NegativeBinomial(int* observations, int T, double size, double |
328 | 329 |
|
329 | 330 |
NegativeBinomial::~NegativeBinomial() |
330 | 331 |
{ |
331 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
332 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
332 | 333 |
if (this->lxfactorials != NULL) |
333 | 334 |
{ |
334 |
- free(this->lxfactorials); |
|
335 |
+ Free(this->lxfactorials); |
|
335 | 336 |
} |
336 | 337 |
} |
337 | 338 |
|
338 | 339 |
// Methods ---------------------------------------------------- |
339 | 340 |
void NegativeBinomial::calc_logdensities(double* logdens) |
340 | 341 |
{ |
341 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
342 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
342 | 343 |
double logp = log(this->prob); |
343 | 344 |
double log1minusp = log(1-this->prob); |
344 | 345 |
double lGammaR,lGammaRplusX,lxfactorial; |
... | ... |
@@ -346,8 +347,8 @@ void NegativeBinomial::calc_logdensities(double* logdens) |
346 | 347 |
// Select strategy for computing gammas |
347 | 348 |
if (this->max_obs <= this->T) |
348 | 349 |
{ |
349 |
- FILE_LOG(logDEBUG2) << "Precomputing gammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
350 |
- double logdens_per_read [this->max_obs+1]; |
|
350 |
+ //FILE_LOG(logDEBUG2) << "Precomputing gammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
351 |
+ std::vector<double> logdens_per_read(this->max_obs+1); |
|
351 | 352 |
for (int j=0; j<=this->max_obs; j++) |
352 | 353 |
{ |
353 | 354 |
logdens_per_read[j] = lgamma(this->size + j) - lGammaR - lxfactorials[j] + this->size * logp + j * log1minusp; |
... | ... |
@@ -355,28 +356,28 @@ void NegativeBinomial::calc_logdensities(double* logdens) |
355 | 356 |
for (int t=0; t<this->T; t++) |
356 | 357 |
{ |
357 | 358 |
logdens[t] = logdens_per_read[(int) this->obs[t]]; |
358 |
- FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
359 |
+ //FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
359 | 360 |
if (isnan(logdens[t])) |
360 | 361 |
{ |
361 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
362 |
- FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
362 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
363 |
+ //FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
363 | 364 |
throw nan_detected; |
364 | 365 |
} |
365 | 366 |
} |
366 | 367 |
} |
367 | 368 |
else |
368 | 369 |
{ |
369 |
- FILE_LOG(logDEBUG2) << "Computing gammas in " << __func__ << " for every t, because max(O)>T"; |
|
370 |
+ //FILE_LOG(logDEBUG2) << "Computing gammas in " << __func__ << " for every t, because max(O)>T"; |
|
370 | 371 |
for (int t=0; t<this->T; t++) |
371 | 372 |
{ |
372 | 373 |
lGammaRplusX = lgamma(this->size + this->obs[t]); |
373 | 374 |
lxfactorial = this->lxfactorials[(int) this->obs[t]]; |
374 | 375 |
logdens[t] = lGammaRplusX - lGammaR - lxfactorial + this->size * logp + this->obs[t] * log1minusp; |
375 |
- FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
376 |
+ //FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
376 | 377 |
if (isnan(logdens[t])) |
377 | 378 |
{ |
378 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
379 |
- FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
379 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
380 |
+ //FILE_LOG(logERROR) << "logdens["<<t<<"] = "<< logdens[t]; |
|
380 | 381 |
throw nan_detected; |
381 | 382 |
} |
382 | 383 |
} |
... | ... |
@@ -385,7 +386,7 @@ void NegativeBinomial::calc_logdensities(double* logdens) |
385 | 386 |
|
386 | 387 |
void NegativeBinomial::calc_densities(double* dens) |
387 | 388 |
{ |
388 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
389 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
389 | 390 |
double logp = log(this->prob); |
390 | 391 |
double log1minusp = log(1-this->prob); |
391 | 392 |
double lGammaR,lGammaRplusX,lxfactorial; |
... | ... |
@@ -393,8 +394,8 @@ void NegativeBinomial::calc_densities(double* dens) |
393 | 394 |
// Select strategy for computing gammas |
394 | 395 |
if (this->max_obs <= this->T) |
395 | 396 |
{ |
396 |
- FILE_LOG(logDEBUG2) << "Precomputing gammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
397 |
- double dens_per_read [this->max_obs+1]; |
|
397 |
+ //FILE_LOG(logDEBUG2) << "Precomputing gammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
398 |
+ std::vector<double> dens_per_read(this->max_obs+1); |
|
398 | 399 |
for (int j=0; j<=this->max_obs; j++) |
399 | 400 |
{ |
400 | 401 |
dens_per_read[j] = exp( lgamma(this->size + j) - lGammaR - lxfactorials[j] + this->size * logp + j * log1minusp ); |
... | ... |
@@ -402,28 +403,28 @@ void NegativeBinomial::calc_densities(double* dens) |
402 | 403 |
for (int t=0; t<this->T; t++) |
403 | 404 |
{ |
404 | 405 |
dens[t] = dens_per_read[(int) this->obs[t]]; |
405 |
- FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
406 |
+ //FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
406 | 407 |
if (isnan(dens[t])) |
407 | 408 |
{ |
408 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
409 |
- FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
409 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
410 |
+ //FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
410 | 411 |
throw nan_detected; |
411 | 412 |
} |
412 | 413 |
} |
413 | 414 |
} |
414 | 415 |
else |
415 | 416 |
{ |
416 |
- FILE_LOG(logDEBUG2) << "Computing gammas in " << __func__ << " for every t, because max(O)>T"; |
|
417 |
+ //FILE_LOG(logDEBUG2) << "Computing gammas in " << __func__ << " for every t, because max(O)>T"; |
|
417 | 418 |
for (int t=0; t<this->T; t++) |
418 | 419 |
{ |
419 | 420 |
lGammaRplusX = lgamma(this->size + this->obs[t]); |
420 | 421 |
lxfactorial = this->lxfactorials[(int) this->obs[t]]; |
421 | 422 |
dens[t] = exp( lGammaRplusX - lGammaR - lxfactorial + this->size * logp + this->obs[t] * log1minusp ); |
422 |
- FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
423 |
+ //FILE_LOG(logDEBUG4) << "dens["<<t<<"] = " << dens[t]; |
|
423 | 424 |
if (isnan(dens[t])) |
424 | 425 |
{ |
425 |
- FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
426 |
- FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
426 |
+ //FILE_LOG(logERROR) << __PRETTY_FUNCTION__; |
|
427 |
+ //FILE_LOG(logERROR) << "dens["<<t<<"] = "<< dens[t]; |
|
427 | 428 |
throw nan_detected; |
428 | 429 |
} |
429 | 430 |
} |
... | ... |
@@ -432,7 +433,7 @@ void NegativeBinomial::calc_densities(double* dens) |
432 | 433 |
|
433 | 434 |
void NegativeBinomial::update(double* weights) |
434 | 435 |
{ |
435 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
436 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
436 | 437 |
double eps = 1e-4, kmax; |
437 | 438 |
double numerator, denominator, size0, dSize, F, dFdSize, DigammaSize, DigammaSizePlusDSize; |
438 | 439 |
// Update prob (p) |
... | ... |
@@ -447,7 +448,7 @@ void NegativeBinomial::update(double* weights) |
447 | 448 |
this->prob = numerator/denominator; // Update of size is now done with updated prob |
448 | 449 |
double logp = log(this->prob); |
449 | 450 |
// dtime = clock() - time; |
450 |
-// FILE_LOG(logDEBUG1) << "updateP(): "<<dtime<< " clicks"; |
|
451 |
+// //FILE_LOG(logDEBUG1) << "updateP(): "<<dtime<< " clicks"; |
|
451 | 452 |
// Update of size with Newton Method |
452 | 453 |
size0 = this->size; |
453 | 454 |
dSize = 0.00001; |
... | ... |
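
For reference, maximizing the weighted log-likelihood in prob at fixed size has the closed form below. The numerator/denominator accumulation that feeds this->prob = numerator/denominator lies outside this hunk, so it is an assumption that the code evaluates exactly this expression; the formula itself follows directly from differentiating \sum_t w_t [ r \log p + x_t \log(1-p) ]:

    \hat{p} = \frac{\sum_t w_t\, r}{\sum_t w_t\,(r + x_t)}
    \qquad\text{from}\qquad
    \sum_t w_t \Bigl(\frac{r}{p} - \frac{x_t}{1-p}\Bigr) = 0 .
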
@@ -456,8 +457,9 @@ void NegativeBinomial::update(double* weights) |
456 | 457 |
// Select strategy for computing digammas |
457 | 458 |
if (this->max_obs <= this->T) |
458 | 459 |
{ |
459 |
- FILE_LOG(logDEBUG2) << "Precomputing digammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
460 |
- double DigammaSizePlusX[this->max_obs+1], DigammaSizePlusDSizePlusX[this->max_obs+1]; |
|
460 |
+ //FILE_LOG(logDEBUG2) << "Precomputing digammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
461 |
+ std::vector<double> DigammaSizePlusX(this->max_obs+1); |
|
462 |
+ std::vector<double> DigammaSizePlusDSizePlusX(this->max_obs+1); |
|
461 | 463 |
for (int k=1; k<kmax; k++) |
462 | 464 |
{ |
463 | 465 |
F=dFdSize=0.0; |
... | ... |
@@ -492,7 +494,7 @@ void NegativeBinomial::update(double* weights) |
492 | 494 |
} |
493 | 495 |
else |
494 | 496 |
{ |
495 |
- FILE_LOG(logDEBUG2) << "Computing digammas in " << __func__ << " for every t, because max(O)>T"; |
|
497 |
+ //FILE_LOG(logDEBUG2) << "Computing digammas in " << __func__ << " for every t, because max(O)>T"; |
|
496 | 498 |
double DigammaSizePlusX, DigammaSizePlusDSizePlusX; |
497 | 499 |
for (int k=1; k<kmax; k++) |
498 | 500 |
{ |
... | ... |
@@ -523,20 +525,21 @@ void NegativeBinomial::update(double* weights) |
523 | 525 |
} |
524 | 526 |
} |
525 | 527 |
this->size = size0; |
526 |
- FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
528 |
+ //FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
527 | 529 |
|
528 | 530 |
// dtime = clock() - time; |
529 |
-// FILE_LOG(logDEBUG1) << "updateR(): "<<dtime<< " clicks"; |
|
531 |
+// //FILE_LOG(logDEBUG1) << "updateR(): "<<dtime<< " clicks"; |
|
530 | 532 |
|
531 | 533 |
} |
532 | 534 |
|
533 | 535 |
void NegativeBinomial::update_constrained(double** weights, int fromState, int toState) |
534 | 536 |
{ |
535 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
536 |
- FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
537 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
538 |
+ //FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
537 | 539 |
double eps = 1e-4, kmax; |
538 | 540 |
// double numerator, denominator, size0, dSize, F, dFdSize, DigammaSize, DigammaSizePlusDSize; |
539 |
- double numerator, denominator, size0, dSize, F, dFdSize, DigammaSize, TrigammaSize; |
|
541 |
+ double numerator, denominator, size0, F, dFdSize, DigammaSize, TrigammaSize; |
|
542 |
+// double dSize; |
|
540 | 543 |
double logp = log(this->prob); |
541 | 544 |
// Update prob (p) |
542 | 545 |
numerator=denominator=0.0; |
... | ... |
@@ -552,18 +555,19 @@ void NegativeBinomial::update_constrained(double** weights, int fromState, int t |
552 | 555 |
} |
553 | 556 |
this->prob = numerator/denominator; // Update of size is now done with old prob |
554 | 557 |
// dtime = clock() - time; |
555 |
-// FILE_LOG(logDEBUG1) << "updateP(): "<<dtime<< " clicks"; |
|
558 |
+// //FILE_LOG(logDEBUG1) << "updateP(): "<<dtime<< " clicks"; |
|
556 | 559 |
// Update of size with Newton Method |
557 | 560 |
size0 = this->size; |
558 |
- dSize = 0.00001; |
|
561 |
+// dSize = 0.00001; |
|
559 | 562 |
kmax = 20; |
560 | 563 |
// time = clock(); |
561 | 564 |
// Select strategy for computing digammas |
562 | 565 |
if (this->max_obs <= this->T) |
563 | 566 |
{ |
564 |
- FILE_LOG(logDEBUG2) << "Precomputing digammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
565 |
-// double DigammaSizePlusX[this->max_obs+1], DigammaSizePlusDSizePlusX[this->max_obs+1]; |
|
566 |
- double DigammaSizePlusX[this->max_obs+1], TrigammaSizePlusX[this->max_obs+1]; |
|
567 |
+ //FILE_LOG(logDEBUG2) << "Precomputing digammas in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
568 |
+// std::vector<double> DigammaSizePlusX(this->max_obs+1), DigammaSizePlusDSizePlusX(this->max_obs+1); |
|
569 |
+ std::vector<double> DigammaSizePlusX(this->max_obs+1); |
|
570 |
+ std::vector<double> TrigammaSizePlusX(this->max_obs+1); |
|
567 | 571 |
for (int k=1; k<kmax; k++) |
568 | 572 |
{ |
569 | 573 |
F=dFdSize=0.0; |
... | ... |
@@ -604,7 +608,7 @@ void NegativeBinomial::update_constrained(double** weights, int fromState, int t |
604 | 608 |
} |
605 | 609 |
else |
606 | 610 |
{ |
607 |
- FILE_LOG(logDEBUG2) << "Computing digammas in " << __func__ << " for every t, because max(O)>T"; |
|
611 |
+ //FILE_LOG(logDEBUG2) << "Computing digammas in " << __func__ << " for every t, because max(O)>T"; |
|
608 | 612 |
// double DigammaSizePlusX, DigammaSizePlusDSizePlusX; |
609 | 613 |
double DigammaSizePlusX, TrigammaSizePlusX; |
610 | 614 |
for (int k=1; k<kmax; k++) |
... | ... |
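
As far as the surrounding code lets one infer, F and dFdSize accumulated in these digamma/trigamma loops are the score in the size parameter r (at fixed p) and its derivative, i.e. the Newton iteration

    F(r)  = \sum_t w_t \bigl[\psi(r + x_t) - \psi(r) + \log p\bigr]
    F'(r) = \sum_t w_t \bigl[\psi_1(r + x_t) - \psi_1(r)\bigr]
    r \leftarrow r - F(r)/F'(r)

with \psi the digamma and \psi_1 the trigamma function. In update() above, the derivative is instead approximated by the finite difference [F(r + dSize) - F(r)]/dSize, which is why that variant keeps DigammaSizePlusDSizePlusX while update_constrained() switches to TrigammaSizePlusX and comments out dSize.
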
@@ -642,49 +646,49 @@ void NegativeBinomial::update_constrained(double** weights, int fromState, int t |
642 | 646 |
} |
643 | 647 |
} |
644 | 648 |
this->size = size0; |
645 |
- FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
649 |
+ //FILE_LOG(logDEBUG1) << "size = "<<this->size << ", prob = "<<this->prob; |
|
646 | 650 |
this->mean = this->fmean(this->size, this->prob); |
647 | 651 |
this->variance = this->fvariance(this->size, this->prob); |
648 | 652 |
|
649 | 653 |
// dtime = clock() - time; |
650 |
-// FILE_LOG(logDEBUG1) << "updateR(): "<<dtime<< " clicks"; |
|
654 |
+// //FILE_LOG(logDEBUG1) << "updateR(): "<<dtime<< " clicks"; |
|
651 | 655 |
|
652 | 656 |
} |
653 | 657 |
|
654 | 658 |
double NegativeBinomial::fsize(double mean, double variance) |
655 | 659 |
{ |
656 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
660 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
657 | 661 |
return( mean*mean / (variance - mean) ); |
658 | 662 |
} |
659 | 663 |
|
660 | 664 |
double NegativeBinomial::fprob(double mean, double variance) |
661 | 665 |
{ |
662 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
666 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
663 | 667 |
return( mean / variance ); |
664 | 668 |
} |
665 | 669 |
|
666 | 670 |
double NegativeBinomial::fmean(double size, double prob) |
667 | 671 |
{ |
668 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
672 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
669 | 673 |
return( size / prob - size ); |
670 | 674 |
} |
671 | 675 |
|
672 | 676 |
double NegativeBinomial::fvariance(double size, double prob) |
673 | 677 |
{ |
674 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
678 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
675 | 679 |
return( (size - prob*size) / (prob*prob) ); |
676 | 680 |
} |
677 | 681 |
|
678 | 682 |
// Getter and Setter ------------------------------------------ |
679 | 683 |
double NegativeBinomial::get_mean() |
680 | 684 |
{ |
681 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
685 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
682 | 686 |
return( this->fmean( this->size, this->prob ) ); |
683 | 687 |
} |
684 | 688 |
|
685 | 689 |
void NegativeBinomial::set_mean(double mean) |
686 | 690 |
{ |
687 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
691 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
688 | 692 |
double variance = this->get_variance(); |
689 | 693 |
this->size = this->fsize( mean, variance ); |
690 | 694 |
this->prob = this->fprob( mean, variance ); |
... | ... |
@@ -692,13 +696,13 @@ void NegativeBinomial::set_mean(double mean) |
692 | 696 |
|
693 | 697 |
double NegativeBinomial::get_variance() |
694 | 698 |
{ |
695 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
699 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
696 | 700 |
return( this->fvariance( this->size, this->prob ) ); |
697 | 701 |
} |
698 | 702 |
|
699 | 703 |
void NegativeBinomial::set_variance(double variance) |
700 | 704 |
{ |
701 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
705 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
702 | 706 |
double mean = this->get_mean(); |
703 | 707 |
this->size = this->fsize( mean, variance ); |
704 | 708 |
this->prob = this->fprob( mean, variance ); |
... | ... |
@@ -706,19 +710,19 @@ void NegativeBinomial::set_variance(double variance) |
706 | 710 |
|
707 | 711 |
DensityName NegativeBinomial::get_name() |
708 | 712 |
{ |
709 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
713 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
710 | 714 |
return(NEGATIVE_BINOMIAL); |
711 | 715 |
} |
712 | 716 |
|
713 | 717 |
double NegativeBinomial::get_size() |
714 | 718 |
{ |
715 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
719 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
716 | 720 |
return(this->size); |
717 | 721 |
} |
718 | 722 |
|
719 | 723 |
double NegativeBinomial::get_prob() |
720 | 724 |
{ |
721 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
725 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
722 | 726 |
return(this->prob); |
723 | 727 |
} |
724 | 728 |
|
... | ... |
@@ -730,7 +734,7 @@ double NegativeBinomial::get_prob() |
730 | 734 |
// Constructor and Destructor --------------------------------- |
731 | 735 |
Binomial::Binomial(int* observations, int T, double size, double prob) |
732 | 736 |
{ |
733 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
737 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
734 | 738 |
this->obs = observations; |
735 | 739 |
this->T = T; |
736 | 740 |
this->size = size; |
... | ... |
@@ -739,20 +743,20 @@ Binomial::Binomial(int* observations, int T, double size, double prob) |
739 | 743 |
|
740 | 744 |
Binomial::~Binomial() |
741 | 745 |
{ |
742 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
746 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
743 | 747 |
} |
744 | 748 |
|
745 | 749 |
// Methods ---------------------------------------------------- |
746 | 750 |
void Binomial::calc_logdensities(double* logdens) |
747 | 751 |
{ |
748 |
- FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
752 |
+ //FILE_LOG(logDEBUG2) << __PRETTY_FUNCTION__; |
|
749 | 753 |
double logp = log(this->prob); |
750 | 754 |
double log1minusp = log(1-this->prob); |
751 | 755 |
// Select strategy for computing gammas |
752 | 756 |
if (this->max_obs <= this->T) |
753 | 757 |
{ |
754 |
- FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
755 |
- double logdens_per_read [this->max_obs+1]; |
|
758 |
+ //FILE_LOG(logDEBUG2) << "Precomputing densities in " << __func__ << " for every obs[t], because max(O)<=T"; |
|
759 |
+ std::vector<double> logdens_per_read (this->max_obs+1); |
|
756 | 760 |
for (int j=0; j<=this->max_obs; j++) |
757 | 761 |
{ |
758 | 762 |
logdens_per_read[j] = lchoose(this->size, j) + j * logp + (this->size-j) * log1minusp; |
... | ... |
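
The Binomial branch reuses the same precompute-per-count strategy; logdens_per_read[j] is the log binomial pmf

    \log f(j) = \log\binom{n}{j} + j\log p + (n-j)\log(1-p), \qquad n = \text{this->size},

where lchoose() is, presumably, the log binomial coefficient provided through R's Rmath API.
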
@@ -760,28 +764,28 @@ void Binomial::calc_logdensities(double* logdens) |
760 | 764 |
for (int t=0; t<this->T; t++) |
761 | 765 |
{ |
762 | 766 |
logdens[t] = logdens_per_read[(int) this->obs[t]]; |
763 |
- FILE_LOG(logDEBUG4) << "logdens["<<t<<"] = " << logdens[t]; |
|
767 |