
Fixing documentation & missing S4 code

Former-commit-id: a77b7085f3d99e7d95b8f6bcda299a6976552cc5

Sean Corbett authored on 15/12/2018 17:12:36
Showing 17 changed files

... ...
@@ -54,6 +54,7 @@ export(simulateCells.celda_G)
 export(simulateObservedMatrix)
 export(subsetCeldaList)
 export(topRank)
+export(violinPlot)
 exportMethods(celdaHeatmap)
 exportMethods(celdaProbabilityMap)
 exportMethods(celdaTsne)
... ...
@@ -61,7 +62,6 @@ exportMethods(clusterProbability)
 exportMethods(factorizeMatrix)
 exportMethods(featureModuleLookup)
 exportMethods(perplexity)
-export(violinPlot)
 import(RColorBrewer)
 import(data.table)
 import(foreach)
... ...
@@ -14,7 +14,7 @@ cC.splitZ = function(counts, m.CP.by.S, n.G.by.CP, n.CP, s, z, K, nS, nG, alpha,
 
   ## Loop through each split-able Z and perform split
   clust.split = lapply(z.to.split, function(x){
-    suppressMessages(.celda_C(counts[,z == x], K=2, max.iter=5, split.on.iter=-1, split.on.last=FALSE))$z
+    suppressMessages(.celda_C(counts[,z == x], K=2, max.iter=5, split.on.iter=-1, split.on.last=FALSE))@clusters$z
   })
 
   clust.split = vector("list", K)
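
Note on the change above: the switch from `$z` to `@clusters$z` is the "missing S4 code" referenced in the commit message. The model returned by `.celda_C()` is an S4 object, and the hunk shows that per-cell cluster labels are now read from its `clusters` slot rather than by list-style `$` extraction on the model itself. A minimal sketch of the accessor difference (variable names here are illustrative only):

    ## Sketch only: `res` stands in for the .celda_C() result shown in the hunk
    res <- suppressMessages(.celda_C(counts, K = 2, max.iter = 5))
    z <- res@clusters$z    # S4 slot access: per-cell cluster labels
    ## res$z               # old list-style access; no longer valid on the S4 object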
... ...
@@ -6,9 +6,10 @@
 \usage{
 celda_C(counts, sample.label = NULL, K, alpha = 1, beta = 1,
   algorithm = c("EM", "Gibbs"), stop.iter = 10, max.iter = 200,
-  split.on.iter = 10, split.on.last = TRUE, seed = 12345, nchains = 3,
-  initialize = c("random", "split"), count.checksum = NULL, z.init = NULL,
-  logfile = NULL, verbose = TRUE)
+  split.on.iter = 10, split.on.last = TRUE, seed = 12345,
+  nchains = 3, initialize = c("random", "split"),
+  count.checksum = NULL, z.init = NULL, logfile = NULL,
+  verbose = TRUE)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells.}
... ...
@@ -5,11 +5,11 @@
 \title{Cell and feature clustering with Celda}
 \usage{
 celda_CG(counts, sample.label = NULL, K, L, alpha = 1, beta = 1,
-  delta = 1, gamma = 1, algorithm = c("EM", "Gibbs"), stop.iter = 10,
-  max.iter = 200, split.on.iter = 10, split.on.last = TRUE,
-  seed = 12345, nchains = 3, initialize = c("random", "split"),
-  count.checksum = NULL, z.init = NULL, y.init = NULL, logfile = NULL,
-  verbose = TRUE)
+  delta = 1, gamma = 1, algorithm = c("EM", "Gibbs"),
+  stop.iter = 10, max.iter = 200, split.on.iter = 10,
+  split.on.last = TRUE, seed = 12345, nchains = 3,
+  initialize = c("random", "split"), count.checksum = NULL,
+  z.init = NULL, y.init = NULL, logfile = NULL, verbose = TRUE)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells.}
... ...
@@ -7,7 +7,8 @@
 celda_G(counts, L, beta = 1, delta = 1, gamma = 1, stop.iter = 10,
   max.iter = 200, split.on.iter = 10, split.on.last = TRUE,
   seed = 12345, nchains = 3, initialize = c("random", "split"),
-  count.checksum = NULL, y.init = NULL, logfile = NULL, verbose = TRUE)
+  count.checksum = NULL, y.init = NULL, logfile = NULL,
+  verbose = TRUE)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells.}
... ...
@@ -4,8 +4,8 @@
 \alias{differentialExpression}
 \title{Differential expression for cell subpopulations using MAST}
 \usage{
-differentialExpression(counts, celda.mod, c1, c2 = NULL, only.pos = FALSE,
-  log2fc.threshold = NULL, fdr.threshold = 1)
+differentialExpression(counts, celda.mod, c1, c2 = NULL,
+  only.pos = FALSE, log2fc.threshold = NULL, fdr.threshold = 1)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells. This matrix should be the same as the one used to generate `celda.mod`.}
... ...
@@ -4,8 +4,8 @@
 \alias{logLikelihood.celda_CG}
 \title{Calculate Celda_CG log likelihood}
 \usage{
-logLikelihood.celda_CG(counts, sample.label, z, y, K, L, alpha, beta, delta,
-  gamma)
+logLikelihood.celda_CG(counts, sample.label, z, y, K, L, alpha, beta,
+  delta, gamma)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells.}
... ...
@@ -4,9 +4,9 @@
 \alias{normalizeCounts}
 \title{Normalization of count data}
 \usage{
-normalizeCounts(counts, normalize = c("proportion", "cpm", "median", "mean"),
-  transformation.fun = NULL, scale.fun = NULL, pseudocount.normalize = 0,
-  pseudocount.transform = 0)
+normalizeCounts(counts, normalize = c("proportion", "cpm", "median",
+  "mean"), transformation.fun = NULL, scale.fun = NULL,
+  pseudocount.normalize = 0, pseudocount.transform = 0)
 }
 \arguments{
 \item{counts}{Integer matrix. Rows represent features and columns represent cells.}
... ...
@@ -4,8 +4,9 @@
 \alias{plotDimReduceCluster}
 \title{Plotting the cell labels on a dimensionality reduction plot}
 \usage{
-plotDimReduceCluster(dim1, dim2, cluster, size = 1, xlab = "Dimension_1",
-  ylab = "Dimension_2", specific_clusters = NULL)
+plotDimReduceCluster(dim1, dim2, cluster, size = 1,
+  xlab = "Dimension_1", ylab = "Dimension_2",
+  specific_clusters = NULL)
 }
 \arguments{
 \item{dim1}{Numeric vector. First dimension from data dimensionality reduction output.}
... ...
@@ -5,9 +5,9 @@
 \title{Plotting feature expression on a dimensionality reduction plot}
 \usage{
 plotDimReduceFeature(dim1, dim2, counts, features, normalize = TRUE,
-  exact.match = TRUE, trim = c(-2, 2), size = 1, xlab = "Dimension_1",
-  ylab = "Dimension_2", color_low = "grey", color_mid = NULL,
-  color_high = "blue")
+  exact.match = TRUE, trim = c(-2, 2), size = 1,
+  xlab = "Dimension_1", ylab = "Dimension_2", color_low = "grey",
+  color_mid = NULL, color_high = "blue")
 }
 \arguments{
 \item{dim1}{Numeric vector. First dimension from data dimensionality reduction output.}
... ...
@@ -4,8 +4,8 @@
 \alias{plotDimReduceGrid}
 \title{Mapping the dimensionality reduction plot}
 \usage{
-plotDimReduceGrid(dim1, dim2, matrix, size, xlab, ylab, color_low, color_mid,
-  color_high, var_label)
+plotDimReduceGrid(dim1, dim2, matrix, size, xlab, ylab, color_low,
+  color_mid, color_high, var_label)
 }
 \arguments{
 \item{dim1}{Numeric vector. First dimension from data dimensionality reduction output.}
... ...
@@ -5,8 +5,9 @@
 \title{Plotting the Celda module probability on a dimensionality reduction plot}
 \usage{
 plotDimReduceModule(dim1, dim2, counts, celda.mod, modules = NULL,
-  rescale = TRUE, size = 1, xlab = "Dimension_1", ylab = "Dimension_2",
-  color_low = "grey", color_mid = NULL, color_high = "blue")
+  rescale = TRUE, size = 1, xlab = "Dimension_1",
+  ylab = "Dimension_2", color_low = "grey", color_mid = NULL,
+  color_high = "blue")
 }
 \arguments{
 \item{dim1}{Numeric vector. First dimension from data dimensionality reduction output.}
... ...
@@ -4,9 +4,10 @@
 \alias{plotHeatmap}
 \title{Renders a heatmap based on a matrix of counts where rows are features and columns are cells.}
 \usage{
-plotHeatmap(counts, z = NULL, y = NULL, scale.row = scale, trim = c(-2,
-  2), feature.ix = NULL, cell.ix = NULL, cluster.feature = TRUE,
-  cluster.cell = TRUE, color.scheme = c("divergent", "sequential"),
+plotHeatmap(counts, z = NULL, y = NULL, scale.row = scale,
+  trim = c(-2, 2), feature.ix = NULL, cell.ix = NULL,
+  cluster.feature = TRUE, cluster.cell = TRUE,
+  color.scheme = c("divergent", "sequential"),
   color.scheme.symmetric = TRUE, color.scheme.center = 0, col = NULL,
   annotation.cell = NULL, annotation.feature = NULL,
   annotation.color = NULL, breaks = NULL, legend = TRUE,
... ...
@@ -5,19 +5,21 @@
 \title{A function to draw clustered heatmaps.}
 \usage{
 semi_pheatmap(mat, color = colorRampPalette(rev(brewer.pal(n = 7, name =
-  "RdYlBu")))(100), kmeans_k = NA, breaks = NA, border_color = "grey60",
-  cellwidth = NA, cellheight = NA, scale = "none", cluster_rows = TRUE,
-  cluster_cols = TRUE, clustering_distance_rows = "euclidean",
-  clustering_distance_cols = "euclidean", clustering_method = "complete",
-  clustering_callback = identity2, cutree_rows = NA, cutree_cols = NA,
+  "RdYlBu")))(100), kmeans_k = NA, breaks = NA,
+  border_color = "grey60", cellwidth = NA, cellheight = NA,
+  scale = "none", cluster_rows = TRUE, cluster_cols = TRUE,
+  clustering_distance_rows = "euclidean",
+  clustering_distance_cols = "euclidean",
+  clustering_method = "complete", clustering_callback = identity2,
+  cutree_rows = NA, cutree_cols = NA,
   treeheight_row = ifelse(cluster_rows, 50, 0),
   treeheight_col = ifelse(cluster_cols, 50, 0), legend = TRUE,
   legend_breaks = NA, legend_labels = NA, annotation_row = NA,
   annotation_col = NA, annotation = NA, annotation_colors = NA,
   annotation_legend = TRUE, annotation_names_row = TRUE,
-  annotation_names_col = TRUE, drop_levels = TRUE, show_rownames = TRUE,
-  show_colnames = TRUE, main = NA, fontsize = 10,
-  fontsize_row = fontsize, fontsize_col = fontsize,
+  annotation_names_col = TRUE, drop_levels = TRUE,
+  show_rownames = TRUE, show_colnames = TRUE, main = NA,
+  fontsize = 10, fontsize_row = fontsize, fontsize_col = fontsize,
   display_numbers = FALSE, number_format = "\%.2f",
   number_color = "grey30", fontsize_number = 0.8 * fontsize,
   gaps_row = NULL, gaps_col = NULL, labels_row = NULL,
... ...
@@ -5,8 +5,8 @@
 \title{This function generates a list containing two count matrices -- one for real expression, the other one for contamination, as well as other parameters 
 used in the simulation which can be useful for running decontamination}
 \usage{
-simulateObservedMatrix(C = 300, G = 100, K = 3, N.Range = c(500, 1000),
-  beta = 0.5, delta = c(1, 2), seed = 12345)
+simulateObservedMatrix(C = 300, G = 100, K = 3, N.Range = c(500,
+  1000), beta = 0.5, delta = c(1, 2), seed = 12345)
 }
 \arguments{
 \item{C}{Integer. Number of cells to be simulated. Default to be 300}
... ...
@@ -4,7 +4,8 @@
 \alias{topRank}
 \title{Identify features with the highest influence on clustering.}
 \usage{
-topRank(matrix, n = 25, margin = 2, threshold = 0, decreasing = TRUE)
+topRank(matrix, n = 25, margin = 2, threshold = 0,
+  decreasing = TRUE)
 }
 \arguments{
 \item{matrix}{Numeric matrix.}
old mode 100644
new mode 100755
... ...
@@ -67,12 +67,12 @@ static const R_CallMethodDef CallEntries[] = {
     {"_celda_fastNormProp", (DL_FUNC) &_celda_fastNormProp, 2},
     {"_celda_fastNormPropLog", (DL_FUNC) &_celda_fastNormPropLog, 2},
     {"_celda_fastNormPropSqrt", (DL_FUNC) &_celda_fastNormPropSqrt, 2},
-    {"_colSumByGroup",          (DL_FUNC) &_colSumByGroup,          2},
-    {"_colSumByGroup_numeric",  (DL_FUNC) &_colSumByGroup_numeric,  2},
-    {"_colSumByGroupChange",    (DL_FUNC) &_colSumByGroupChange,    4},
-    {"_rowSumByGroup",          (DL_FUNC) &_rowSumByGroup,          2},
-    {"_rowSumByGroup_numeric",  (DL_FUNC) &_rowSumByGroup_numeric,  2},
-    {"_rowSumByGroupChange",    (DL_FUNC) &_rowSumByGroupChange,    4},
+    {"_colSumByGroup",         (DL_FUNC) &_colSumByGroup,         2},
+    {"_colSumByGroup_numeric", (DL_FUNC) &_colSumByGroup_numeric, 2},
+    {"_colSumByGroupChange",   (DL_FUNC) &_colSumByGroupChange,   4},
+    {"_rowSumByGroup",         (DL_FUNC) &_rowSumByGroup,         2},
+    {"_rowSumByGroup_numeric", (DL_FUNC) &_rowSumByGroup_numeric, 2},
+    {"_rowSumByGroupChange",   (DL_FUNC) &_rowSumByGroupChange,   4},
     {NULL, NULL, 0}
 };