
- Recently added assayName parameter removed from crossValidate (use characteristicsLabel instead).
- All references to outcomes now use outcome instead.

Dario Strbenac authored on 22/07/2022 12:00:33
Showing 48 changed files
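
The renames ripple through the whole public API. A minimal sketch of the updated calls, using made-up toy data (the object names and values below are illustrative, not from this commit):

library(ClassifyR)

# Toy data: 20 samples, 25 features. For matrix input, rows are samples.
measurements <- matrix(rnorm(20 * 25), nrow = 20,
                       dimnames = list(paste0("sample", 1:20), paste0("gene", 1:25)))
classes <- factor(rep(c("Healthy", "Disease"), each = 10))

# Old call (pre-commit): crossValidate(measurements, outcomes = classes, assayName = "RNA-seq")
# New call: the parameter is singular and assayName is gone.
result <- crossValidate(measurements, outcome = classes)

actualOutcome(result)   # Accessor renamed from actualOutcomes().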

... ...
@@ -21,7 +21,7 @@ export(SVMtrainInterface)
 export(SelectParams)
 export(TrainParams)
 export(TransformParams)
-export(actualOutcomes)
+export(actualOutcome)
 export(bartlettRanking)
 export(calcCVperformance)
 export(calcExternalPerformance)
... ...
@@ -106,7 +106,7 @@ exportMethods(SVMtrainInterface)
 exportMethods(SelectParams)
 exportMethods(TrainParams)
 exportMethods(TransformParams)
-exportMethods(actualOutcomes)
+exportMethods(actualOutcome)
 exportMethods(bartlettRanking)
 exportMethods(calcCVperformance)
 exportMethods(calcExternalPerformance)
... ...
@@ -98,7 +98,7 @@ setMethod("ROCplot", "list",
  mode <- match.arg(mode)
 
  ggplot2::theme_set(ggplot2::theme_classic() + ggplot2::theme(panel.border = ggplot2::element_rect(fill = NA)))
-  distinctClasses <- levels(actualOutcomes(results[[1]]))
+  distinctClasses <- levels(actualOutcome(results[[1]]))
  numberDistinctClasses <- length(distinctClasses)
  comparisonName <- comparison
  comparisonValues <- sapply(results, function(result) result@characteristics[match(comparisonName, result@characteristics[, "characteristic"]), "value"])
... ...
@@ -124,7 +124,7 @@ setMethod("ROCplot", "list",
 
     allPRlist <- lapply(predictionsList, function(predictions)
     {
-      actualClasses <- actualOutcomes(result)[match(predictions[, "sample"], sampleNames(result))]
+      actualClasses <- actualOutcome(result)[match(predictions[, "sample"], sampleNames(result))]
       do.call(rbind, lapply(levels(actualClasses), function(class)
       {
         totalPositives <- sum(actualClasses == class)
... ...
@@ -60,8 +60,8 @@
 #' \item{\code{"Sample C-index"}: Per-individual C-index.}
 #' }
 #' 
-#' @param actualOutcomes A factor vector or survival information specifying each sample's known outcome.
-#' @param predictedOutcomes A factor vector or survival information of the same length as \code{actualOutcomes} specifying each sample's predicted outcome.
+#' @param actualOutcome A factor vector or survival information specifying each sample's known outcome.
+#' @param predictedOutcome A factor vector or survival information of the same length as \code{actualOutcome} specifying each sample's predicted outcome.
 #' 
 #' @return If \code{calcCVperformance} was run, an updated
 #' \code{\linkS4class{ClassifyResult}} object, with new metric values in the
... ...
@@ -86,13 +86,13 @@
 #' @rdname calcPerformance
 #' @usage NULL
 #' @export
-setGeneric("calcExternalPerformance", function(actualOutcomes, predictedOutcomes, ...)
+setGeneric("calcExternalPerformance", function(actualOutcome, predictedOutcome, ...)
 standardGeneric("calcExternalPerformance"))
 
 #' @rdname calcPerformance
 #' @exportMethod calcExternalPerformance
 setMethod("calcExternalPerformance", c("factor", "factor"),
-          function(actualOutcomes, predictedOutcomes, # Both are classes.
+          function(actualOutcome, predictedOutcome, # Both are classes.
                    performanceType = c("Balanced Accuracy", "Balanced Error", "Error", "Accuracy",
                                        "Sample Error", "Sample Accuracy",
                                        "Micro Precision", "Micro Recall",
... ...
@@ -100,19 +100,19 @@ setMethod("calcExternalPerformance", c("factor", "factor"),
                                        "Macro Recall", "Macro F1", "Matthews Correlation Coefficient"))
 {
   performanceType <- match.arg(performanceType)
-  if(length(levels(actualOutcomes)) > 2 && performanceType == "Matthews Correlation Coefficient")
+  if(length(levels(actualOutcome)) > 2 && performanceType == "Matthews Correlation Coefficient")
     stop("Error: Matthews Correlation Coefficient specified but data set has more than 2 classes.")
-  if(is(predictedOutcomes, "factor")) levels(predictedOutcomes) <- levels(actualOutcomes)
-  .calcPerformance(list(actualOutcomes), list(predictedOutcomes), performanceType = performanceType)[["values"]]
+  if(is(predictedOutcome, "factor")) levels(predictedOutcome) <- levels(actualOutcome)
+  .calcPerformance(list(actualOutcome), list(predictedOutcome), performanceType = performanceType)[["values"]]
 })
 
 #' @rdname calcPerformance
 #' @exportMethod calcExternalPerformance
 setMethod("calcExternalPerformance", c("Surv", "numeric"),
-          function(actualOutcomes, predictedOutcomes, performanceType = "C-index")
+          function(actualOutcome, predictedOutcome, performanceType = "C-index")
           {
             performanceType <- match.arg(performanceType)
-            .calcPerformance(actualOutcomes, predictedOutcomes, performanceType = performanceType)[["values"]]
+            .calcPerformance(actualOutcome, predictedOutcome, performanceType = performanceType)[["values"]]
           })
 
 #' @rdname calcPerformance
... ...
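Both calcExternalPerformance methods above take the renamed singular arguments. A small self-contained sketch with toy vectors (illustrative only, assuming the package defaults shown in the diff):

library(ClassifyR)
library(survival)

# factor,factor method: compare known and predicted classes.
known <- factor(c("A", "A", "B", "B", "A", "B"))
predicted <- factor(c("A", "B", "B", "B", "A", "A"))
calcExternalPerformance(actualOutcome = known, predictedOutcome = predicted,
                        performanceType = "Balanced Accuracy")

# Surv,numeric method: higher predicted risk should pair with shorter survival.
followUp <- Surv(time = c(5, 8, 2, 9, 4, 7), event = c(1, 0, 1, 1, 0, 1))
risks <- c(0.9, 0.2, 1.4, 0.1, 0.8, 0.3)
calcExternalPerformance(actualOutcome = followUp, predictedOutcome = risks,
                        performanceType = "C-index")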
@@ -132,7 +132,7 @@ setMethod("calcCVperformance", "ClassifyResult",
                                                "C-index", "Sample C-index"))
 {
   performanceType <- match.arg(performanceType)
-  actualOutcomes <- actualOutcomes(result) # Extract the known outcomes of all samples.
+  actualOutcome <- actualOutcome(result) # Extract the known outcome of each sample.
   
   ### Group by permutation
   if(!performanceType %in% c("Sample Error", "Sample Accuracy"))
... ...
@@ -146,8 +146,8 @@ setMethod("calcCVperformance", "ClassifyResult",
   ### Performance for survival data
   if(performanceType %in% c("C-index", "Sample C-index")) {
     samples <- factor(result@predictions[, "sample"], levels = sampleNames(result))
-    performance <- .calcPerformance(actualOutcomes = actualOutcomes[match(result@predictions[, "sample"], sampleNames(result))],
-                                    predictedOutcomes = result@predictions[, "risk"], 
+    performance <- .calcPerformance(actualOutcome = actualOutcome[match(result@predictions[, "sample"], sampleNames(result))],
+                                    predictedOutcome = result@predictions[, "risk"], 
                                     samples = samples,
                                     performanceType = performanceType, 
                                     grouping = grouping)
... ...
@@ -156,28 +156,28 @@ setMethod("calcCVperformance", "ClassifyResult",
   }
   
   if(performanceType == "AUC") {
-    performance <- .calcPerformance(actualOutcomes[match(result@predictions[, "sample"], sampleNames(result))],
-                                    result@predictions[, levels(actualOutcomes)],
+    performance <- .calcPerformance(actualOutcome[match(result@predictions[, "sample"], sampleNames(result))],
+                                    result@predictions[, levels(actualOutcome)],
                                     performanceType = performanceType, grouping = grouping)
     result@performance[[performance[["name"]]]] <- performance[["values"]]
     return(result)
   }
   
   ### Performance for data with classes
-  if(length(levels(actualOutcomes)) > 2 && performanceType == "Matthews Correlation Coefficient")
+  if(length(levels(actualOutcome)) > 2 && performanceType == "Matthews Correlation Coefficient")
     stop("Error: Matthews Correlation Coefficient specified but data set has more than 2 classes.")
 
-  classLevels <- levels(actualOutcomes)
+  classLevels <- levels(actualOutcome)
   samples <- factor(result@predictions[, "sample"], levels = sampleNames(result))
-  predictedOutcomes <- factor(result@predictions[, "class"], levels = classLevels)
-  actualOutcomes <- factor(actualOutcomes[match(result@predictions[, "sample"], sampleNames(result))], levels = classLevels, ordered = TRUE)
-  performance <- .calcPerformance(actualOutcomes, predictedOutcomes, samples, performanceType, grouping)
+  predictedOutcome <- factor(result@predictions[, "class"], levels = classLevels)
+  actualOutcome <- factor(actualOutcome[match(result@predictions[, "sample"], sampleNames(result))], levels = classLevels, ordered = TRUE)
+  performance <- .calcPerformance(actualOutcome, predictedOutcome, samples, performanceType, grouping)
   result@performance[[performance[["name"]]]] <- performance[["values"]]
   result
 })
 
 #' @importFrom survival concordance
-.calcPerformance <- function(actualOutcomes, predictedOutcomes, samples = NA, performanceType, grouping = NULL)
+.calcPerformance <- function(actualOutcome, predictedOutcome, samples = NA, performanceType, grouping = NULL)
 {
   if(performanceType %in% c("Sample Error", "Sample Accuracy"))
   {
... ...
@@ -186,9 +186,9 @@ setMethod("calcCVperformance", "ClassifyResult",
     {
       consider <- which(samples == sampleID)
       if(performanceType == "Sample Error")
-        sum(predictedOutcomes[consider] != as.character(actualOutcomes[consider]))
+        sum(predictedOutcome[consider] != as.character(actualOutcome[consider]))
       else
-        sum(predictedOutcomes[consider] == as.character(actualOutcomes[consider]))
+        sum(predictedOutcome[consider] == as.character(actualOutcome[consider]))
     })
     performanceValues <- as.numeric(sampleMetricValues / table(samples))
     names(performanceValues) <- levels(samples)
... ...
@@ -197,8 +197,8 @@ setMethod("calcCVperformance", "ClassifyResult",
     
   if(!is.null(grouping))
   {
-    actualOutcomes <- split(actualOutcomes, grouping)
-    predictedOutcomes <- split(predictedOutcomes, grouping)
+    actualOutcome <- split(actualOutcome, grouping)
+    predictedOutcome <- split(predictedOutcome, grouping)
     allSamples <- levels(samples)
     samples <- split(samples, grouping)
   }
... ...
@@ -231,7 +231,7 @@ setMethod("calcCVperformance", "ClassifyResult",
         }
         data.frame(sample = sampleID, concordant = concordants, discordant = discordants)
       }))
-    }, actualOutcomes, predictedOutcomes, samples, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, samples, SIMPLIFY = FALSE))
 
     sampleValues <- by(performanceValues[, c("concordant", "discordant")], performanceValues[, "sample"], colSums)
     Cindex <- round(sapply(sampleValues, '[', 1) / (sapply(sampleValues, '[', 1) + sapply(sampleValues, '[', 2)), 2)
... ...
@@ -240,8 +240,8 @@ setMethod("calcCVperformance", "ClassifyResult",
     return(list(name = performanceType, values = Cindex))
   }
   
-  if(!is(actualOutcomes, "list")) actualOutcomes <- list(actualOutcomes)
-  if(!is(predictedOutcomes, "list")) predictedOutcomes <- list(predictedOutcomes)
+  if(!is(actualOutcome, "list")) actualOutcome <- list(actualOutcome)
+  if(!is(predictedOutcome, "list")) predictedOutcome <- list(predictedOutcome)
   
 
   if(performanceType %in% c("Accuracy", "Error")) {
... ...
@@ -257,7 +257,7 @@ setMethod("calcCVperformance", "ClassifyResult",
         correctPredictions / totalPredictions
       else # It is "error".
         wrongPredictions / totalPredictions
-    }, actualOutcomes, predictedOutcomes, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, SIMPLIFY = FALSE))
   } else if(performanceType %in% c("Balanced Accuracy", "Balanced Error")) {
     performanceValues <- unlist(mapply(function(iterationClasses, iterationPredictions)
     {
... ...
@@ -269,7 +269,7 @@ setMethod("calcCVperformance", "ClassifyResult",
         mean(diag(confusionMatrix) / classSizes)
       else
         mean(classErrors / classSizes)
-    }, actualOutcomes, predictedOutcomes, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, SIMPLIFY = FALSE))
   } else if(performanceType %in% c("AUC")) {
     performanceValues <- unlist(mapply(function(iterationClasses, iterationPredictions)
     {
... ...
@@ -290,14 +290,14 @@ setMethod("calcCVperformance", "ClassifyResult",
         rates <- rbind(data.frame(FPR = 0, TPR = 0, class = class), rates)
         rates
       }))
-      classesAUC <- .calcArea(classesTable, levels(actualOutcomes[[1]]))
+      classesAUC <- .calcArea(classesTable, levels(actualOutcome[[1]]))
       mean(classesAUC[!duplicated(classesAUC[, c("class", "AUC")]), "AUC"]) # Average AUC in iteration.
-    }, actualOutcomes, predictedOutcomes, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, SIMPLIFY = FALSE))
   } else if(performanceType %in% c("C-index")) {
     performanceValues <- unlist(mapply(function(x, y){
       y <- -y
       survival::concordance(x ~ y)$concordance
-    }, actualOutcomes, predictedOutcomes, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, SIMPLIFY = FALSE))
 
     } else { # Metrics for which true positives, true negatives, false positives, false negatives must be calculated.
     performanceValues <- unlist(mapply(function(iterationClasses, iterationPredictions)
... ...
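The C-index branch above negates the predicted risks before calling survival::concordance, because concordance() counts a pair as concordant when larger right-hand-side values go with longer survival, while ClassifyR risk scores are higher for shorter survival. A standalone sketch of the same computation, with toy data and assuming only the survival package:

library(survival)
set.seed(1)
time <- rexp(30, rate = 0.1)
event <- rbinom(30, 1, 0.8)
risk <- -time + rnorm(30, sd = 1)   # Higher risk goes with shorter survival.
y <- -risk                          # The negation done inside .calcPerformance.
survival::concordance(Surv(time, event) ~ y)$concordance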
@@ -345,7 +345,7 @@ setMethod("calcCVperformance", "ClassifyResult",
         return(unname((truePositives[2] * trueNegatives[2] - falsePositives[2] * falseNegatives[2]) / sqrt((truePositives[2] + falsePositives[2]) * (truePositives[2] + falseNegatives[2]) * (trueNegatives[2] + falsePositives[2]) * (trueNegatives[2] + falseNegatives[2]))))
       }
       
-    }, actualOutcomes, predictedOutcomes, SIMPLIFY = FALSE))
+    }, actualOutcome, predictedOutcome, SIMPLIFY = FALSE))
   }
 
   list(name = performanceType, values = performanceValues)
... ...
@@ -1228,8 +1228,8 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL"))
 #' ClassifyResult,DataFrame,character,characterOrDataFrame-method
 #' show,ClassifyResult-method sampleNames sampleNames,ClassifyResult-method
 #' featuresInfo featuresInfo,ClassifyResult-method
-#' predictions predictions,ClassifyResult-method actualOutcomes
-#' actualOutcomes,ClassifyResult-method features features,ClassifyResult-method
+#' predictions predictions,ClassifyResult-method actualOutcome
+#' actualOutcome,ClassifyResult-method features features,ClassifyResult-method
 #' models models,ClassifyResult-method performance
 #' performance,ClassifyResult-method tunedParameters
 #' tunedParameters,ClassifyResult-method totalPredictions
... ...
@@ -1238,7 +1238,7 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL"))
 #' 
 #' @section Constructor:
 #' \preformatted{ClassifyResult(characteristics, originalNames, originalFeatures,
-#'               rankedFeatures, chosenFeatures, models, tunedParameters, predictions, actualOutcomes, importance = NULL, modellingParams = NULL, finalModel = NULL)}
+#'               rankedFeatures, chosenFeatures, models, tunedParameters, predictions, actualOutcome, importance = NULL, modellingParams = NULL, finalModel = NULL)}
 #' \describe{
 #' \item{\code{characteristics}}{A \code{\link{DataFrame}} describing the
 #' characteristics of classification done. First column must be named
... ...
@@ -1257,7 +1257,7 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL"))
 #' \item{\code{tunedParameters}}{Names of tuning parameters and the value chosen of each parameter.}
 #' \item{\code{predictions}}{A data frame containing sample IDs, predicted class or risk and information about the 
 #' cross-validation iteration in which the prediction was made.}
-#' \item{\code{actualOutcomes}}{The known class or survival data of each sample.}
+#' \item{\code{actualOutcome}}{The known class or survival data of each sample.}
 #' \item{\code{importance}}{The changes in model performance for each selected variable when it is excluded.}
 #' \item{\code{modellingParams}}{Stores the object used for defining the model building to enable future reuse.}
 #' \item{\code{finalModel}}{A model built using all of the samples for future use. For any tuning parameters, the
... ...
@@ -1278,7 +1278,7 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL"))
 #' \describe{
 #' \item{\code{featuresInfo(result)}}{Returns a table of features present in the data set. Shows original names and renamed names to ensure no unusual symbols in names.}}
 #' \describe{
-#' \item{\code{actualOutcomes(result)}}{Returns the known outcomes of each sample.}}
+#' \item{\code{actualOutcome(result)}}{Returns the known outcome of each sample.}}
 #' \describe{
 #' \item{\code{models(result)}}{A \code{list} of the models fitted for each training.}}
 #' \describe{
... ...
@@ -1328,7 +1328,7 @@ setClass("ClassifyResult", representation(
   featuresInfo = "DataFrame",
   rankedFeatures = "listOrNULL",
   chosenFeatures = "listOrNULL",
-  actualOutcomes = "factorOrSurv",
+  actualOutcome = "factorOrSurv",
   models = "list",
   tune = "listOrNULL",
   predictions = "DataFrame",
... ...
@@ -1342,13 +1342,13 @@ setClass("ClassifyResult", representation(
 #' @export
 setMethod("ClassifyResult", c("DataFrame", "character", "characterOrDataFrame"),
           function(characteristics, originalNames, featuresInfo,
-                   rankedFeatures, chosenFeatures, models, tunedParameters, predictions, actualOutcomes, importance = NULL, modellingParams = NULL, finalModel = NULL)
+                   rankedFeatures, chosenFeatures, models, tunedParameters, predictions, actualOutcome, importance = NULL, modellingParams = NULL, finalModel = NULL)
           {
             new("ClassifyResult", characteristics = characteristics,
                 originalNames = originalNames, featuresInfo = featuresInfo,
                 rankedFeatures = rankedFeatures, chosenFeatures = chosenFeatures,
                 models = models, tune = tunedParameters,
-                predictions = predictions, actualOutcomes = actualOutcomes, importance = importance, modellingParams = modellingParams, finalModel = finalModel)
+                predictions = predictions, actualOutcome = actualOutcome, importance = importance, modellingParams = modellingParams, finalModel = finalModel)
           })
 
 #' @usage NULL
... ...
@@ -1463,16 +1463,16 @@ setMethod("performance", c("ClassifyResult"),
 
 #' @export
 #' @usage NULL
-setGeneric("actualOutcomes", function(object, ...)
-standardGeneric("actualOutcomes"))
+setGeneric("actualOutcome", function(object, ...)
+standardGeneric("actualOutcome"))
 
 #' @rdname ClassifyResult-class
 #' @usage NULL
 #' @export
-setMethod("actualOutcomes", c("ClassifyResult"),
+setMethod("actualOutcome", c("ClassifyResult"),
           function(object)
           {
-            object@actualOutcomes
+            object@actualOutcome
           })
 
 #' @export
... ...
@@ -7,16 +7,13 @@
 #' or a list of these objects containing the training data.  For a
 #' \code{matrix} and \code{data.frame}, the rows are samples and the columns are features. For a \code{data.frame} or \code{\link{MultiAssayExperiment}} assay
 #' the rows are features and the columns are samples, as is typical in Bioconductor.
-#' @param outcomes A vector of class labels of class \code{\link{factor}} of the
+#' @param outcome A vector of class labels of class \code{\link{factor}} of the
 #' same length as the number of samples in \code{measurements} or a character vector of length 1 containing the
 #' column name in \code{measurements} if it is a \code{\link{DataFrame}} or the
 #' column name in \code{colData(measurements)} if \code{measurements} is a \code{\link{MultiAssayExperiment}}. If a column name, that column will be
 #' removed before training. Or a \code{\link{Surv}} object or a character vector of length 2 or 3 specifying the time and event columns in
 #' \code{measurements} for survival outcome.
-#' @param ... Arguments other than measurements and outcomes in the generic.
-#' @param assayName An informative name describing the data (e.g. RNA-seq) table if the input is a data frame or matrix. Not used if input
-#' is \code{MultiAssayExperiment} or other list-like structure because it will already have assay names in the experiment list. This
-#' name will be stored in the characteristics table of the result as Assay Name characteristic.
+#' @param ... Arguments other than measurements and outcome in the generic.
 #' @param nFeatures The number of features to be used for classification. If this is a single number, the same number of features will be used for all comparisons
 #' or assays. If a numeric vector these will be optimised over using \code{selectionOptimisation}. If a named vector with the same names of multiple assays, 
 #' a different number of features will be used for each assay. If a named list of vectors, the respective number of features will be optimised over. 
... ...
@@ -81,15 +78,14 @@
 #' # performancePlot(c(result, resultMerge))
 #' 
 #' @importFrom survival Surv
-setGeneric("crossValidate", function(measurements, outcomes, ...)
+setGeneric("crossValidate", function(measurements, outcome, ...)
     standardGeneric("crossValidate"))
 
 #' @rdname crossValidate
 #' @export
 setMethod("crossValidate", "DataFrame", 
           function(measurements,
-                   outcomes,
-                   assayName = NULL,
+                   outcome,
                    nFeatures = 20,
                    selectionMethod = "t-test",
                    selectionOptimisation = "Resubstitution",
... ...
@@ -103,16 +99,16 @@ setMethod("crossValidate", "DataFrame",
 
           {
               # Check that data is in the right format
-              splitAssay <- .splitDataAndOutcomes(measurements, outcomes)
+              splitAssay <- .splitDataAndOutcome(measurements, outcome)
               measurements <- splitAssay[["measurements"]]
-              outcomes <- splitAssay[["outcomes"]]
+              outcome <- splitAssay[["outcome"]]
               
               # Which data-types or data-views are present?
               assayIDs <- unique(mcols(measurements)[, "assay"])
               if(is.null(assayIDs))
                 assayIDs <- 1
               
-              checkData(measurements, outcomes)
+              checkData(measurements, outcome)
 
               # Check that other variables are in the right format and fix
               nFeatures <- cleanNFeatures(nFeatures = nFeatures,
... ...
@@ -161,10 +157,9 @@ setMethod("crossValidate", "DataFrame",
                                   # Loop over classifiers
                                   set.seed(seed)
                                   measurementsUse <- measurements
-                                  if(!is.null(assayName)) attr(measurementsUse, "assayName") <- assayName
                                   if(assayIndex != 1) measurementsUse <- measurements[, mcols(measurements)[, "assay"] == assayIndex, drop = FALSE]
                                   CV(
-                                      measurements = measurementsUse, outcomes = outcomes,
+                                      measurements = measurementsUse, outcome = outcome,
                                       assayIDs = assayIndex,
                                       nFeatures = nFeatures[assayIndex],
                                       selectionMethod = selectionIndex,
... ...
@@ -206,7 +201,7 @@ setMethod("crossValidate", "DataFrame",
 
                   result <- sapply(assayCombinations, function(assayIndex){
                       CV(measurements = measurements[, mcols(measurements)[["assay"]] %in% assayIndex],
-                         outcomes = outcomes, assayIDs = assayIndex,
+                         outcome = outcome, assayIDs = assayIndex,
                          nFeatures = nFeatures[assayIndex],
                          selectionMethod = selectionMethod[assayIndex],
                          selectionOptimisation = selectionOptimisation,
... ...
@@ -239,7 +234,7 @@ setMethod("crossValidate", "DataFrame",
 
                   result <- sapply(assayCombinations, function(assayIndex){
                       CV(measurements = measurements[, mcols(measurements)[["assay"]] %in% assayIndex],
-                         outcomes = outcomes, assayIDs = assayIndex,
+                         outcome = outcome, assayIDs = assayIndex,
                          nFeatures = nFeatures[assayIndex],
                          selectionMethod = selectionMethod[assayIndex],
                          selectionOptimisation = selectionOptimisation,
... ...
@@ -272,7 +267,7 @@ setMethod("crossValidate", "DataFrame",
 
                   result <- sapply(assayCombinations, function(assayIndex){
                       CV(measurements = measurements[, mcols(measurements)$assay %in% assayIndex],
-                         outcomes = outcomes, assayIDs = assayIndex,
+                         outcome = outcome, assayIDs = assayIndex,
                          nFeatures = nFeatures[assayIndex],
                          selectionMethod = selectionMethod[assayIndex],
                          selectionOptimisation = selectionOptimisation,
... ...
@@ -296,7 +291,7 @@ setMethod("crossValidate", "matrix", # Matrix of numeric measurements.
 # One or more omics data sets, possibly with clinical data.
 setMethod("crossValidate", "MultiAssayExperiment",
           function(measurements,
-                   outcomes, 
+                   outcome, 
                    nFeatures = 20,
                    selectionMethod = "t-test",
                    selectionOptimisation = "Resubstitution",
... ...
@@ -316,12 +311,12 @@ setMethod("crossValidate", "MultiAssayExperiment",
                       stop("Data set contains replicates. Please remove or average replicate observations and try again.")
               }
               
-              tablesAndoutcomes <- .MAEtoWideTable(measurements, targets, outcomes, restrict = NULL)
-              measurements <- tablesAndoutcomes[["dataTable"]]
-              outcomes <- tablesAndoutcomes[["outcomes"]]
+              tablesAndoutcome <- .MAEtoWideTable(measurements, targets, outcome, restrict = NULL)
+              measurements <- tablesAndoutcome[["dataTable"]]
+              outcome <- tablesAndoutcome[["outcome"]]
 
               crossValidate(measurements = measurements,
-                            outcomes = outcomes, 
+                            outcome = outcome, 
                             nFeatures = nFeatures,
                             selectionMethod = selectionMethod,
                             selectionOptimisation = selectionOptimisation,
... ...
@@ -338,8 +333,7 @@ setMethod("crossValidate", "MultiAssayExperiment",
 #' @export
 setMethod("crossValidate", "data.frame", # data.frame of numeric measurements.
           function(measurements,
-                   outcomes, 
-                   assayName = NULL,
+                   outcome, 
                    nFeatures = 20,
                    selectionMethod = "t-test",
                    selectionOptimisation = "Resubstitution",
... ...
@@ -353,8 +347,7 @@ setMethod("crossValidate", "data.frame", # data.frame of numeric measurements.
           {
               measurements <- DataFrame(measurements)
               crossValidate(measurements = measurements,
-                            outcomes = outcomes,
-                            assayName = assayName,
+                            outcome = outcome,
                             nFeatures = nFeatures,
                             selectionMethod = selectionMethod,
                             selectionOptimisation = selectionOptimisation,
... ...
@@ -371,8 +364,7 @@ setMethod("crossValidate", "data.frame", # data.frame of numeric measurements.
 #' @export
 setMethod("crossValidate", "matrix", # Matrix of numeric measurements.
           function(measurements,
-                   outcomes,
-                   assayName = NULL,
+                   outcome,
                    nFeatures = 20,
                    selectionMethod = "t-test",
                    selectionOptimisation = "Resubstitution",
... ...
@@ -386,8 +378,7 @@ setMethod("crossValidate", "matrix", # Matrix of numeric measurements.
           {
               measurements <- S4Vectors::DataFrame(measurements, check.names = FALSE)
               crossValidate(measurements = measurements,
-                            outcomes = outcomes,
-                            assayName = assayName,
+                            outcome = outcome,
                             nFeatures = nFeatures,
                             selectionMethod = selectionMethod,
                             selectionOptimisation = selectionOptimisation,
... ...
@@ -407,7 +398,7 @@ setMethod("crossValidate", "matrix", # Matrix of numeric measurements.
 #' @export
 setMethod("crossValidate", "list",
           function(measurements,
-                   outcomes, 
+                   outcome, 
                    nFeatures = 20,
                    selectionMethod = "t-test",
                    selectionOptimisation = "Resubstitution",
... ...
@@ -439,9 +430,9 @@ setMethod("crossValidate", "list",
                   stop("All datasets must have the same number of samples")
               }
               
-              # Check the number of classes is the same
-              if ((measurements[[1]] |> dim())[1] != length(classes)) {
-                  stop("Classes must have same number of samples as measurements")
+              # Check that the outcome length matches the number of samples
+              if ((measurements[[1]] |> dim())[1] != length(outcome)) {
+                  stop("outcome must have the same number of samples as measurements")
               }
               
               df_list <- sapply(measurements, t, simplify = FALSE)
... ...
@@ -458,7 +449,7 @@ setMethod("crossValidate", "list",
               colnames(combined_df) <- mcols(combined_df)$feature
               
               crossValidate(measurements = combined_df,
-                            outcomes = outcomes, 
+                            outcome = outcome, 
                             nFeatures = nFeatures,
                             selectionMethod = selectionMethod,
                             selectionOptimisation = selectionOptimisation,
... ...
@@ -563,7 +554,7 @@ generateCrossValParams <- function(nRepeats, nFolds, nCores, selectionOptimisati
 
 ######################################
 ######################################
-checkData <- function(measurements, outcomes){
+checkData <- function(measurements, outcome){
     if(is.null(rownames(measurements)))
         stop("'measurements' DataFrame must have sample identifiers as its row names.")
     if(any(is.na(measurements)))
... ...
@@ -702,8 +693,8 @@ generateModellingParams <- function(assayIDs,
 
     #
     # if(multiViewMethod == "prevalidation"){
-    #     params$trainParams <- function(measurements, outcomes) prevalTrainInterface(measurements, outcomes, params)
-    #     params$trainParams <- function(measurements, outcomes) prevalTrainInterface(measurements, outcomes, params)
+    #     params$trainParams <- function(measurements, outcome) prevalTrainInterface(measurements, outcome, params)
+    #     params$trainParams <- function(measurements, outcome) prevalTrainInterface(measurements, outcome, params)
     # }
     #
 
... ...
@@ -849,7 +840,7 @@ generateMultiviewParams <- function(assayIDs,
 
 
 CV <- function(measurements,
-               outcomes,
+               outcome,
                assayIDs,
                nFeatures = NULL,
                selectionMethod = "t-test",
... ...
@@ -864,7 +855,7 @@ CV <- function(measurements,
 
 {
     # Check that data is in the right format
-    checkData(measurements, outcomes)
+    checkData(measurements, outcome)
     
     # Check that other variables are in the right format and fix
     nFeatures <- cleanNFeatures(nFeatures = nFeatures,
... ...
@@ -893,12 +884,12 @@ CV <- function(measurements,
                                                classifier = classifier,
                                                multiViewMethod = multiViewMethod
     )
-    if(assayIDs != 1) assayText <- assayIDs else if(!is.null(attr(measurements, "assayName"))) assayText <- attr(measurements, "assayName") else assayText <- NULL
+    if(length(assayIDs) > 1 || length(assayIDs) == 1 && assayIDs != 1) assayText <- assayIDs else assayText <- NULL
     characteristics <- S4Vectors::DataFrame(characteristic = c(if(!is.null(assayText)) "Assay Name" else NULL, "Classifier Name", "Selection Name", "multiViewMethod", "characteristicsLabel"), value = c(if(!is.null(assayText)) paste(assayText, collapse = ", ") else NULL, paste(classifier, collapse = ", "),  paste(selectionMethod, collapse = ", "), multiViewMethod, characteristicsLabel))
 
-    classifyResults <- runTests(measurements, outcomes, crossValParams = crossValParams, modellingParams = modellingParams, characteristics = characteristics)
+    classifyResults <- runTests(measurements, outcome, crossValParams = crossValParams, modellingParams = modellingParams, characteristics = characteristics)
     
-    fullResult <- runTest(measurements, outcomes, measurements, outcomes, crossValParams = crossValParams, modellingParams = modellingParams, characteristics = characteristics, .iteration = 1)
+    fullResult <- runTest(measurements, outcome, measurements, outcome, crossValParams = crossValParams, modellingParams = modellingParams, characteristics = characteristics, .iteration = 1)
 
     classifyResults@finalModel <- list(fullResult$models)
     classifyResults
... ...
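The replacement condition above avoids the removed "assayName" attribute: an assay label is only recorded in the characteristics table when the IDs are informative, and the single placeholder ID 1 (used when the data has no assay annotation) is skipped. A hypothetical helper isolating just that test, for illustration:

assayTextFor <- function(assayIDs) {
  # Keep the IDs unless they are the single placeholder value 1.
  if(length(assayIDs) > 1 || length(assayIDs) == 1 && assayIDs != 1)
    assayIDs
  else
    NULL
}
assayTextFor(1)                    # NULL: unnamed single assay.
assayTextFor("RNA")                # "RNA"
assayTextFor(c("RNA", "protein"))  # Both names kept for the characteristics table.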
@@ -922,8 +913,4 @@ setMethod("predict", "ClassifyResult",
           function(object, newData)
           {
               object@modellingParams@predictParams@predictor(object@finalModel[[1]], newData)
-          })
-
-
-
-
+          })
\ No newline at end of file
... ...
@@ -86,7 +86,7 @@ setMethod("distribution", "ClassifyResult",
   {
     errors <- by(allPredictions, allPredictions[, "sample"], function(samplePredicitons)
               {
-                sampleClass <- rep(actualOutcomes(result)[samplePredicitons[1, 1]], nrow(samplePredicitons))
+                sampleClass <- rep(actualOutcome(result)[samplePredicitons[1, 1]], nrow(samplePredicitons))
                 confusion <- table(samplePredicitons[, 2], sampleClass)
                 (confusion[upper.tri(confusion)] + confusion[lower.tri(confusion)]) /
                 (sum(diag(confusion)) + confusion[upper.tri(confusion)] + confusion[lower.tri(confusion)])
... ...
@@ -21,7 +21,7 @@
 #' name of the data table to be used.
 #' @param classesColumn If \code{measurementsTrain} is a \code{MultiAssayExperiment}, the
 #' name of the class column in the table extracted by \code{colData(multiAssayExperiment)}
-#' that contains the samples' outcomes to use for prediction.
+#' that contains each sample's outcome to use for prediction.
 #' @param ... Variables not used by the \code{matrix} nor the
 #' \code{MultiAssayExperiment} method which are passed into and used by the
 #' \code{DataFrame} method.
... ...
@@ -82,8 +82,8 @@ setMethod("classifyInterface", "DataFrame", function(countsTrain, classesTrain,
   returnType <- match.arg(returnType)
   
   # Ensure that any non-integer variables are removed from the training and testing matrices.
-  splitDataset <- .splitDataAndOutcomes(countsTrain, classesTrain, restrict = "integer")
-  classesTrain <- splitDataset[["outcomes"]]
+  splitDataset <- .splitDataAndOutcome(countsTrain, classesTrain, restrict = "integer")
+  classesTrain <- splitDataset[["outcome"]]
   trainingMatrix <- as.matrix(splitDataset[["measurements"]])
   isInteger <- sapply(countsTest, is.integer)
   testingMatrix <- as.matrix(countsTest[, isInteger, drop = FALSE])
... ...
@@ -106,9 +106,9 @@ setMethod("classifyInterface", "DataFrame", function(countsTrain, classesTrain,
 setMethod("classifyInterface", "MultiAssayExperiment",
 function(countsTrain, countsTest, targets = names(countsTrain), classesTrain, ...)
 {
-  tablesAndOutcomes <- .MAEtoWideTable(countsTrain, targets, classesTrain, "integer")
-  trainingMatrix <- tablesAndOutcomes[["dataTable"]]
-  classesTrain <- tablesAndOutcomes[["outcomes"]]
+  tablesAndOutcome <- .MAEtoWideTable(countsTrain, targets, classesTrain, "integer")
+  trainingMatrix <- tablesAndOutcome[["dataTable"]]
+  classesTrain <- tablesAndOutcome[["outcome"]]
   testingMatrix <- .MAEtoWideTable(countsTest, targets, "integer")
             
   .checkVariablesAndSame(trainingMatrix, testingMatrix)
... ...
@@ -89,8 +89,8 @@ setMethod("coxphTrainInterface", "DataFrame", function(measurementsTrain, surviv
     message("Fitting coxph classifier to training data and making predictions on test
             data.")
 
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, survivalTrain)  
-  survivalTrain <- splitDataset[["outcomes"]]
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, survivalTrain)  
+  survivalTrain <- splitDataset[["outcome"]]
   measurementsTrain <- splitDataset[["measurements"]]
   
   survival::coxph(survivalTrain ~ ., measurementsTrain)
... ...
@@ -102,7 +102,7 @@ setMethod("coxphTrainInterface", "MultiAssayExperiment", function(measurementsTr
 {
   tablesAndSurvival <- .MAEtoWideTable(measurementsTrain, targets, survivalTrain, restrict = NULL)
   measurementsTrain <- tablesAndSurvival[["dataTable"]]
-  survivalTrain <- tablesAndSurvival[["outcomes"]]
+  survivalTrain <- tablesAndSurvival[["outcome"]]
   
   coxphTrainInterface(measurementsTrain, survivalTrain, ...)
 })
... ...
@@ -101,12 +101,12 @@ setMethod("coxnetTrainInterface", "DataFrame", function(measurementsTrain, survi
   if(verbose == 3)
     message("Fitting coxnet model to data.")
   
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, survivalTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, survivalTrain)
   measurementsTrain <- data.frame(splitDataset[["measurements"]], check.names = FALSE)
   measurementsMatrix <- glmnet::makeX(as(measurementsTrain, "data.frame"))
   
   # The response variable is a Surv object.
-  fit <- glmnet::cv.glmnet(measurementsMatrix, splitDataset[["outcomes"]], family = "cox", type = "C", ...)
+  fit <- glmnet::cv.glmnet(measurementsMatrix, splitDataset[["outcome"]], family = "cox", type = "C", ...)
   fitted <- fit$glmnet.fit
   
   offset <- -mean(predict(fitted, measurementsMatrix, s = fit$lambda.min, type = "link"))
... ...
@@ -123,7 +123,7 @@ setMethod("coxnetTrainInterface", "MultiAssayExperiment",
           {
             tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, survivalTrain)
             measurementsTrain <- tablesAndClasses[["dataTable"]]
-            survivalTrain <- tablesAndClasses[["outcomes"]]
+            survivalTrain <- tablesAndClasses[["outcome"]]
             
             if(ncol(measurementsTrain) == 0)
               stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -159,7 +159,7 @@ setMethod("coxnetPredictInterface", c("coxnet", "DataFrame"), function(model, me
 { # ... just consumes emitted tuning variables from .doTrain which are unused.
   if(!is.null(survivalTest))
   {
-    splitDataset <- .splitDataAndOutcomes(measurementsTest, survivalTest)  # Remove any classes, if present.
+    splitDataset <- .splitDataAndOutcome(measurementsTest, survivalTest)  # Remove any classes, if present.
     measurementsTest <- splitDataset[["measurements"]]
   }
   
... ...
@@ -82,9 +82,9 @@ setMethod("DLDAtrainInterface", "matrix", function(measurementsTrain, classesTra
 #' @export
 setMethod("DLDAtrainInterface", "DataFrame", function(measurementsTrain, classesTrain, verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
   trainingMatrix <- as.matrix(splitDataset[["measurements"]]) # DLDA demands matrix input type.
-  classesTrain <- splitDataset[["outcomes"]]
+  classesTrain <- splitDataset[["outcome"]]
   
   #if(!requireNamespace("sparsediscrim", quietly = TRUE))
   #stop("The package 'sparsediscrim' could not be found. Please install it.")
... ...
@@ -101,7 +101,7 @@ setMethod("DLDAtrainInterface", "MultiAssayExperiment", function(measurementsTra
 {
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
   measurementsTrain <- tablesAndClasses[["dataTable"]]
-  classesTrain <- tablesAndClasses[["outcomes"]]
+  classesTrain <- tablesAndClasses[["outcome"]]
   
   if(ncol(measurementsTrain) == 0)
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -117,18 +117,18 @@ setMethod("elasticNetGLMtrainInterface", "DataFrame", function(measurementsTrain
   if(verbose == 3)
     message("Fitting elastic net regularised GLM classifier to data.")
   
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain, restrict = NULL)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain, restrict = NULL)
   measurementsTrain <- data.frame(splitDataset[["measurements"]], check.names = FALSE)
   measurementsMatrix <- glmnet::makeX(as(measurementsTrain, "data.frame"))
 
-  fitted <- glmnet::glmnet(measurementsMatrix, splitDataset[["outcomes"]], family = "multinomial", ...)
+  fitted <- glmnet::glmnet(measurementsMatrix, splitDataset[["outcome"]], family = "multinomial", ...)
 
   if(is.null(lambda)) # fitted has numerous models for automatically chosen lambda values.
   { # Pick one lambda based on resubstitution performance.
     bestLambda <- fitted[["lambda"]][which.min(sapply(fitted[["lambda"]], function(lambda) # Largest Lambda with minimum balanced error rate.
     {
       classPredictions <- factor(as.character(predict(fitted, measurementsMatrix, s = lambda, type = "class")), levels = fitted[["classnames"]])
-      calcExternalPerformance(splitDataset[["outcomes"]], classPredictions, "Balanced Error")
+      calcExternalPerformance(splitDataset[["outcome"]], classPredictions, "Balanced Error")
     }))[1]]
     attr(fitted, "tune") <- list(lambda = bestLambda)
   }
... ...
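The resubstitution search above relies on glmnet storing its lambda sequence in decreasing order, so the first index achieving the minimum balanced error corresponds to the largest such lambda. A toy illustration of that selection rule, with made-up numbers:

lambdas <- c(0.50, 0.30, 0.20, 0.10, 0.05)   # Decreasing, as glmnet stores them.
errors  <- c(0.40, 0.31, 0.25, 0.25, 0.27)   # Balanced error per lambda.
bestLambda <- lambdas[which.min(errors)[1]]  # 0.20: largest lambda at the minimum.
bestLambda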
@@ -141,9 +141,9 @@ setMethod("elasticNetGLMtrainInterface", "DataFrame", function(measurementsTrain
 setMethod("elasticNetGLMtrainInterface", "MultiAssayExperiment",
 function(measurementsTrain, targets = names(measurementsTrain), classesTrain, ...)
 {
-  tablesAndOutcomes <- .MAEtoWideTable(measurementsTrain, targets, classesTrain, restrict = NULL)
-  measurementsTrain <- tablesAndOutcomes[["dataTable"]]
-  classesTrain <- tablesAndOutcomes[["outcomes"]]
+  tablesAndOutcome <- .MAEtoWideTable(measurementsTrain, targets, classesTrain, restrict = NULL)
+  measurementsTrain <- tablesAndOutcome[["dataTable"]]
+  classesTrain <- tablesAndOutcome[["outcome"]]
   
   if(ncol(measurementsTrain) == 0)
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -74,9 +74,9 @@ setMethod("fisherDiscriminant", "matrix", function(measurementsTrain, classesTra
 setMethod("fisherDiscriminant", "DataFrame", # Sample information data or one of the other inputs, transformed.
           function(measurementsTrain, classesTrain, measurementsTest, returnType = c("both", "class", "score"), verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
   trainingMatrix <- as.matrix(splitDataset[["measurements"]])
-  classesTrain <- splitDataset[["outcomes"]]
+  classesTrain <- splitDataset[["outcome"]]
   isNumeric <- sapply(measurementsTest, is.numeric)
   testingMatrix <- as.matrix(measurementsTest[, isNumeric, drop = FALSE])
 
... ...
@@ -117,7 +117,7 @@ setMethod("fisherDiscriminant", "MultiAssayExperiment", function(measurementsTra
 {
   tablesAndClasses <- .MAEtoWideTable(measurements, targets, classesTrain)
   trainingMatrix <- tablesAndClasses[["dataTable"]]
-  classesTrain <- tablesAndClasses[["outcomes"]]
+  classesTrain <- tablesAndClasses[["outcome"]]
   testingMatrix <- .MAEtoWideTable(measurementsTest, targets)
   
   .checkVariablesAndSame(trainingMatrix, testingMatrix)
... ...
@@ -115,9 +115,9 @@ setMethod("GLMtrainInterface", "DataFrame", function(measurementsTrain, classesT
 setMethod("GLMtrainInterface", "MultiAssayExperiment",
 function(measurementsTrain, targets = names(measurementsTrain), classesTrain, ...)
 {
-  tablesAndOutcomes <- .MAEtoWideTable(measurementsTrain, targets, classesTrain, restrict = NULL)
-  measurementsTrain <- tablesAndOutcomes[["dataTable"]]
-  classesTrain <- tablesAndOutcomes[["outcomes"]]
+  tablesAndOutcome <- .MAEtoWideTable(measurementsTrain, targets, classesTrain, restrict = NULL)
+  measurementsTrain <- tablesAndOutcome[["dataTable"]]
+  classesTrain <- tablesAndOutcome[["outcome"]]
   
   if(ncol(measurementsTrain) == 0)
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -71,8 +71,8 @@ setMethod("kNNinterface", "matrix",
 #' @export
 setMethod("kNNinterface", "DataFrame", function(measurementsTrain, classesTrain, measurementsTest, ..., classifierName = "k Nearest Neighbours", verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
-  classesTrain <- splitDataset[["outcomes"]]
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
+  classesTrain <- splitDataset[["outcome"]]
   trainingMatrix <- as.matrix(splitDataset[["measurements"]])
   measurementsTest <- measurementsTest[, colnames(measurementsTrain), drop = FALSE]
   
... ...
@@ -91,7 +91,7 @@ function(measurementsTrain, measurementsTest, targets = names(measurementsTrain)
 {
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
   trainingTable <- tablesAndClasses[["dataTable"]]
-  classes <- tablesAndClasses[["outcomes"]]
+  classes <- tablesAndClasses[["outcome"]]
   testingTable <- .MAEtoWideTable(measurementsTest, targets)
 
   .checkVariablesAndSame(trainingTable, testingTable)
... ...
@@ -115,8 +115,8 @@ setMethod("kTSPclassifier", "DataFrame", # Sample information data or one of the
   if(!"Pairs" %in% class(featurePairs))
     stop("'featurePairs' must be of type Pairs.")            
 
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
-  classesTrain <- splitDataset[["outcomes"]]
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
+  classesTrain <- splitDataset[["outcome"]]
   trainingMatrix <- splitDataset[["measurements"]]
   isNumeric <- sapply(measurementsTest, is.numeric)
   testingMatrix <- as.matrix(measurementsTest[, isNumeric, drop = FALSE])
... ...
@@ -205,7 +205,7 @@ setMethod("kTSPclassifier", "MultiAssayExperiment",
 
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, target)
   trainingMatrix <- tablesAndClasses[["dataTable"]]
-  classes <- tablesAndClasses[["outcomes"]]
+  classes <- tablesAndClasses[["outcome"]]
   testingMatrix <- .MAEtoWideTable(measurementsTest, target)
 
   .checkVariablesAndSame(trainingMatrix, testingMatrix)
... ...
@@ -121,9 +121,9 @@ setMethod("mixModelsTrain", "matrix", function(measurementsTrain, ...) # Matrix 
 #' @export
 setMethod("mixModelsTrain", "DataFrame", function(measurementsTrain, classesTrain, ..., verbose = 3) # Mixed data types.
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
   measurementsTrain <- splitDataset[["measurements"]]
-  classesTrain <- splitDataset[["outcomes"]]
+  classesTrain <- splitDataset[["outcome"]]
 
   if(verbose == 3)
     message("Fitting mixtures of normals for features.")
... ...
@@ -162,7 +162,7 @@ setMethod("mixModelsTrain", "MultiAssayExperiment", function(measurementsTrain, 
 {
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
   dataTable <- tablesAndClasses[["dataTable"]]
-  classesTrain <- tablesAndClasses[["outcomes"]]
+  classesTrain <- tablesAndClasses[["outcome"]]
   mixModelsTrain(dataTable, classesTrain, ...)
 })
 
... ...
@@ -71,9 +71,9 @@ setMethod("NSCtrainInterface", "matrix", function(measurementsTrain, classesTrai
 setMethod("NSCtrainInterface", "DataFrame", # Sample information data or one of the other inputs, transformed.
           function(measurementsTrain, classesTrain, ..., verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
   measurementsTrain <- splitDataset[["measurements"]]
-  classesTrain <- splitDataset[["outcomes"]]
+  classesTrain <- splitDataset[["outcome"]]
 
   if(!requireNamespace("pamr", quietly = TRUE))
     stop("The package 'pamr' could not be found. Please install it.")
... ...
@@ -94,7 +94,7 @@ setMethod("NSCtrainInterface", "MultiAssayExperiment",
 { 
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
   measurementsTrain <- tablesAndClasses[["dataTable"]]
-  classesTrain <- tablesAndClasses[["outcomes"]]
+  classesTrain <- tablesAndClasses[["outcome"]]
   
   if(ncol(measurementsTrain) == 0)
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -184,7 +184,7 @@ setMethod("NSCpredictInterface", c("pamrtrained", "DataFrame"), function(model, 
   
   if(!is.null(classesColumnTest)) # Remove the column, since pamr uses positional matching of features.
   {
-    splitDataset <- .splitDataAndOutcomes(measurementsTest, classesColumnTest) 
+    splitDataset <- .splitDataAndOutcome(measurementsTest, classesColumnTest) 
     measurementsTest <- splitDataset[["measurements"]] # Without classes column.
   }
   
... ...
@@ -122,9 +122,9 @@ setMethod("naiveBayesKernel", "DataFrame", # Sample information data or one of t
                    weighting = c("height difference", "crossover distance"),
                    minDifference = 0, returnType = c("both", "class", "score"), verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
   trainingMatrix <- splitDataset[["measurements"]]
-  classesTrain <- splitDataset[["outcomes"]]
+  classesTrain <- splitDataset[["outcome"]]
   testingMatrix <- as.matrix(measurementsTest[, colnames(trainingMatrix), drop = FALSE])
   
   .checkVariablesAndSame(trainingMatrix, testingMatrix)
... ...
@@ -249,7 +249,7 @@ setMethod("naiveBayesKernel", "MultiAssayExperiment",
 {
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
   trainingMatrix <- tablesAndClasses[["dataTable"]]
-  classesTrain <- tablesAndClasses[["outcomes"]]
+  classesTrain <- tablesAndClasses[["outcome"]]
   testingMatrix <- .MAEtoWideTable(measurementsTest, targets)
 
   .checkVariablesAndSame(trainingMatrix, testingMatrix)
... ...
@@ -102,7 +102,7 @@ setMethod("randomForestTrainInterface", "matrix", # Matrix of numeric measuremen
 #' @rdname randomForest
 setMethod("randomForestTrainInterface", "DataFrame", function(measurementsTrain, classesTrain, ..., verbose = 3)
 {
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain, restrict = NULL)
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain, restrict = NULL)
 
   if(!requireNamespace("randomForest", quietly = TRUE))
     stop("The package 'randomForest' could not be found. Please install it.")
... ...
@@ -111,7 +111,7 @@ setMethod("randomForestTrainInterface", "DataFrame", function(measurementsTrain,
             data.")
 
   # Convert to base data.frame as randomForest doesn't understand DataFrame.
-  randomForest::randomForest(as(splitDataset[["measurements"]], "data.frame"), splitDataset[["outcomes"]], keep.forest = TRUE, ...)
+  randomForest::randomForest(as(splitDataset[["measurements"]], "data.frame"), splitDataset[["outcome"]], keep.forest = TRUE, ...)
 })
 
 #' @export
... ...
@@ -121,7 +121,7 @@ function(measurementsTrain, targets = names(measurementsTrain), classesTrain, ..
121 121
 {
122 122
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain, restrict = NULL)
123 123
   measurementsTrain <- tablesAndClasses[["dataTable"]]
124
-  classesTrain <- tablesAndClasses[["outcomes"]]
124
+  classesTrain <- tablesAndClasses[["outcome"]]
125 125
   
126 126
   randomForestTrainInterface(measurementsTrain, classesTrain, ...)
127 127
 })
... ...
@@ -77,8 +77,8 @@ setMethod("rfsrcTrainInterface", "DataFrame", function(measurementsTrain, surviv
77 77
     message("Fitting rfsrc classifier to training data and making predictions on test
78 78
             data.")
79 79
 
80
-  splitDataset <- ClassifyR:::.splitDataAndOutcomes(measurementsTrain, survivalTrain)  
81
-  survivalTrain <- splitDataset[["outcomes"]]
80
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, survivalTrain)  
81
+  survivalTrain <- splitDataset[["outcome"]]
82 82
   measurementsTrain <- splitDataset[["measurements"]]
83 83
   bindedMeasurements <- cbind(measurementsTrain, time = survivalTrain[, 1], event = survivalTrain[, 2]) # Surv column 1 is time; column 2 is the event indicator.
84 84
   randomForestSRC::rfsrc(Surv(event = event, time = time) ~ ., as.data.frame(bindedMeasurements), ...)
... ...
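Note the ordering: in a Surv object, column 1 holds the time and column 2 the event indicator, which is what the cbind above relies on. A self-contained sketch of the equivalent direct fit on synthetic data, assuming the survival and randomForestSRC packages are installed:

# Fit a random survival forest directly, mirroring rfsrcTrainInterface.
library(survival)
set.seed(42)
trainData <- data.frame(gene1 = rnorm(100), gene2 = rnorm(100),
                        time = rexp(100, rate = 0.1), # Follow-up times.
                        event = rbinom(100, 1, 0.6))  # 1 = event, 0 = censored.
fit <- randomForestSRC::rfsrc(Surv(time, event) ~ ., data = trainData)
head(predict(fit, trainData)$predicted) # Per-sample predicted mortality (risk).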
@@ -90,7 +90,7 @@ setMethod("rfsrcTrainInterface", "MultiAssayExperiment", function(measurementsTr
90 90
 {
91 91
   tablesAndSurvival <- ClassifyR:::.MAEtoWideTable(measurementsTrain, targets, survivalTrain, restrict = NULL)
92 92
   measurementsTrain <- tablesAndSurvival[["dataTable"]]
93
-  survivalTrain <- tablesAndSurvival[["outcomes"]]
93
+  survivalTrain <- tablesAndSurvival[["outcome"]]
94 94
   
95 95
   rfsrcTrainInterface(measurementsTrain, survivalTrain, ...)
96 96
 })
... ...
@@ -123,9 +123,9 @@ setMethod("rfsrcPredictInterface", c("rfsrc", "matrix"), # Matrix of numeric mea
123 123
 setMethod("rfsrcPredictInterface", c("rfsrc", "DataFrame"),
124 124
 function(model, measurementsTest, ..., verbose = 3)
125 125
 {
126
-  predictedOutcomes = predict(model, as.data.frame(measurementsTest), ...)$predicted
127
-  names(predictedOutcomes) = rownames(measurementsTest)
128
-  predictedOutcomes
126
+  predictedOutcome <- predict(model, as.data.frame(measurementsTest), ...)$predicted
127
+  names(predictedOutcome) <- rownames(measurementsTest)
128
+  predictedOutcome
129 129
 })
130 130
 
131 131
 # One or more omics data sets, possibly with clinical data.
... ...
@@ -89,7 +89,7 @@ setMethod("SVMtrainInterface", "DataFrame", function(measurementsTrain, classesT
89 89
   if(!requireNamespace("e1071", quietly = TRUE))
90 90
     stop("The package 'e1071' could not be found. Please install it.")
91 91
     
92
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
92
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
93 93
   # Classifier requires matrix input data type.
94 94
   trainingMatrix <- as.matrix(splitDataset[["measurements"]])
95 95
   
... ...
@@ -110,7 +110,7 @@ function(measurementsTrain, targets = names(measurementsTrain), classesTrain, ..
110 110
 {
111 111
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
112 112
   measurementsTrain <- tablesAndClasses[["dataTable"]]
113
-  classesTrain <- tablesAndClasses[["outcomes"]]
113
+  classesTrain <- tablesAndClasses[["outcome"]]
114 114
   
115 115
   if(ncol(measurementsTrain) == 0)
116 116
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -27,7 +27,7 @@
27 27
 #' thus each row unambiguously specifies a variable to be plotted.
28 28
 #' @param classesColumn If \code{measurementsTrain} is a \code{MultiAssayExperiment}, the
29 29
 #' names of the class column in the table extracted by \code{colData(multiAssayExperiment)}
30
-#' that contains the samples' outcomes to use for prediction.
30
+#' that contains each sample's outcome to use for prediction.
31 31
 #' @param groupBy If \code{measurements} is a \code{DataFrame}, then a
32 32
 #' character vector of length 1, which contains the name of a categorical
33 33
 #' feature, may be specified.  If \code{measurements} is a
... ...
@@ -175,9 +175,9 @@ setMethod("plotFeatureClasses", "DataFrame", function(measurements, classes, tar
175 175
                     facets = factor(paste(groupingName, "is", groupBy), levels = paste(groupingName, "is", levelsOrder)))
176 176
   }
177 177
   
178
-  splitDataset <- .splitDataAndOutcomes(measurements, classes, restrict = NULL)
178
+  splitDataset <- .splitDataAndOutcome(measurements, classes, restrict = NULL)
179 179
   measurements <- splitDataset[["measurements"]]
180
-  classes <- splitDataset[["outcomes"]]
180
+  classes <- splitDataset[["outcome"]]
181 181
   
182 182
   if(!requireNamespace("ggplot2", quietly = TRUE))
183 183
     stop("The package 'ggplot2' could not be found. Please install it.")
... ...
@@ -73,9 +73,9 @@ function(measurementsTrain, classesTrain, ...)
73 73
 setMethod("bartlettRanking", "DataFrame", # Sample information data or one of the other inputs, transformed.
74 74
           function(measurementsTrain, classesTrain, verbose = 3)
75 75
 {
76
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
76
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
77 77
   measurementsTrain <- splitDataset[["measurements"]]
78
-  classesTrain <- splitDataset[["outcomes"]]
78
+  classesTrain <- splitDataset[["outcome"]]
79 79
   
80 80
   if(verbose == 3)
81 81
     message("Ranking features based on Bartlett statistic.")
... ...
@@ -94,7 +94,7 @@ setMethod("bartlettRanking", "MultiAssayExperiment",
94 94
 {
95 95
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
96 96
   measurementsTrain <- tablesAndClasses[["dataTable"]]
97
-  classesTrain <- tablesAndClasses[["outcomes"]]
97
+  classesTrain <- tablesAndClasses[["outcome"]]
98 98
   
99 99
   if(ncol(measurementsTrain) == 0)
100 100
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -48,9 +48,9 @@ setMethod("coxphRanking", "matrix", function(measurementsTrain, survivalTrain, .
48 48
 #' @export
49 49
 setMethod("coxphRanking", "DataFrame", function(measurementsTrain, survivalTrain, verbose = 3) # Clinical data or one of the other inputs, transformed.
50 50
 {
51
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, survivalTrain)
51
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, survivalTrain)
52 52
   measurementsTrain <- splitDataset[["measurements"]]
53
-  survivalTrain <- splitDataset[["outcomes"]]
53
+  survivalTrain <- splitDataset[["outcome"]]
54 54
 
55 55
   pValues <- apply(measurementsTrain, 2, function(featureColumn){
56 56
     fit <- survival::coxph(survivalTrain ~ featureColumn)
... ...
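The ranking itself is one univariate Cox model per feature, ordered by p-value. The loop reduces to the standalone sketch below on synthetic data; the Wald-test extraction shown is an assumption about the lines elided from the hunk.

# Rank features by the p-value of a one-variable Cox model each.
library(survival)
set.seed(1)
measurements <- matrix(rnorm(60 * 5), nrow = 60,
                       dimnames = list(NULL, paste0("Gene", 1:5)))
survivalTrain <- Surv(rexp(60, rate = 0.1), rbinom(60, 1, 0.7))
pValues <- apply(measurements, 2, function(featureColumn)
  summary(survival::coxph(survivalTrain ~ featureColumn))$waldtest["pvalue"])
order(pValues) # Feature indices, most survival-associated first.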
@@ -68,7 +68,7 @@ setMethod("coxphRanking", "MultiAssayExperiment", function(measurementsTrain, ta
68 68
 {
69 69
   tablesAndSurvival <- .MAEtoWideTable(measurementsTrain, targets, survivalTrain)
70 70
   measurementsTrain <- tablesAndSurvival[["dataTable"]]
71
-  survivalTrain <- tablesAndSurvival[["outcomes"]]
71
+  survivalTrain <- tablesAndSurvival[["outcome"]]
72 72
   
73 73
   if(ncol(measurementsTrain) == 0)
74 74
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -79,7 +79,7 @@ setMethod("DMDranking", "DataFrame", # sampleInfo data or one of the other input
79 79
           function(measurementsTrain, classesTrain, differences = c("both", "location", "scale"),
80 80
                    ..., verbose = 3)
81 81
 {
82
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
82
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
83 83
   measurementsTrain <- splitDataset[["measurements"]]
84 84
 
85 85
   if(verbose == 3)
... ...
@@ -113,6 +113,6 @@ setMethod("DMDranking", "MultiAssayExperiment",
113 113
 {
114 114
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
115 115
   measurementsTrain <- tablesAndClasses[["dataTable"]]
116
-  classesTrain <- tablesAndClasses[["outcomes"]]            
116
+  classesTrain <- tablesAndClasses[["outcome"]]            
117 117
   DMDranking(measurementsTrain, classesTrain, ...)
118 118
 })
119 119
\ No newline at end of file
... ...
@@ -70,8 +70,8 @@ setMethod("differentMeansRanking", "DataFrame",
70 70
   if(!requireNamespace("genefilter", quietly = TRUE))
71 71
     stop("The package 'genefilter' could not be found. Please install it.")
72 72
 
73
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
74
-  classesTrain <- splitDataset[["outcomes"]]
73
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
74
+  classesTrain <- splitDataset[["outcome"]]
75 75
   # Data is required to be in traditional bioinformatics format - features in rows
76 76
   # and samples in columns and also must be a matrix, not another kind of rectangular data.  
77 77
   measurementsMatrix <- t(as.matrix(splitDataset[["measurements"]]))
... ...
@@ -103,6 +103,6 @@ setMethod("differentMeansRanking", "MultiAssayExperiment",
103 103
             
104 104
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
105 105
   measurementsTrain <- tablesAndClasses[["dataTable"]]
106
-  classesTrain <- tablesAndClasses[["outcomes"]]
106
+  classesTrain <- tablesAndClasses[["outcome"]]
107 107
   differentMeansRanking(measurementsTrain, classesTrain, ...)
108 108
 })
109 109
\ No newline at end of file
... ...
@@ -126,6 +126,6 @@ setMethod("edgeRranking", "MultiAssayExperiment", function(countsTrain, targets
126 126
 
127 127
   tablesAndClasses <- .MAEtoWideTable(countsTrain, targets, "integer")
128 128
   countsTable <- tablesAndClasses[["dataTable"]]
129
-  classesTrain <- tablesAndClasses[["outcomes"]]
129
+  classesTrain <- tablesAndClasses[["outcome"]]
130 130
   edgeRranking(countsTable, classesTrain, ...)
131 131
 })
132 132
\ No newline at end of file
... ...
@@ -63,7 +63,7 @@ setMethod("KolmogorovSmirnovRanking", "matrix", function(measurementsTrain, clas
63 63
 setMethod("KolmogorovSmirnovRanking", "DataFrame", # Sample information data or one of the other inputs, transformed.
64 64
           function(measurementsTrain, classesTrain, ..., verbose = 3)
65 65
 {
66
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
66
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
67 67
   measurementsTrain <- splitDataset[["measurements"]]
68 68
   
69 69
   if(verbose == 3)
... ...
@@ -85,7 +85,7 @@ function(measurementsTrain, targets = names(measurementsTrain), classesTrain, ..
85 85
 {
86 86
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
87 87
   measurementsTrain <- tablesAndClasses[["dataTable"]]
88
-  classesTrain <- tablesAndClasses[["outcomes"]]
88
+  classesTrain <- tablesAndClasses[["outcome"]]
89 89
             
90 90
   if(ncol(measurementsTrain) == 0)
91 91
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -71,7 +71,7 @@ setMethod("KullbackLeiblerRanking", "matrix", function(measurementsTrain, classe
71 71
 setMethod("KullbackLeiblerRanking", "DataFrame", # Sample information data or one of the other inputs, transformed.
72 72
           function(measurementsTrain, classesTrain, ..., verbose = 3)
73 73
 {
74
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
74
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
75 75
   measurementsTrain <- splitDataset[["measurements"]]
76 76
   
77 77
   if(verbose == 3)
... ...
@@ -98,7 +98,7 @@ setMethod("KullbackLeiblerRanking", "MultiAssayExperiment",
98 98
 {
99 99
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
100 100
   measurementsTrain <- tablesAndClasses[["dataTable"]]
101
-  classesTrain <- tablesAndClasses[["outcomes"]]
101
+  classesTrain <- tablesAndClasses[["outcome"]]
102 102
 
103 103
   if(ncol(measurementsTrain) == 0)
104 104
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -64,7 +64,7 @@ setMethod("leveneRanking", "matrix", function(measurementsTrain, classesTrain, .
64 64
 setMethod("leveneRanking", "DataFrame", # Sample information data or one of the other inputs, transformed.
65 65
           function(measurementsTrain, classesTrain, verbose = 3)
66 66
 {
67
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
67
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
68 68
   measurementsTrain <- splitDataset[["measurements"]]
69 69
   
70 70
   if(!requireNamespace("car", quietly = TRUE))
... ...
@@ -86,7 +86,7 @@ setMethod("leveneRanking", "MultiAssayExperiment",
86 86
 {
87 87
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
88 88
   measurementsTrain <- tablesAndClasses[["dataTable"]]
89
-  classesTrain <- tablesAndClasses[["outcomes"]]
89
+  classesTrain <- tablesAndClasses[["outcome"]]
90 90
   
91 91
   leveneRanking(measurementsTrain, classesTrain, ...)
92 92
 })
93 93
\ No newline at end of file
... ...
@@ -76,7 +76,7 @@ setMethod("likelihoodRatioRanking", "DataFrame", # Sample information data or on
76 76
           function(measurementsTrain, classesTrain, alternative = c(location = "different", scale = "different"),
77 77
                    ..., verbose = 3)
78 78
 {
79
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
79
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
80 80
   measurementsTrain <- splitDataset[["measurements"]]
81 81
 
82 82
   if(verbose == 3)
... ...
@@ -109,7 +109,7 @@ setMethod("likelihoodRatioRanking", "MultiAssayExperiment",
109 109
 {
110 110
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
111 111
   measurementsTrain <- tablesAndClasses[["dataTable"]]
112
-  classesTrain <- tablesAndClasses[["outcomes"]]
112
+  classesTrain <- tablesAndClasses[["outcome"]]
113 113
 
114 114
   if(ncol(measurementsTrain) == 0)
115 115
     stop("No variables in data tables specified by \'targets\' are numeric.")
... ...
@@ -92,6 +92,6 @@ setMethod("limmaRanking", "MultiAssayExperiment",
92 92
             
93 93
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, targets, classesTrain)
94 94
   measurementsTrain <- tablesAndClasses[["dataTable"]]
95
-  classesTrain <- tablesAndClasses[["outcomes"]]
95
+  classesTrain <- tablesAndClasses[["outcome"]]
96 96
   limmaRanking(measurementsTrain, classesTrain, ...)
97 97
 })
98 98
\ No newline at end of file
... ...
@@ -86,7 +86,7 @@ setMethod("pairsDifferencesRanking", "DataFrame",
86 86
   if(!"Pairs" %in% class(featurePairs))
87 87
     stop("'featurePairs' must be of type Pairs.")
88 88
             
89
-  splitDataset <- .splitDataAndOutcomes(measurementsTrain, classesTrain)
89
+  splitDataset <- .splitDataAndOutcome(measurementsTrain, classesTrain)
90 90
   measurementsTrain <- splitDataset[["measurements"]]
91 91
   
92 92
   suppliedPairs <- length(featurePairs)
... ...
@@ -130,6 +130,6 @@ setMethod("pairsDifferencesRanking", "MultiAssayExperiment",
130 130
             
131 131
   tablesAndClasses <- .MAEtoWideTable(measurementsTrain, target, classesTrain)
132 132
   measurementsTrain <- tablesAndClasses[["dataTable"]]
133
-  classesTrain <- tablesAndClasses[["outcomes"]]            
133
+  classesTrain <- tablesAndClasses[["outcome"]]            
134 134
   pairsDifferencesRanking(measurementsTrain, classesTrain, featurePairs, ...)
135 135
 })
136 136
\ No newline at end of file
... ...
@@ -14,12 +14,12 @@
14 14
 #' @param measurementsTrain Either a \code{\link{matrix}}, \code{\link{DataFrame}}
15 15
 #' or \code{\link{MultiAssayExperiment}} containing the training data. For a
16 16
 #' \code{matrix} or \code{\link{DataFrame}}, the rows are samples, and the columns are features.
17
-#' @param outcomesTrain Either a factor vector of classes, a \code{\link{Surv}} object, or
17
+#' @param outcomeTrain Either a factor vector of classes, a \code{\link{Surv}} object, or
18 18
 #' a character string, or vector of such strings, naming the column(s) that contain
19 19
 #' either classes or time and event information about survival.
20 20
 #' @param measurementsTest Same data type as \code{measurementsTrain}, but only the test
21 21
 #' samples.
22
-#' @param outcomesTest Same data type as \code{outcomesTrain}, but only the test
22
+#' @param outcomeTest Same data type as \code{outcomeTrain}, but only the test
23 23
 #' samples.
24 24
 #' @param crossValParams An object of class \code{\link{CrossValParams}},
25 25
 #' specifying the kind of cross-validation to be done, if nested
... ...
@@ -31,9 +31,9 @@
31 31
 #' names of the data tables to be used. \code{"sampleInfo"} is also a valid value
32 32
 #' and specifies that numeric variables from the sample information data table will be
33 33
 #' used.
34
-#' @param outcomesColumns If \code{measurementsTrain} is a \code{MultiAssayExperiment}, the
34
+#' @param outcomeColumns If \code{measurementsTrain} is a \code{MultiAssayExperiment}, the
35 35
 #' names of the column (class) or columns (survival) in the table extracted by \code{colData(data)}
36
-#' that contain(s) the samples' outcomes to use for prediction.
36
+#' that contain(s) each sample's outcome to use for prediction.
37 37
 #' @param ... Variables not used by the \code{matrix} nor the
38 38
 #' \code{MultiAssayExperiment} method which are passed into and used by the
39 39
 #' \code{DataFrame} method.
... ...
@@ -77,19 +77,19 @@ setGeneric("runTest", function(measurementsTrain, ...)
77 77
 #' @rdname runTest
78 78
 #' @export
79 79
 setMethod("runTest", "matrix", # Matrix of numeric measurements.
80
-  function(measurementsTrain, outcomesTrain, measurementsTest, outcomesTest, ...)
80
+  function(measurementsTrain, outcomeTrain, measurementsTest, outcomeTest, ...)
81 81
 {
82 82
   runTest(measurementsTrain = S4Vectors::DataFrame(measurementsTrain, check.names = FALSE),
83
-          outcomesTrain = outcomesTrain,
83
+          outcomeTrain = outcomeTrain,
84 84
           measurementsTest = S4Vectors::DataFrame(measurementsTest, check.names = FALSE),
85
-          outcomesTest = outcomesTest,
85
+          outcomeTest = outcomeTest,
86 86
           ...)
87 87
 })
88 88
 
89 89
 #' @rdname runTest
90 90
 #' @export
91 91
 setMethod("runTest", "DataFrame", # Sample information data or one of the other inputs, transformed.
92
-function(measurementsTrain, outcomesTrain, measurementsTest, outcomesTest,
92
+function(measurementsTrain, outcomeTrain, measurementsTest, outcomeTest,
93 93
          crossValParams = CrossValParams(), # crossValParams might be used for tuning optimisation.
94 94
          modellingParams = ModellingParams(), characteristics = S4Vectors::DataFrame(), verbose = 1, .iteration = NULL)
95 95
 {if(!is.null(.iteration) && .iteration != "internal")
... ...
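With the renamed arguments, a direct call to runTest on matrix input looks like the sketch below; synthetic two-class data and the default CrossValParams and ModellingParams are assumed.

# Train on 70 samples, test on 30, using the new outcome argument names.
library(ClassifyR)
set.seed(123)
measurements <- matrix(rnorm(100 * 20), nrow = 100,
                       dimnames = list(paste0("Sample", 1:100), paste0("Gene", 1:20)))
classes <- factor(rep(c("Healthy", "Disease"), each = 50))
trainIndices <- sample(100, 70)
result <- runTest(measurements[trainIndices, ], outcomeTrain = classes[trainIndices],
                  measurements[-trainIndices, ], outcomeTest = classes[-trainIndices])
actualOutcome(result) # Known outcomes of all samples, via the renamed accessor.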
@@ -100,14 +100,14 @@ function(measurementsTrain, outcomesTrain, measurementsTest, outcomesTest,
100 100
     if(any(is.na(measurementsTrain)))
101 101
       stop("Some data elements are missing and classifiers don't work with missing data. Consider imputation or filtering.")                
102 102
 
103
-    splitDatasetTrain <- .splitDataAndOutcomes(measurementsTrain, outcomesTrain)
103
+    splitDatasetTrain <- .splitDataAndOutcome(measurementsTrain, outcomeTrain)
104 104
     # Rebalance the class sizes of the training samples by either downsampling or upsampling
105 105
     # or leave untouched if balancing is none.
106
-    if(!is(outcomesTrain, "Surv"))
106
+    if(!is(outcomeTrain, "Surv"))
107 107
     {
108
-      rebalancedTrain <- .rebalanceTrainingClasses(splitDatasetTrain[["measurements"]], splitDatasetTrain[["outcomes"]], modellingParams@balancing)
108
+      rebalancedTrain <- .rebalanceTrainingClasses(splitDatasetTrain[["measurements"]], splitDatasetTrain[["outcome"]], modellingParams@balancing)
109 109
       measurementsTrain <- rebalancedTrain[["measurementsTrain"]]
110
-      outcomesTrain <- rebalancedTrain[["classesTrain"]]
110
+      outcomeTrain <- rebalancedTrain[["classesTrain"]]
111 111
     }
112 112
   }
113 113
     
... ...
@@ -140,12 +140,12 @@ input data. Automatically reducing to smaller number.")
140 140
     repeat{
141 141
       newSamples <- sample(nrow(measurementsTrain), replace = TRUE, prob = scoresPrevious)
142 142
       measurementsTrainResampled <- measurementsTrain[newSamples, ]
143
-      outcomesResampled <- outcomesTrain[newSamples]
144
-      ASpredictions <- runTest(measurementsTrainResampled, outcomesResampled,
145
-              measurementsTrain, outcomesTrain, crossValParams, modellingParams,
143
+      outcomeResampled <- outcomeTrain[newSamples]
144
+      ASpredictions <- runTest(measurementsTrainResampled, outcomeResampled,
145
+              measurementsTrain, outcomeTrain, crossValParams, modellingParams,
146 146
               .iteration = "internal")[["predictions"]]
147
-      if(is.factor(outcomesResampled))
148
-          scoresNew <- mapply(function(rowIndex, class) ASpredictions[rowIndex, class], 1:nrow(ASpredictions), as.character(outcomesTrain))
147
+      if(is.factor(outcomeResampled))
148
+          scoresNew <- mapply(function(rowIndex, class) ASpredictions[rowIndex, class], 1:nrow(ASpredictions), as.character(outcomeTrain))
149 149
       else
150 150
           scoresNew <- ASpredictions[, "risk"]
151 151
 
... ...
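The repeat loop above performs adaptive resampling for the classification branch: rows are drawn with probability proportional to the previous scores, a model is refit on the resample, and each original training sample's score for its true class becomes the next set of weights. The core update in isolation, where fitAndScore is a hypothetical stand-in for the internal runTest call:

# One style of adaptive sample weighting, sketched from the loop above.
adaptiveScores <- function(measurements, classes, fitAndScore, iterations = 5)
{
  scores <- rep(1, nrow(measurements)) # Start from uniform weights.
  for(iteration in seq_len(iterations))
  {
    resampled <- sample(nrow(measurements), replace = TRUE, prob = scores)
    # fitAndScore trains on the resample and returns, for every original
    # sample, the model's score for that sample's true class.
    scores <- fitAndScore(measurements[resampled, , drop = FALSE],
                          classes[resampled], measurements, classes)
  }
  scores
}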
@@ -176,7 +176,7 @@ input data. Automatically reducing to smaller number.")
176 176
     if(length(modellingParams@selectParams@intermediate) != 0)
177 177
       modellingParams@selectParams <- .addIntermediates(modellingParams@selectParams)
178 178
  
179
-    topFeatures <- tryCatch(.doSelection(measurementsTrain, outcomesTrain, crossValParams, modellingParams, verbose),
179
+    topFeatures <- tryCatch(.doSelection(measurementsTrain, outcomeTrain, crossValParams, modellingParams, verbose),
180 180
                             error = function(error) error[["message"]]) 
181 181
     if(is.character(topFeatures)) return(topFeatures) # An error occurred.
182 182
     
... ...
@@ -196,7 +196,7 @@ input data. Automatically reducing to smaller number.")
196 196
     modellingParams@trainParams <- .addIntermediates(modellingParams@trainParams)
197 197
 
198 198
   # Some classifiers have one function for training and testing, so that's why test data is also passed in.
199
-  trained <- tryCatch(.doTrain(measurementsTrain, outcomesTrain, measurementsTest, outcomesTest, modellingParams, verbose),
199
+  trained <- tryCatch(.doTrain(measurementsTrain, outcomeTrain, measurementsTest, outcomeTest, modellingParams, verbose),
200 200
                       error = function(error) error[["message"]])
201 201
   if(is.character(trained)) return(trained) # An error occurred.
202 202
   
... ...
@@ -219,16 +219,16 @@ input data. Automatically reducing to smaller number.")
219 219
     if(length(modellingParams@predictParams@intermediate) != 0)
220 220
       modellingParams@predictParams <- .addIntermediates(modellingParams@predictParams)
221 221
     
222
-    predictedOutcomes <- tryCatch(.doTest(trained[["model"]], measurementsTest, modellingParams@predictParams, verbose),
222
+    predictedOutcome <- tryCatch(.doTest(trained[["model"]], measurementsTest, modellingParams@predictParams, verbose),
223 223
                                 error = function(error) error[["message"]]
224 224
                                 )
225 225
 
226
-    if(is.character(predictedOutcomes)) # An error occurred.
227
-      return(predictedOutcomes) # Return early.
226
+    if(is.character(predictedOutcome)) # An error occurred.
227
+      return(predictedOutcome) # Return early.
228 228
     
229 229
   } else { # One function that does training and testing, so predictions were made earlier
230 230
            # by .doTrain, rather than this .doTest stage.
231
-    predictedOutcomes <- trained[[1]]
231
+    predictedOutcome <- trained[[1]]
232 232
   }
233 233
   
234 234
   # Exclude one feature at a time, build model, predict test samples.
... ...
@@ -241,21 +241,21 @@ input data. Automatically reducing to smaller number.")
241 241
     {
242 242
       measurementsTrainLess1 <- measurementsTrain[, -selectedIndex, drop = FALSE]
243 243
       measurementsTestLess1 <- measurementsTest[, -selectedIndex, drop = FALSE]
244
-      modelWithoutOne <- tryCatch(.doTrain(measurementsTrainLess1, outcomesTrain, measurementsTestLess1, outcomesTest, modellingParams, verbose),
244
+      modelWithoutOne <- tryCatch(.doTrain(measurementsTrainLess1, outcomeTrain, measurementsTestLess1, outcomeTest, modellingParams, verbose),
245 245
                                   error = function(error) error[["message"]])
246 246
       if(!is.null(modellingParams@predictParams))
247
-      predictedOutcomesWithoutOne <- tryCatch(.doTest(modelWithoutOne[["model"]], measurementsTestLess1, modellingParams@predictParams, verbose),
247
+      predictedOutcomeWithoutOne <- tryCatch(.doTest(modelWithoutOne[["model"]], measurementsTestLess1, modellingParams@predictParams, verbose),
248 248
                                               error = function(error) error[["message"]])
249
-      else predictedOutcomesWithoutOne <- modelWithoutOne[["model"]]
249
+      else predictedOutcomeWithoutOne <- modelWithoutOne[["model"]]
250 250
 
251
-      if(!is.null(ncol(predictedOutcomesWithoutOne)))
252
-        predictedOutcomesWithoutOne <- predictedOutcomesWithoutOne[, na.omit(match(c("class", "risk"), colnames(predictedOutcomesWithoutOne)))]
253
-      calcExternalPerformance(outcomesTest, predictedOutcomesWithoutOne, performanceType)
251
+      if(!is.null(ncol(predictedOutcomeWithoutOne)))
252
+        predictedOutcomeWithoutOne <- predictedOutcomeWithoutOne[, na.omit(match(c("class", "risk"), colnames(predictedOutcomeWithoutOne)))]
253
+      calcExternalPerformance(outcomeTest, predictedOutcomeWithoutOne, performanceType)
254 254
     })
255 255
     
256
-    if(!is.null(ncol(predictedOutcomes)))
257
-        predictedOutcomes <- predictedOutcomes[, na.omit(match(c("class", "risk"), colnames(predictedOutcomes)))]
258
-    performanceChanges <- round(performancesWithoutEach - calcExternalPerformance(outcomesTest, predictedOutcomes, performanceType), 2)
256
+    if(!is.null(ncol(predictedOutcome)))
257
+        predictedOutcome <- predictedOutcome[, na.omit(match(c("class", "risk"), colnames(predictedOutcome)))]
258
+    performanceChanges <- round(performancesWithoutEach - calcExternalPerformance(outcomeTest, predictedOutcome, performanceType), 2)
259 259
      
260 260
     if(is.null(S4Vectors::mcols(measurementsTrain))) selectedFeatures <- featuresInfo[selectedFeaturesIndices, "Original Feature"] else selectedFeatures <- featuresInfo[selectedFeaturesIndices, c("Original Assay", "Original Feature")]
261 261
     importanceTable <- DataFrame(selectedFeatures, performanceChanges)
... ...
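Feature importance here comes from refitting with each selected feature excluded and recording the change in test performance against the full model. The same computation in standalone form, with trainAndPredict and scoreFn as hypothetical stand-ins for .doTrain/.doTest and calcExternalPerformance:

# Leave-one-feature-out importance: refit without each feature and compare.
featureImportance <- function(trainX, trainY, testX, testY, trainAndPredict, scoreFn)
{
  fullPerformance <- scoreFn(testY, trainAndPredict(trainX, trainY, testX))
  performancesWithoutEach <- sapply(seq_len(ncol(trainX)), function(excluded)
  {
    predictions <- trainAndPredict(trainX[, -excluded, drop = FALSE], trainY,
                                   testX[, -excluded, drop = FALSE])
    scoreFn(testY, predictions)
  })
  # For an error-like metric, positive changes mean the error rose without the
  # feature (it was helpful); the interpretation flips for accuracy-like metrics.
  S4Vectors::DataFrame(feature = colnames(trainX),
                       performanceChange = round(performancesWithoutEach - fullPerformance, 2))
}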
@@ -283,7 +283,7 @@ input data. Automatically reducing to smaller number.")
283 283
   
284 284
   if(!is.null(.iteration)) # This function was not called by the end user.
285 285
   {
286
-    list(ranked = rankedFeatures, selected = selectedFeatures, models = models, testSet = rownames(measurementsTest), predictions = predictedOutcomes, tune = tuneDetails, importance = importanceTable)
286
+    list(ranked = rankedFeatures, selected = selectedFeatures, models = models, testSet = rownames(measurementsTest), predictions = predictedOutcome, tune = tuneDetails, importance = importanceTable)
287 287
   } else { # runTest executed by the end user. Create a ClassifyResult object.
288 288
     # Only one training, so only one tuning choice, which can be summarised in characteristics.
289 289
     modParamsList <- list(modellingParams@transformParams, modellingParams@selectParams, modellingParams@trainParams, modellingParams@predictParams)
... ...
@@ -300,24 +300,24 @@ input data. Autmomatically reducing to smaller number.")
300 300
     characteristics <- rbind(characteristics, extrasDF)
301 301
     
302 302
     allSamples <- c(rownames(measurementsTrain), rownames(measurementsTest))
303
-    if(!is.null(ncol(outcomesTrain)))
303
+    if(!is.null(ncol(outcomeTrain)))
304 304
     {
305
-      allOutcomes <- rbind(outcomesTrain, outcomesTest)
306
-      rownames(allOutcomes) <- allSamples
305
+      allOutcome <- rbind(outcomeTrain, outcomeTest)
306
+      rownames(allOutcome) <- allSamples
307 307
     } else { 
308
-      allOutcomes <- c(outcomesTrain, outcomesTest)
309
-      names(allOutcomes) <- allSamples
308
+      allOutcome <- c(outcomeTrain, outcomeTest)
309
+      names(allOutcome) <- allSamples
310 310
     }
311 311
 
312 312
     ClassifyResult(characteristics, allSamples, featuresInfo, list(rankedFeatures), list(selectedFeatures),
313
-                   list(models), tuneDetails, DataFrame(sample = rownames(measurementsTest), predictedOutcomes, check.names = FALSE), allOutcomes, importanceTable)
313
+                   list(models), tuneDetails, DataFrame(sample = rownames(measurementsTest), predictedOutcome, check.names = FALSE), allOutcome, importanceTable)
314 314
   }  
315 315
 })
316 316
 
317 317
 #' @rdname runTest
318 318
 #' @export
319 319
 setMethod("runTest", c("MultiAssayExperiment"),
320
-          function(measurementsTrain, measurementsTest, targets = names(measurements), outcomesColumns, ...)
320
+          function(measurementsTrain, measurementsTest, targets = names(measurementsTrain), outcomeColumns, ...)
321 321
 {
322 322
   omicsTargets <- setdiff(targets, "sampleInfo")
323 323
   if(length(omicsTargets) > 0)
... ...
@@ -326,8 +326,8 @@ setMethod("runTest", c("MultiAssayExperiment"),
326 326
       stop("Data set contains replicates. Please provide remove or average replicate observations and try again.")
327 327
   }
328 328
   
329
-  tablesAndClassesTrain <- .MAEtoWideTable(measurementsTrain, targets, outcomesColumns, restrict = NULL)
330