... | ... |
@@ -3,8 +3,8 @@ Type: Package |
3 | 3 |
Title: A framework for cross-validated classification problems, with |
4 | 4 |
applications to differential variability and differential |
5 | 5 |
distribution testing |
6 |
-Version: 3.1.16 |
|
7 |
-Date: 2022-09-12 |
|
6 |
+Version: 3.1.17 |
|
7 |
+Date: 2022-09-15 |
|
8 | 8 |
Author: Dario Strbenac, Ellis Patrick, John Ormerod, Graham Mann, Jean Yang |
9 | 9 |
Maintainer: Dario Strbenac <dario.strbenac@sydney.edu.au> |
10 | 10 |
VignetteBuilder: knitr |
... | ... |
@@ -26,6 +26,7 @@ export(crossValidate) |
26 | 26 |
export(distribution) |
27 | 27 |
export(edgesToHubNetworks) |
28 | 28 |
export(featureSetSummary) |
29 |
+export(finalModel) |
|
29 | 30 |
export(generateCrossValParams) |
30 | 31 |
export(generateModellingParams) |
31 | 32 |
export(interactorDifferences) |
... | ... |
@@ -68,6 +69,7 @@ exportMethods(chosenFeatureNames) |
68 | 69 |
exportMethods(crossValidate) |
69 | 70 |
exportMethods(distribution) |
70 | 71 |
exportMethods(featureSetSummary) |
72 |
+exportMethods(finalModel) |
|
71 | 73 |
exportMethods(interactorDifferences) |
72 | 74 |
exportMethods(length) |
73 | 75 |
exportMethods(models) |
... | ... |
@@ -43,6 +43,8 @@ |
43 | 43 |
#' @param yLabel Label to be used for the y-axis of true positive rate. |
44 | 44 |
#' @param showAUC Logical. If \code{TRUE}, the AUC value of each result is |
45 | 45 |
#' added to its legend text. |
46 |
+#' @param ... Parameters not used by the \code{ClassifyResult} method but passed to |
|
47 |
+#' the \code{list} method. |
|
46 | 48 |
#' @return An object of class \code{ggplot} and a plot on the current graphics |
47 | 49 |
#' device, if \code{plot} is \code{TRUE}. |
48 | 50 |
#' @author Dario Strbenac |
... | ... |
@@ -862,8 +862,8 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL")) |
862 | 862 |
#' show,ClassifyResult-method sampleNames sampleNames,ClassifyResult-method |
863 | 863 |
#' predictions predictions,ClassifyResult-method actualOutcome |
864 | 864 |
#' actualOutcome,ClassifyResult-method features features,ClassifyResult-method |
865 |
-#' models models,ClassifyResult-method performance |
|
866 |
-#' performance,ClassifyResult-method tunedParameters |
|
865 |
+#' models models,ClassifyResult-method finalModel finalModel,ClassifyResult-method |
|
866 |
+#' performance performance,ClassifyResult-method tunedParameters |
|
867 | 867 |
#' tunedParameters,ClassifyResult-method totalPredictions |
868 | 868 |
#' totalPredictions,ClassifyResult-method |
869 | 869 |
#' @docType class |
... | ... |
@@ -911,6 +911,8 @@ setClassUnion("ModellingParamsOrNULL", c("ModellingParams", "NULL")) |
911 | 911 |
#' \describe{ |
912 | 912 |
#' \item{\code{models(result)}}{A \code{list} of the models fitted for each training.}} |
913 | 913 |
#' \describe{ |
914 |
+#' \item{\code{finalModel(result)}}{A deployable model fitted on all of the data for use on future data.}} |
|
915 |
+#' \describe{ |
|
914 | 916 |
#' \item{\code{chosenFeatureNames(result)}}{A \code{list} of the features selected for each training.}} |
915 | 917 |
#' \describe{ |
916 | 918 |
#' \item{\code{predictions(result)}}{Returns a \code{DataFrame} which has columns with test sample, |
... | ... |
@@ -1054,6 +1056,20 @@ setMethod("models", "ClassifyResult", |
1054 | 1056 |
object@models |
1055 | 1057 |
}) |
1056 | 1058 |
|
1059 |
+#' @export |
|
1060 |
+#' @usage NULL |
|
1061 |
+setGeneric("finalModel", function(object, ...) |
|
1062 |
+standardGeneric("finalModel")) |
|
1063 |
+ |
|
1064 |
+#' @rdname ClassifyResult-class |
|
1065 |
+#' @usage NULL |
|
1066 |
+#' @export |
|
1067 |
+setMethod("finalModel", "ClassifyResult", |
|
1068 |
+ function(object) |
|
1069 |
+ { |
|
1070 |
+ object@finalModel |
|
1071 |
+ }) |
|
1072 |
+ |
|
1057 | 1073 |
#' @export |
1058 | 1074 |
#' @usage NULL |
1059 | 1075 |
setGeneric("predictions", function(object, ...) |
... | ... |
@@ -252,7 +252,7 @@ input data. Automatically reducing to smaller number.") |
252 | 252 |
predictedOutcome <- predictedOutcome[, na.omit(match(c("class", "risk"), colnames(predictedOutcome)))] |
253 | 253 |
performanceChanges <- round(performancesWithoutEach - calcExternalPerformance(outcomeTest, predictedOutcome, performanceType), 2) |
254 | 254 |
|
255 |
- if(is.null(S4Vectors::mcols(measurementsTrain))) |
|
255 |
+ if(is.null(S4Vectors::mcols(measurementsTrain)) || !any(c("assay", "feature") %in% colnames(S4Vectors::mcols(measurementsTrain)))) |
|
256 | 256 |
{ |
257 | 257 |
selectedFeatures <- colnames(measurementsTrain)[selectedFeaturesIndices] |
258 | 258 |
} else { |
... | ... |
@@ -272,22 +272,22 @@ input data. Automatically reducing to smaller number.") |
272 | 272 |
{ |
273 | 273 |
if(!is.null(rankedFeaturesIndices)) |
274 | 274 |
{ |
275 |
- if(is.null(S4Vectors::mcols(measurementsTrain)) || !"assay" %in% colnames(S4Vectors::mcols(measurementsTrain))) |
|
275 |
+ if(is.null(S4Vectors::mcols(measurementsTrain)) || !any(c("assay", "feature") %in% colnames(S4Vectors::mcols(measurementsTrain)))) |
|
276 | 276 |
{ |
277 |
- rankedFeatures <- originalFeatures[rankedFeaturesIndices] |
|
277 |
+ rankedFeatures <- colnames(measurementsTrain)[rankedFeaturesIndices] |
|
278 | 278 |
} else { |
279 | 279 |
featureColumns <- na.omit(match(c("assay", "feature"), colnames(S4Vectors::mcols(measurementsTrain)))) |
280 |
- rankedFeatures <- originalFeatures[rankedFeaturesIndices, featureColumns] |
|
280 |
+ rankedFeatures <- S4Vectors::mcols(measurementsTrain)[rankedFeaturesIndices, featureColumns] |
|
281 | 281 |
} |
282 | 282 |
} else { rankedFeatures <- NULL} |
283 | 283 |
if(!is.null(selectedFeaturesIndices)) |
284 | 284 |
{ |
285 |
- if(is.null(S4Vectors::mcols(measurementsTrain)) || !"assay" %in% colnames(S4Vectors::mcols(measurementsTrain))) |
|
285 |
+ if(is.null(S4Vectors::mcols(measurementsTrain)) || !any(c("assay", "feature") %in% colnames(S4Vectors::mcols(measurementsTrain)))) |
|
286 | 286 |
{ |
287 |
- selectedFeatures <- originalFeatures[selectedFeaturesIndices] |
|
287 |
+ selectedFeatures <- colnames(measurementsTrain)[selectedFeaturesIndices] |
|
288 | 288 |
} else { |
289 | 289 |
featureColumns <- na.omit(match(c("assay", "feature"), colnames(S4Vectors::mcols(measurementsTrain)))) |
290 |
- selectedFeatures <- originalFeatures[selectedFeaturesIndices, ] |
|
290 |
+ selectedFeatures <- S4Vectors::mcols(measurementsTrain)[selectedFeaturesIndices, ] |
|
291 | 291 |
} |
292 | 292 |
} else { selectedFeatures <- NULL} |
293 | 293 |
} else { # Nested use in feature selection. No feature selection in inner execution, so ignore features. |
... | ... |
@@ -16,6 +16,8 @@ |
16 | 16 |
\alias{features,ClassifyResult-method} |
17 | 17 |
\alias{models} |
18 | 18 |
\alias{models,ClassifyResult-method} |
19 |
+\alias{finalModel} |
|
20 |
+\alias{finalModel,ClassifyResult-method} |
|
19 | 21 |
\alias{performance} |
20 | 22 |
\alias{performance,ClassifyResult-method} |
21 | 23 |
\alias{tunedParameters} |
... | ... |
@@ -83,6 +85,8 @@ most popular value of the parameter in cross-validation is used.} |
83 | 85 |
\describe{ |
84 | 86 |
\item{\code{models(result)}}{A \code{list} of the models fitted for each training.}} |
85 | 87 |
\describe{ |
88 |
+\item{\code{finalModel(result)}}{A deployable model fitted on all of the data for use on future data.}} |
|
89 |
+\describe{ |
|
86 | 90 |
\item{\code{chosenFeatureNames(result)}}{A \code{list} of the features selected for each training.}} |
87 | 91 |
\describe{ |
88 | 92 |
\item{\code{predictions(result)}}{Returns a \code{DataFrame} which has columns with test sample, |
... | ... |
@@ -27,6 +27,9 @@ |
27 | 27 |
\arguments{ |
28 | 28 |
\item{results}{A list of \code{\link{ClassifyResult}} objects.} |
29 | 29 |
|
30 |
+\item{...}{Parameters not used by the \code{ClassifyResult} method but passed to |
|
31 |
+the \code{list} method.} |
|
32 |
+ |
|
30 | 33 |
\item{mode}{Default: "merge". Whether to merge all predictions of all |
31 | 34 |
iterations of cross-validation into one set or keep them separate. Keeping |
32 | 35 |
them separate will cause separate ROC curves to be computed for each |