@@ -3,7 +3,7 @@ Type: Package
 Title: A framework for cross-validated classification problems, with
        applications to differential variability and differential
        distribution testing
-Version: 3.1.21
+Version: 3.1.22
 Date: 2022-10-14
 Author: Dario Strbenac, Ellis Patrick, John Ormerod, Graham Mann, Jean Yang
 Maintainer: Dario Strbenac <dario.strbenac@sydney.edu.au>

@@ -945,7 +945,7 @@ train.DataFrame <- function(x, outcomeTrain, classifier = "randomForest", perfor
     if(assayIDs == "all") assayIDs <- unique(mcols(measurements)[, "assay"])
     if(is.null(assayIDs)) assayIDs <- 1
     names(assayIDs) <- assayIDs
-    names(classifier) <- classifier
+    names(classifier) <- assayIDs

     if(multiViewMethod == "none"){
         resClassifier <-
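
A note on the `names(classifier)` fix above: per-assay handling downstream presumably looks the classifier vector up by assay ID, which only works if the vector is named by assay ID rather than by its own values. A minimal sketch, with hypothetical assay IDs and classifier names:

```r
# Hypothetical per-assay classifiers and assay IDs, for illustration only.
classifier <- c("randomForest", "SVM")
assayIDs <- c("clinical", "RNA")

# Old behaviour: the vector was named by its own values, so a lookup
# keyed by assay ID fails.
names(classifier) <- classifier
tryCatch(classifier[["RNA"]], error = conditionMessage)
# "subscript out of bounds"

# Fixed behaviour: named by assay ID, so the per-assay lookup succeeds.
names(classifier) <- assayIDs
classifier[["RNA"]]
# "SVM"
```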

@@ -1050,7 +1050,7 @@ train.list <- function(x, outcomeTrain, ...)
         stop("All datasets must have the same samples")

     # Check the number of outcome is the same
-    if (!all(sapply(x, nrow) == length(x)) && !is.character(x))
+    if (!all(sapply(x, nrow) == length(outcomeTrain)) && !is.character(outcomeTrain))
         stop("outcome must have same number of samples as measurements")

     df_list <- sapply(x, S4Vectors::DataFrame)
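
The corrected condition compares each assay's row count to the number of outcomes rather than to the number of assays, and skips the check when `outcomeTrain` is a character vector (i.e. the name of an outcome column rather than the outcomes themselves). A minimal sketch with made-up dimensions:

```r
# Two hypothetical assays measured on the same 10 samples.
x <- list(RNA     = data.frame(matrix(rnorm(100), nrow = 10)),
          protein = data.frame(matrix(rnorm(50),  nrow = 10)))
outcomeTrain <- factor(rep(c("Healthy", "Disease"), each = 5))

# Old check compared row counts to length(x) == 2, the number of
# assays, so valid inputs like these were spuriously rejected.
!all(sapply(x, nrow) == length(x))             # TRUE: spurious failure

# New check compares row counts to the number of outcomes.
!all(sapply(x, nrow) == length(outcomeTrain))  # FALSE: passes
```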

@@ -1065,6 +1065,7 @@ train.list <- function(x, outcomeTrain, ...)

     # Each list of tabular data has been collapsed into a DataFrame.
     # Will be subset to relevant assayIDs inside the DataFrame method.
+
     train(combined_df, outcomeTrain, ...)
 }


@@ -1115,7 +1116,7 @@ predict.trainedByClassifyR <- function(object, newData, ...)
         newData <- prepareData(newData, useFeatures = allFeatureNames(object))
         # Some classifiers dangerously use positional matching rather than column name matching.
         # newData columns are sorted so that the right column ordering is guaranteed.
-    } else {stop("'newData' is not one of the valid data types. It is of type ", class(newData), '.')}
+    }

     if (is(object, "listOfModels"))
         mapply(function(model, assay) predict(model, assay), object, newData, SIMPLIFY = FALSE)
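
For reference, the `listOfModels` branch shown above pairs each trained model with its matching assay positionally via `mapply`. A self-contained sketch of that pattern, using built-in datasets as hypothetical stand-ins for per-assay models and data:

```r
# Two unrelated fitted models standing in for per-assay trained models.
models <- list(lm(dist ~ speed, data = cars),
               lm(mpg ~ wt, data = mtcars))
# New data for each assay, in the same order as the models.
newDataList <- list(cars[1:3, ], mtcars[1:3, ])

# Each model predicts on its own assay's data; SIMPLIFY = FALSE keeps
# the per-assay predictions as a list.
mapply(function(model, assay) predict(model, assay),
       models, newDataList, SIMPLIFY = FALSE)
```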

@@ -140,9 +140,9 @@

     tuneParamsTrain <- list(topN = topNfeatures)
     tuneParamsTrain <- append(tuneParamsTrain, modellingParams@trainParams@tuneParams)
-    tuneParamsTrain <- tuneParamsTrain[-match("performanceType", names(tuneParamsTrain))]
     tuneCombosTrain <- expand.grid(tuneParamsTrain, stringsAsFactors = FALSE)
     modellingParams@trainParams@tuneParams <- NULL
+
     allPerformanceTables <- lapply(rankings, function(rankingsVariety)
     {
         # Creates a matrix. Columns are top n features, rows are varieties (one row if None).
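
To illustrate the grid construction kept in this hunk: `expand.grid` crosses the top-N feature cutoffs with any other tuning parameters, yielding one row per parameter combination to evaluate. A small sketch with hypothetical parameter values:

```r
# Hypothetical tuning settings: top-N feature counts plus one other
# classifier parameter.
tuneParamsTrain <- list(topN = c(10, 20, 50))
tuneParamsTrain <- append(tuneParamsTrain, list(cost = c(0.1, 1, 10)))

# One row per combination of all tuning parameters.
tuneCombosTrain <- expand.grid(tuneParamsTrain, stringsAsFactors = FALSE)
nrow(tuneCombosTrain)     # 9: every topN crossed with every cost
head(tuneCombosTrain, 3)
```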

@@ -280,7 +280,6 @@ The naive Bayes kernel classifier by default uses the vertical distance between
 Now, the classification error for each sample is also calculated for both the differential means and differential distribution classifiers and both *ClassifyResult* objects generated so far are plotted with *samplesMetricMap*.

 ```{r, fig.width = 10, fig.height = 7}
-library(grid)
 DMresults <- calcCVperformance(DMresults, "Sample Error")
 DDresults <- calcCVperformance(DDresults, "Sample Error")
 resultsList <- list(Abundance = DMresults, Distribution = DDresults)