
- Variable importance calculation updated for the new feature and dataset safe renaming.
- Documentation of ranking functions updated to explain that they now return indices, not identifiers.
- Ensemble feature selection in .doSelection fixed for the newer format of runTest parameters.
- crossValidate updated for safe renaming.

Dario Strbenac authored on 13/07/2022 11:30:39
Showing 27 changed files

... ...
@@ -72,10 +72,10 @@ Collate:
     'rankingLimma.R'
     'rankingPairsDifferences.R'
     'rankingPlot.R'
-    'rankingSelectMulti.R'
     'runTest.R'
     'runTests.R'
     'samplesMetricMap.R'
+    'selectMulti.R'
     'selectionPlot.R'
     'simpleParams.R'
     'subtractFromLocation.R'
... ...
@@ -165,6 +165,7 @@ setMethod("crossValidate", "DataFrame",
                                  set.seed(seed)
                                  measurementsUse <- measurements
                                  if(!is.null(mcols(measurements))) measurementsUse <- measurements[, mcols(measurements)[, "dataset"] == dataIndex, drop = FALSE]
+                                  
                                  CV(
                                      measurements = measurementsUse, classes = classes,
                                      nFeatures = nFeatures[dataIndex],
... ...
@@ -233,7 +234,7 @@ setMethod("crossValidate", "DataFrame",


                  if(is.null(dataCombinations)){
-                      dataCombinations <- do.call("c", sapply(seq_len(length(datasetIDs)),function(n)combn(datasetIDs, n, simplify = FALSE)))
+                      dataCombinations <- do.call("c", sapply(seq_along(datasetIDs),function(n)combn(datasetIDs, n, simplify = FALSE)))
                      dataCombinations <- dataCombinations[sapply(dataCombinations, function(x)"clinical"%in%x, simplify = TRUE)]
                      if(length(dataCombinations)==0) stop("No dataCombinations with `clinical` data")
                  }
... ...
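The two crossValidate hunks on either side of this point make the same change from seq_len(length(...)) to the more idiomatic seq_along(). A small, self-contained sketch of the combination enumeration they perform; the dataset IDs below are hypothetical:

```r
# Hypothetical dataset IDs, for illustration only.
datasetIDs <- c("clinical", "RNA", "protein")

# Every non-empty combination of datasets, flattened into a single list.
dataCombinations <- do.call("c", sapply(seq_along(datasetIDs),
                                        function(n) combn(datasetIDs, n, simplify = FALSE)))

# Only combinations that include the clinical table are kept.
dataCombinations <- dataCombinations[sapply(dataCombinations, function(x) "clinical" %in% x)]
length(dataCombinations)  # 4 of the 7 possible combinations contain "clinical"
```

seq_along(datasetIDs) and seq_len(length(datasetIDs)) produce the same sequence here; the change is stylistic rather than behavioural.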
@@ -267,7 +268,7 @@ setMethod("crossValidate", "DataFrame",


                  if(is.null(dataCombinations)){
-                      dataCombinations <- do.call("c", sapply(seq_len(length(datasetIDs)),function(n)combn(datasetIDs, n, simplify = FALSE)))
+                      dataCombinations <- do.call("c", sapply(seq_along(datasetIDs),function(n)combn(datasetIDs, n, simplify = FALSE)))
                      dataCombinations <- dataCombinations[sapply(dataCombinations, function(x)"clinical"%in%x, simplify = TRUE)]
                      if(length(dataCombinations)==0) stop("No dataCombinations with `clinical` data")
                  }
... ...
@@ -611,8 +612,6 @@ generateModellingParams <- function(datasetIDs,
                                    classifier,
                                    multiViewMethod = "none"
 ){
-
-
    if(multiViewMethod != "none") {
        params <- generateMultiviewParams(datasetIDs,
                                          measurements,
... ...
@@ -627,7 +626,8 @@ generateModellingParams <- function(datasetIDs,



-    obsFeatures <- sum(mcols(measurements)[, "dataset"] %in% datasetIDs)
+    if(length(datasetIDs) > 1) obsFeatures <- sum(mcols(measurements)[, "dataset"] %in% datasetIDs)
+    else obsFeatures <- ncol(measurements)


    nFeatures <- unlist(nFeatures)
... ...
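A hedged note on the generateModellingParams hunk above: a plain DataFrame that was not assembled from several datasets typically has NULL column metadata, so the feature count presumably has to come from ncol() in the single-dataset case. A made-up illustration:

```r
library(S4Vectors)

# Made-up single-dataset measurements; mcols() is NULL for an un-annotated DataFrame.
measurements <- DataFrame(age = c(55, 60, 48), gene1 = c(1.2, 0.8, 1.5), gene2 = c(2.1, 1.9, 2.4))
datasetIDs <- "clinical"

if(length(datasetIDs) > 1) {
  obsFeatures <- sum(mcols(measurements)[, "dataset"] %in% datasetIDs)  # multi-dataset path
} else {
  obsFeatures <- ncol(measurements)  # single-dataset path introduced above
}
obsFeatures  # 3
```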
@@ -146,6 +146,7 @@ setMethod("SVMpredictInterface", c("svm", "DataFrame"), function(model, measurem
  
  # Prediction function depends on test data having same set of columns in same order as
  # selected features used for training.
+  colnames(measurementsTest) <- make.names(colnames(measurementsTest))
  measurementsTest <- measurementsTest[, colnames(model[["SV"]])]
  classPredictions <- predict(model, measurementsTest, probability = TRUE)
  
... ...
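Regarding the SVMpredictInterface hunk above: make.names() rewrites the test data's column names into syntactically valid R names, so that the subsequent column matching against colnames(model[["SV"]]) still succeeds when the original feature names contain spaces or other characters that R would rewrite. A tiny example with hypothetical names:

```r
# Hypothetical feature names containing characters that R rewrites.
make.names(c("gene 1", "gene-2", "2gene"))
# [1] "gene.1" "gene.2" "X2gene"
```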
@@ -31,9 +31,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in
+#' the first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
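The @return rewrite above, and the matching rewrites in the ranking files that follow, document that ranking functions now return column indices rather than feature identifiers. A minimal sketch of using such a return value, with hypothetical data:

```r
library(S4Vectors)

# Hypothetical training data; a ranking function now returns column indices.
measurements <- DataFrame(geneA = rnorm(10), geneB = rnorm(10), geneC = rnorm(10))
rankedIndices <- c(2L, 3L, 1L)         # most promising feature first

# Identifiers can still be recovered by indexing the column names.
colnames(measurements)[rankedIndices]  # "geneB" "geneC" "geneA"
```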
@@ -29,9 +29,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indicies, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @importFrom survival coxph
 #' @rdname coxphRanking
 #' @usage NULL
... ...
@@ -32,9 +32,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
@@ -38,9 +38,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @references edgeR: a Bioconductor package for differential expression
 #' analysis of digital gene expression data, Mark D. Robinson, Davis McCarthy,
... ...
@@ -25,9 +25,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
@@ -34,9 +34,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
@@ -26,9 +26,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
@@ -37,9 +37,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indices, from the most promising features in the
+#' first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @examples
 #' 
... ...
@@ -25,9 +25,8 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A vector or data frame (if \code{MultiAssayExperiment} input) of
-#' features, from the most promising features in the first position to the
-#' least promising feature in the last position.
+#' @return A vector of feature indicies, from the most promising features in
+#' the first position to the least promising feature in the last position.
 #' @author Dario Strbenac
 #' @references Limma: linear models for microarray data, Gordon Smyth, 2005,
 #' In: Bioinformatics and Computational Biology Solutions using R and
... ...
@@ -31,7 +31,7 @@
 #' @param verbose Default: 3. A number between 0 and 3 for the amount of
 #' progress messages to give.  This function only prints progress messages if
 #' the value is 3.
-#' @return A \code{\link{Pairs}} object, from the most promising feature pair
+#' @return A vector of feature indices, from the most promising feature pair
 #' in the first position to the least promising feature pair in the last
 #' position.
 #' @author Dario Strbenac
... ...
@@ -119,9 +119,9 @@ function(measurementsTrain, outcomesTrain, measurementsTest, outcomesTest,
    {
      S4Vectors::mcols(measurementsTrain) <- featuresInfo[, c("Renamed Dataset", "Renamed Feature")]
      S4Vectors::mcols(measurementsTest) <- featuresInfo[, c("Renamed Dataset", "Renamed Feature")]
+      colnames(measurementsTrain) <- colnames(measurementsTest) <- paste(featuresInfo[["Renamed Dataset"]], featuresInfo[["Renamed Feature"]], sep = '')
    } else {
-      colnames(measurementsTrain) <- featuresInfo[, "Renamed Feature"]
-      colnames(measurementsTest) <- featuresInfo[, "Renamed Feature"]
+      colnames(measurementsTrain) <- colnames(measurementsTest) <- featuresInfo[, "Renamed Feature"]
    }
  }
    
... ...
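The runTest hunk above builds unique column names for the multi-assay case by concatenating the renamed dataset and renamed feature labels, so identically named features from different assays can no longer collide. A small sketch with invented featuresInfo values:

```r
# Invented featuresInfo rows, mirroring the renaming scheme above.
featuresInfo <- data.frame("Renamed Dataset" = c("Dataset1", "Dataset1", "Dataset2"),
                           "Renamed Feature" = c("Feature1", "Feature2", "Feature1"),
                           check.names = FALSE)
paste(featuresInfo[["Renamed Dataset"]], featuresInfo[["Renamed Feature"]], sep = '')
# [1] "Dataset1Feature1" "Dataset1Feature2" "Dataset2Feature1"
```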
@@ -185,8 +185,11 @@ input data. Autmomatically reducing to smaller number.")
    tuneDetailsSelect <- topFeatures[[3]]

    if(modellingParams@selectParams@subsetToSelections == TRUE)
+    {
      measurementsTrain <- measurementsTrain[, selectedFeaturesIndices, drop = FALSE]
-  } 
+      measurementsTest <- measurementsTest[, selectedFeaturesIndices, drop = FALSE]
+    }
+  }
  
  # Training stage.
  if(length(modellingParams@trainParams@intermediate) > 0)
... ...
@@ -232,25 +235,16 @@ input data. Autmomatically reducing to smaller number.")
  importanceTable <- NULL
  if(is.numeric(.iteration) && modellingParams@doImportance == TRUE)
  {
-    nSelected <- ifelse(is.null(ncol(selectedFeatures)), length(selectedFeatures), nrow(selectedFeatures))
    performanceMP <- modellingParams@selectParams@tuneParams[["performanceType"]]
    performanceType <- ifelse(!is.null(performanceMP), performanceMP, "Balanced Error")
-    performancesWithoutEach <- sapply(1:nSelected, function(selectedIndex)
+    performancesWithoutEach <- sapply(selectedFeaturesIndices, function(selectedIndex)
    {
-      if(is.null(S4Vectors::mcols(measurementsTrain)))
-      { # Input was ordinary matrix or DataFrame.
-        measurementsTrainLess1 <- measurementsTrain[, selectedFeatures[-selectedIndex], drop = FALSE]
-      } else { # Input was MultiAssayExperiment. # Match the selected features to the data frame columns
-        selectedIDs <-  do.call(paste, selectedFeatures[-selectedIndex, ])
-        featuresIDs <- do.call(paste, S4Vectors::mcols(measurementsTrain)[, c("dataset", "feature")])
-        useColumns <- match(selectedIDs, featuresIDs)
-        measurementsTrainLess1 <- measurementsTrain[, useColumns, drop = FALSE]
-      }
-         
-      modelWithoutOne <- tryCatch(.doTrain(measurementsTrainLess1, outcomesTrain, measurementsTest, outcomesTest, modellingParams, verbose),
+      measurementsTrainLess1 <- measurementsTrain[, -selectedIndex, drop = FALSE]
+      measurementsTestLess1 <- measurementsTest[, -selectedIndex, drop = FALSE]
+      modelWithoutOne <- tryCatch(.doTrain(measurementsTrainLess1, outcomesTrain, measurementsTestLess1, outcomesTest, modellingParams, verbose),
                                  error = function(error) error[["message"]])
      if(!is.null(modellingParams@predictParams))
-      predictedOutcomesWithoutOne <- tryCatch(.doTest(modelWithoutOne[["model"]], measurementsTest, modellingParams@predictParams, verbose),
+      predictedOutcomesWithoutOne <- tryCatch(.doTest(modelWithoutOne[["model"]], measurementsTestLess1, modellingParams@predictParams, verbose),
                                              error = function(error) error[["message"]])
      else predictedOutcomesWithoutOne <- modelWithoutOne[["model"]]

... ...
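A hedged, self-contained sketch of the leave-one-feature-out importance loop in the hunk above, with glm() standing in for the package's .doTrain()/.doTest() and a simple balanced error rate as the performance measure; all data and names below are invented:

```r
set.seed(1)
measurementsTrain <- data.frame(matrix(rnorm(200), nrow = 20))
measurementsTest  <- data.frame(matrix(rnorm(200), nrow = 20))
classesTrain <- factor(rep(c("A", "B"), each = 10))
classesTest  <- factor(rep(c("A", "B"), each = 10))
selectedFeaturesIndices <- c(2, 5, 9)

balancedError <- function(truth, predicted)
  mean(sapply(levels(truth), function(cls) mean(predicted[truth == cls] != cls)))

performancesWithoutEach <- sapply(selectedFeaturesIndices, function(selectedIndex)
{
  # Drop one selected feature from both training and test data, as in the hunk above.
  measurementsTrainLess1 <- measurementsTrain[, -selectedIndex, drop = FALSE]
  measurementsTestLess1 <- measurementsTest[, -selectedIndex, drop = FALSE]
  model <- glm(classesTrain ~ ., data = cbind(measurementsTrainLess1, classesTrain),
               family = binomial)
  probabilities <- predict(model, measurementsTestLess1, type = "response")
  predictedClasses <- factor(ifelse(probabilities > 0.5, "B", "A"), levels = levels(classesTest))
  balancedError(classesTest, predictedClasses)
})
# A feature matters if leaving it out noticeably worsens the performance measure.
```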
@@ -113,7 +113,7 @@ input data. Autmomatically reducing to smaller number.")
  {
    if(verbose >= 1 && setNumber %% 10 == 0)
      message("Processing sample set ", setNumber, '.')
-
+      
    # crossValParams is needed at least for nested feature tuning.
    runTest(measurements[trainingSamples, , drop = FALSE], outcomes[trainingSamples],
            measurements[testSamples, , drop = FALSE], outcomes[testSamples],
similarity index 64%
rename from R/rankingSelectMulti.R
rename to R/selectMulti.R
... ...
@@ -5,17 +5,15 @@ setGeneric("selectMulti", function(measurementsTrain, classesTrain, params, ...)
 setMethod("selectMulti", "DataFrame",
           function(measurementsTrain, classesTrain, params, verbose = 0)
           {
-              
-              assayTrain <- sapply(unique(mcols(measurementsTrain)[["dataset"]]), function(x) measurementsTrain[,mcols(measurementsTrain)[["dataset"]]%in%x], simplify = FALSE)
-              
-              selectedFeatures <- mapply(.doSelection, 
+              assayTrain <- sapply(unique(mcols(measurementsTrain)[["Renamed Dataset"]]), function(x) measurementsTrain[, mcols(measurementsTrain)[["Renamed Dataset"]] %in% x], simplify = FALSE)
+
+              featuresIndices <- mapply(.doSelection, 
                                          measurements = assayTrain,
-                                         modellingParams = params[names(assayTrain)],
+                                         modellingParams = params,
                                          MoreArgs = list(outcomesTrain = classesTrain, 
                                                          crossValParams = CrossValParams(permutations = 1, folds = 5), ###### Where to get this from?
-                                                         verbose = 0)
-              )
+                                                         verbose = 0), SIMPLIFY = FALSE
+                                        )
               
-              do.call("rbind", selectedFeatures[2,])
-              #S4Vectors::DataFrame(dataset = rep(names(selectedFeatures[2,]), unlist(lapply(selectedFeatures[2,], length))), feature = unlist(selectedFeatures[2,]))
+              unique(unlist(lapply(featuresIndices, "[[", 2)))
           })
... ...
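For the selectMulti method above: each assay contributes the selected element (position 2) of its .doSelection() result, and the union of those indices is returned. A tiny sketch with invented per-assay results:

```r
# Invented per-assay selection results; element 2 of each holds the selected indices.
featuresIndices <- list(clinical = list(NULL, c(1, 3), NULL),
                        RNA      = list(NULL, c(2, 3, 7), NULL))

unique(unlist(lapply(featuresIndices, "[[", 2)))
# [1] 1 3 2 7
```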
@@ -354,25 +354,25 @@
      
      list(ranked = rankingUse, selected = selectionIndices, tune = tuneDetails)
    } else if(is.list(featureRanking)) { # It is a list of functions for ensemble selection.
-      featuresLists <- mapply(function(selector, selParams)
+      featuresIndiciesLists <- mapply(function(selector, selParams)
      {
        paramList <- list(measurementsTrain, outcomesTrain, trainParams = trainParams,
                          predictParams = predictParams, verbose = verbose)
        paramList <- append(paramList, selParams)
        do.call(selector, paramList)
-      }, modellingParams@selectParams@featureRanking, modellingParams@selectParams@featureRanking, SIMPLIFY = FALSE)
+      }, modellingParams@selectParams@featureRanking, modellingParams@selectParams@otherParams, SIMPLIFY = FALSE)

      performances <- sapply(topNfeatures, function(topN)
      {
-        topIndices <- unlist(lapply(featuresLists, function(features) features[1:topN]))
+        topIndices <- unlist(lapply(featuresIndiciesLists, function(featuresIndicies) featuresIndicies[1:topN]))
        topIndicesCounts <- table(topIndices)
        keep <- names(topIndicesCounts)[topIndicesCounts >= modellingParams@selectParams@minPresence]
-        measurementsSelected <- measurementsTrain[, keep, drop = FALSE] # Features in columns
+        measurementsTrain <- measurementsTrain[, as.numeric(keep), drop = FALSE] # Features in columns
        
        if(crossValParams@tuneMode == "Resubstitution")
        {
-          result <- runTest(measurementsSelected, classesTrain,
-                            training = 1:nrow(measurementsSelected), testing = 1:nrow(measurementsSelected),
+          result <- runTest(measurementsTrain, outcomesTrain,
+                            measurementsTrain, outcomesTrain,
                            crossValParams = NULL, modellingParams,
                            verbose = verbose, .iteration = "internal")
          predictions <- result[["predictions"]]
... ...
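For the ensemble selection hunk above, a small sketch of how the per-ranker top-N indices are tallied against the minimum presence threshold; the rankings and threshold below are invented:

```r
# Invented rankings (column indices) from two feature-ranking functions.
featuresIndiciesLists <- list(ranker1 = c(5, 2, 9, 1), ranker2 = c(2, 5, 7, 3))
topN <- 2
minPresence <- 2  # stands in for modellingParams@selectParams@minPresence

topIndices <- unlist(lapply(featuresIndiciesLists, function(featuresIndicies) featuresIndicies[1:topN]))
topIndicesCounts <- table(topIndices)
keep <- names(topIndicesCounts)[topIndicesCounts >= minPresence]
as.numeric(keep)  # indices 2 and 5 appear in the top 2 of both rankers, so both are kept
```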
@@ -389,13 +389,13 @@
      })
      bestOne <- ifelse(betterValues == "lower", which.min(performances)[1], which.max(performances)[1])
      
-      selectedFeatures <- unlist(lapply(featuresLists, function(featuresList) featuresList[1:topNfeatures[bestOne]]))
-      names(table(selectedFeatures))[table(selectedFeatures) >= modellingParams@selectParams@minPresence]
+      selectionIndices <- unlist(lapply(featuresLists, function(featuresList) featuresList[1:topNfeatures[bestOne]]))
+      names(table(selectionIndices))[table(selectionIndices) >= modellingParams@selectParams@minPresence]
      
-      list(NULL, selectedFeatures, NULL)
+      list(NULL, selectionIndices, NULL)
    } else { # Previous selection
      selectedFeatures <- 
-      list(NULL, selectedFeatures, NULL)
+      list(NULL, selectionIndices, NULL)
    }
}

... ...
@@ -45,9 +45,8 @@ and specifies that numeric variables from the sample information data table will
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Ranks features from largest Kolmogorov-Smirnov distance to smallest.
... ...
@@ -45,9 +45,8 @@ and specifies that numeric variables from the sample information data table will
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Ranks features from largest Kullback-Leibler distance between classes to
... ...
@@ -47,9 +47,8 @@ and specifies that numeric variables from the sample information table will be
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in
+the first position to the least promising feature in the last position.
 }
 \description{
 Ranks all features from largest Bartlett statistic to smallest.
... ...
@@ -45,9 +45,8 @@ and specifies that numeric variables from the clinical data table will be
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indicies, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Ranks all features from largest coxph statistic to smallest.
... ...
@@ -40,9 +40,8 @@ the value is 3.}
 used in the analysis.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Uses an ordinary t-test if the data set has two classes or one-way ANOVA if
... ...
@@ -55,9 +55,8 @@ the value is 3.}
 names of the data tables of counts to be used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Performs a differential expression analysis between classes and ranks the
... ...
@@ -44,9 +44,8 @@ and specifies that numeric variables from the sample information table will be
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Ranks features by largest Levene statistic.
... ...
@@ -57,9 +57,8 @@ and specifies that numeric variables from the sample information data table will
 used.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indices, from the most promising features in the
+first position to the least promising feature in the last position.
 }
 \description{
 Ranks features from largest difference of log likelihoods (null hypothesis -
... ...
@@ -36,9 +36,8 @@ the value is 3.}
 used in the analysis.}
 }
 \value{
-A vector or data frame (if \code{MultiAssayExperiment} input) of
-features, from the most promising features in the first position to the
-least promising feature in the last position.
+A vector of feature indicies, from the most promising features in
+the first position to the least promising feature in the last position.
 }
 \description{
 Uses a moderated F-test with empirical Bayes shrinkage to rank
... ...
@@ -57,7 +57,7 @@ the value is 3.}
 name of the data table to be used.}
 }
 \value{
-A \code{\link{Pairs}} object, from the most promising feature pair
+A vector of feature indices, from the most promising feature pair
 in the first position to the least promising feature pair in the last
 position.
 }