check and biocheck OK

Simone authored on 03/10/2017 17:36:09
Showing 85 changed files

... ...
@@ -21,7 +21,7 @@ License: Artistic-2.0
21 21
 Encoding: UTF-8
22 22
 LazyData: true
23 23
 RoxygenNote: 6.0.1
24
-Imports: rscala(>= 2.4.0), httr, GenomicRanges, rtracklayer, data.table, utils, plyr, xml2, methods, S4Vectors, dplyr
24
+Imports: rscala(>= 2.4.0), httr, GenomicRanges, rtracklayer, data.table, utils, plyr, xml2, methods, S4Vectors, dplyr, stats
25 25
 Depends: R(>= 3.3.2)
26 26
 VignetteBuilder: knitr
27 27
 Suggests: BiocStyle, knitr, rmarkdown
... ...
@@ -83,5 +83,6 @@ importFrom(methods,is)
83 83
 importFrom(plyr,revalue)
84 84
 importFrom(rtracklayer,export)
85 85
 importFrom(rtracklayer,import)
86
+importFrom(stats,setNames)
86 87
 importFrom(utils,read.delim)
87 88
 importFrom(utils,write.table)
... ...
@@ -59,26 +59,26 @@
59 59
 #'
60 60
 #' @examples
61 61
 #' 
62
-#' ### This GMQL statement produces an output dataset with a single output sample. 
63
-#' The COVER operation considers all areas defined by a minimum of two overlapping regions 
64
-#' in the input samples, up to any amount of overlapping regions.
62
+#' ## This GMQL statement produces an output dataset with a single output sample. 
63
+#' ## The COVER operation considers all areas defined by a minimum of two overlapping regions 
64
+#' ## in the input samples, up to any amount of overlapping regions.
65 65
 #' 
66 66
 #' initGMQL("gtf")
67 67
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
68
-#' exp = read(test_path)
68
+#' exp = readDataset(test_path)
69 69
 #' res = cover(input_data = exp,2,"ANY")
70 70
 #'
71 71
 #' \dontrun{
72
-#' ### This GMQL statement computes the result grouping the input exp samples by the values of 
73
-#' their cell metadata attribute, 
74
-#' thus one output res sample is generated for each cell type; 
75
-#' output regions are produced where at least 2 and at most 3 regions of grouped exp samples 
76
-#' overlap, setting as attributes of the resulting regions the minimum pValue of the overlapping regions 
77
-#' (min_pvalue) and their Jaccard indexes (JaccardIntersect and JaccardResult).
72
+#' ## This GMQL statement computes the result grouping the input exp samples by the values of 
73
+#' ## their cell metadata attribute, 
74
+#' ## thus one output res sample is generated for each cell type; 
75
+#' ## output regions are produced where at least 2 and at most 3 regions of grouped exp samples 
76
+#' ## overlap, setting as attributes of the resulting regions the minimum pvalue of the overlapping regions 
77
+#' ## (min_pvalue) and their Jaccard indexes (JaccardIntersect and JaccardResult).
78 78
 #' 
79 79
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
80 80
 #' exp = read(test_path)
81
-#' res = cover(input_data = exp,2,3, c("cell"), list(min_pValue = MIN(pValue)))
81
+#' res = cover(input_data = exp,2,3, c("cell"), list(min_pValue = MIN("pvalue")))
82 82
 #' }
83 83
 #' @export
84 84
 #'
... ...
@@ -136,17 +136,17 @@ cover <- function(input_data, minAcc, maxAcc, groupBy = NULL, aggregates = NULL)
136 136
 #'
137 137
 #' @examples
138 138
 #'
139
-#' ### This GMQL statement computes the result grouping the input \emph{exp} samples 
140
-#' by the values of their \emph{cell} metadata attribute, 
141
-#' thus one output \emph{res} sample is generated for each cell type. 
142
-#' Output regions are produced by dividing results from COVER in contiguous subregions 
143
-#' according to the varying accumulation values (from 2 to 4 in this case): 
144
-#' one region for each accumulation value;
139
+#' ## This GMQL statement computes the result grouping the input \emph{exp} samples 
140
+#' ## by the values of their \emph{cell} metadata attribute, 
141
+#' ## thus one output \emph{res} sample is generated for each cell type. 
142
+#' ## Output regions are produced by dividing results from COVER in contiguous subregions 
143
+#' ## according to the varying accumulation values (from 2 to 4 in this case): 
144
+#' ## one region for each accumulation value;
145 145
 #'
146 146
 #' initGMQL("gtf")
147 147
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
148
-#' exp = read(test_path)
149
-#' res = histogram(exp, 2,4,groupBy = c("cell")) exp 
148
+#' exp = readDataset(test_path)
149
+#' res = histogram(exp, 2,4,groupBy = c("cell"))
150 150
 #' 
151 151
 #' @export
152 152
 #'
... ...
@@ -206,16 +206,16 @@ histogram <- function(input_data, minAcc, maxAcc, groupBy = NULL, aggregates = N
206 206
 #'
207 207
 #' @examples
208 208
 #'
209
-#' ### This GMQL statement computes the result grouping the input \emph{exp} samples by the values 
210
-#' of their \emph{cell} metadata attribute, thus one output \emph{res} sample is generated 
211
-#' for each cell type.
212
-#' Output regions are produced by extracting the highest accumulation overlapping 
213
-#' (sub)regions according to the methodologies described above;
209
+#' ## This GMQL statement computes the result grouping the input \emph{exp} samples by the values 
210
+#' ## of their \emph{cell} metadata attribute, thus one output \emph{res} sample is generated 
211
+#' ## for each cell type.
212
+#' ## Output regions are produced by extracting the highest accumulation overlapping 
213
+#' ## (sub)regions according to the methodologies described above;
214 214
 #'
215 215
 #'
216 216
 #' initGMQL("gtf")
217 217
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
218
-#' exp = read(test_path)
218
+#' exp = readDataset(test_path)
219 219
 #' res = summit(input_data = exp,2,4, c("cell"))
220 220
 #' 
221 221
 #' @export
... ...
@@ -274,15 +274,15 @@ summit <- function(input_data, minAcc, maxAcc, groupBy = NULL, aggregates = NULL
274 274
 #'
275 275
 #' @examples
276 276
 #' 
277
-#' ### This GMQL statement computes the result grouping the input \emph{exp} samples by 
278
-#' the values of their \emph{cell} metadata attribute, thus one output \emph{res} sample 
279
-#' is generated for each cell type. 
280
-#' Output regions are produced by concatenating all regions which would have been used 
281
-#' to construct a COVER(2,4) statement on the same dataset; 
277
+#' ## This GMQL statement computes the result grouping the input \emph{exp} samples by 
278
+#' ## the values of their \emph{cell} metadata attribute, thus one output \emph{res} sample 
279
+#' ## is generated for each cell type. 
280
+#' ## Output regions are produced by concatenating all regions which would have been used 
281
+#' ## to construct a COVER(2,4) statement on the same dataset; 
282 282
 #' 
283 283
 #' initGMQL("gtf")
284 284
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
285
-#' exp = read(test_path)
285
+#' exp = readDataset(test_path)
286 286
 #' res = flat(input_data = exp,2,4, c("cell"))
287 287
 #'
288 288
 #' @export
... ...
@@ -34,8 +34,8 @@
34 34
 #'
35 35
 #' @examples
36 36
 #'
37
-#' #### This GMQL statement returns all the regions in the first dataset that do not 
38
-#' overlap any region in the second dataset.
37
+#' ## This GMQL statement returns all the regions in the first dataset that do not 
38
+#' ## overlap any region in the second dataset.
39 39
 #' 
40 40
 #' initGMQL("gtf")
41 41
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
... ...
@@ -45,10 +45,10 @@
45 45
 #' out = difference(r_left,r_right)
46 46
 #' 
47 47
 #' \dontrun{
48
-#' ### This GMQL statement extracts for every pair of samples s1 in EXP1 and s2 in EXP2
49
-#' having the same value of the metadata attribute 'antibody_target'
50
-#' the regions that appear in s1 but do not overlap any region in s2; 
51
-#' metadata of the result are the same as the metadata of s1.
48
+#' ## This GMQL statement extracts for every pair of samples s1 in EXP1 and s2 in EXP2
49
+#' ## having the same value of the metadata attribute 'antibody_target'
50
+#' ## the regions that appear in s1 but do not overlap any region in s2; 
51
+#' ## metadata of the result are the same as the metadata of s1.
52 52
 #' 
53 53
 #' initGMQL("gtf")
54 54
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
... ...
@@ -30,22 +30,22 @@
30 30
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
31 31
 #' r = readDataset(test_path)
32 32
 #'
33
-#' ### it counts the regions in each sample and stores their number as value of the new metadata 
34
-#' RegionCount attribute of the sample.
35
-#' e = extend(input_data = r, list(RegionCount = COUNT())
33
+#' ## it counts the regions in each sample and stores their number as value of the new metadata 
34
+#' ## RegionCount attribute of the sample.
35
+#' e = extend(input_data = r, list(RegionCount = COUNT()))
36 36
 #' \dontrun{
37 37
 #' 
38 38
 #' initGMQL("gtf")
39 39
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
40 40
 #' exp = readDataset(test_path)
41 41
 #'
42
-#' ### it copies all samples of exp dataset into res dataset, and then calculates 
43
-#' for each of them two new metadata attributes:
44
-#' 1. RegionCount is the number of sample regions;
45
-#' 2. MinP is the minimum Pvalue of the sample regions.
46
-#' res sample regions are the same as the ones in exp.
42
+#' ## it copies all samples of exp dataset into res dataset, and then calculates 
43
+#' ## for each of them two new metadata attributes:
44
+#' ##  1. RegionCount is the number of sample regions;
45
+#' ##  2. MinP is the minimum pvalue of the sample regions.
46
+#' ## res sample regions are the same as the ones in exp.
47 47
 #' 
48
-#' res = extend(input_data = exp, list(RegionCount = COUNT(),MinP = MIN(pValue))
48
+#' res = extend(input_data = exp, list(RegionCount = COUNT(),MinP = MIN(pvalue)))
49 49
 #' 
50 50
 #' }
51 51
 #' 
... ...
@@ -45,18 +45,18 @@
45 45
 #'
46 46
 #' @examples
47 47
 #' 
48
-#' ### Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
49
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
50
-#' and takes the first/closest one for each TSS, 
51
-#' provided that such distance is lesser than 120K bases and joined 'tss' and 'hm' samples are obtained 
52
-#' from the same provider (joinby clause).
48
+#' ## Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
49
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
50
+#' ## and takes the first/closest one for each TSS, 
51
+#' ## provided that such distance is lesser than 120K bases and joined 'tss' and 'hm' samples are obtained 
52
+#' ## from the same provider (joinby clause).
53 53
 #' 
54
-#' #' initGMQL("gtf")
54
+#' initGMQL("gtf")
55 55
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
56 56
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
57 57
 #' TSS = readDataset(test_path)
58 58
 #' HM = readDataset(test_path2)
59
-#' join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DLE("120000"))),c("provider"),region_output="RIGHT")
59
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DLE(120000))),c("provider"),region_output="RIGHT")
60 60
 #'
61 61
 #' @export
62 62
 #'
... ...
@@ -47,19 +47,19 @@
47 47
 #'
48 48
 #' @examples
49 49
 #'
50
-#' ### it counts the number of regions in each sample from exp that overlap with a ref region, 
51
-#' and for each ref region it computes the minimum score of all the regions in each exp sample 
52
-#' that overlap with it. 
53
-#' The MAP joinby option ensures that only the exp samples referring to the same 'cell_tissue' 
54
-#' of a ref sample are mapped on such ref sample; 
55
-#' exp samples with no cell_tissue metadata attribute, or with such metadata 
56
-#' but with a different value from the one(s) of ref sample(s), are disregarded.
50
+#' ## it counts the number of regions in each sample from exp that overlap with a ref region, 
51
+#' ## and for each ref region it computes the minimum score of all the regions in each exp sample 
52
+#' ## that overlap with it. 
53
+#' ## The MAP joinby option ensures that only the exp samples referring to the same 'cell_tissue' 
54
+#' ## of a ref sample are mapped on such ref sample; 
55
+#' ## exp samples with no cell_tissue metadata attribute, or with such metadata 
56
+#' ## but with a different value from the one(s) of ref sample(s), are disregarded.
57 57
 #' 
58 58
 #' initGMQL("gtf")
59 59
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
60 60
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
61
-#' exp = read(test_path)
62
-#' ref = read(test_path2)
61
+#' exp = readDataset(test_path)
62
+#' ref = readDataset(test_path2)
63 63
 #' out = map(ref,exp, list(minScore = MIN("score")), joinBy = c("cell_tissue") )
64 64
 #' 
65 65
 #' 
... ...
@@ -68,7 +68,7 @@
68 68
 map <- function(left_input_data, right_input_data, aggregates = NULL, joinBy = NULL)
69 69
 {
70 70
   if(!is.null(aggregates))
71
-    metadata_matrix <- .aggregates(metadata,"OPERATOR")
71
+    metadata_matrix <- .aggregates(aggregates,"OPERATOR")
72 72
   else
73 73
     metadata_matrix = scalaNull("Array[Array[String]]")
74 74
 
... ...
@@ -77,7 +77,7 @@ map <- function(left_input_data, right_input_data, aggregates = NULL, joinBy = N
77 77
   else
78 78
     join_condition_matrix <- scalaNull("Array[Array[String]]")
79 79
 
80
-  out<-WrappeR$map(join_condition_matrix,aggregates,left_input_data$value,right_input_data$value)
80
+  out<-WrappeR$map(join_condition_matrix,metadata_matrix,left_input_data$value,right_input_data$value)
81 81
 
82 82
   if(grepl("No",out,ignore.case = TRUE))
83 83
     stop(out)
... ...
@@ -18,8 +18,10 @@
18 18
 #' s = select(input_data = r)
19 19
 #' m = merge(groupBy = c("antibody_targer","cell_karyotype"),input_data = s)
20 20
 #' materialize(input_data = m, dir_out = test_path)
21
-#' execute()
22 21
 #' 
22
+#' \dontrun{
23
+#' execute()
24
+#' }
23 25
 #' @export
24 26
 #'
25 27
 execute <- function()
... ...
@@ -51,7 +53,7 @@ execute <- function()
51 53
 #'
52 54
 #' initGMQL("gtf")
53 55
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
54
-#' r = read(test_path)
56
+#' r = readDataset(test_path)
55 57
 #' s = select(input_data = r)
56 58
 #' m = merge(groupBy = c("antibody_targer","cell_karyotype"),input_data = s)
57 59
 #' materialize(input_data = m, dir_out = test_path)
... ...
@@ -76,7 +78,8 @@ materialize <- function(input_data, dir_out = getwd())
76 78
 #' as folder (like if execution was invoked)
77 79
 #'
78 80
 #' @import GenomicRanges
79
-#'
81
+#' @importFrom stats setNames
82
+#' 
80 83
 #' @param input_data returned object from any GMQL function
81 84
 #' @param rows number of rows for each sample regions that you want to retrieve and stored in memory
82 85
 #' by default is 0 that means take all rows for each sample
... ...
@@ -87,10 +90,10 @@ materialize <- function(input_data, dir_out = getwd())
87 90
 #'
88 91
 #' initGMQL("gtf")
89 92
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
90
-#' r = read(test_path)
93
+#' r = readDataset(test_path)
91 94
 #' s = select(input_data = r)
92 95
 #' m = merge(groupBy = c("antibody_targer","cell_karyotype"),input_data = s)
93
-#' take(input_data = m, rows = 45)
96
+#' g <- take(input_data = m, rows = 45)
94 97
 #' 
95 98
 #' @export
96 99
 #'
... ...
@@ -134,7 +137,7 @@ take <- function(input_data, rows=0L)
134 137
     x <- x[-1]
135 138
   })
136 139
   meta_list <- lapply(name_value_list, function(x){
137
-    setNames(as.list(as.character(x[[2]])), x[[1]])
140
+    stats::setNames(as.list(as.character(x[[2]])), x[[1]])
138 141
   })
139 142
 }
140 143
 
... ...
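For reference, a minimal standalone sketch of what stats::setNames does in the take() code above (the values are illustrative, not taken from a real dataset):

# setNames() attaches names to a value in a single call; take() uses it to turn
# parallel name/value vectors into a named metadata list.
values <- c("Polimi", "R-GMQL")
keys   <- c("Provider", "Application")
meta   <- stats::setNames(as.list(values), keys)
meta$Provider   # "Polimi"
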
@@ -26,15 +26,15 @@
26 26
 #'
27 27
 #' @examples
28 28
 #' 
29
-#' ### it creates a dataset called merged which contains one sample for each antibody_target value 
30
-#' found within the metadata of the exp dataset sample; 
31
-#' each created sample contains all regions from all 'exp' samples with a specific value for their 
32
-#' antibody_target metadata attribute.
29
+#' ## it creates a dataset called merged which contains one sample for each antibody_target value 
30
+#' ## found within the metadata of the exp dataset sample; 
31
+#' ## each created sample contains all regions from all 'exp' samples with a specific value for their 
32
+#' ## antibody_target metadata attribute.
33 33
 #' 
34 34
 #' initGMQL("gtf")
35 35
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
36 36
 #' exp = readDataset(test_path)
37
-#' merged = merge(input_data = exp, groupBy = c("antibody_targer"))
37
+#' merged = merge(input_data = exp, groupBy = c("antibody_target"))
38 38
 #' 
39 39
 #' @export
40 40
 #'
... ...
@@ -36,7 +36,7 @@
36 36
 #'
37 37
 #' @return DAGgraph class object. It contains the value associated to the graph used 
38 38
 #' as input for the subsequent GMQL function
39
-#' #'
39
+#' 
40 40
 #' @details
41 41
 #' mtop, mtopg,mtopp, rtop, rtopg and rtopp are normally numbers: if you specify a vector,
42 42
 #' only the first element will be used
... ...
@@ -47,26 +47,13 @@
47 47
 #'
48 48
 #' @examples
49 49
 #' 
50
-#' ### it orders the samples according to the Region_count metadata attribute and takes the two samples 
51
-#' that have the highest count. 
52
-#'
53
-#' initGMQL("gtf")
54
-#' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
55
-#' r = readDataset(test_path)
56
-#' o = order(r,list(DESC(Region_Count)), mtop = 2)
57
-#' 
58
-#' \dontrun{
59
-#' 
60
-#' ### it extracts the first 5 samples on the basis of their region counter 
61
-#' (those with the smaller RegionCount) and then, for each of them, 
62
-#' 7 regions on the basis of their mutation counter (those with the higher MutationCount).
50
+#' ## it orders the samples according to the Region_count metadata attribute and takes the two samples 
51
+#' ## that have the highest count. 
63 52
 #'
64 53
 #' initGMQL("gtf")
65 54
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
66 55
 #' r = readDataset(test_path)
67
-#' o = order(r,list(ASC(Region_Count)), mtop = 5,regions_ordering = list(DESC(MutationCount)),rtop=7)
68
-#'  
69
-#' }
56
+#' o = order(r,list(DESC("Region_Count")), mtop = 2)
70 57
 #'
71 58
 #' @export
72 59
 #'
... ...
@@ -79,13 +66,13 @@ order <- function(input_data, metadata_ordering = NULL, mtop = 0, mtopg = 0,mtop
79 66
 
80 67
   if(length(mtop)>0 || length(mtopg)>0 || length(rtop)>0 || length(rtopg)>0
81 68
      || length(mtopp)>0 || length(rtopp)>0)
82
-    warning("only the first element is taken by rtop, mtop, mtopg, rtopg")
69
+    warning("only the first element is taken by rtop, mtop, mtopg, rtopg, rtopp, mtopp")
83 70
 
84 71
   # we consider only the first element even if input is a vector of Int
85 72
   # we cut the other arguments
86 73
 
87 74
   mtop = as.integer(mtop[1])
88
-  mtog = as.integer(mtopg[1])
75
+  mtopg = as.integer(mtopg[1])
89 76
   mtopp = as.integer(mtopp[1])
90 77
 
91 78
   rtop = as.integer(rtop[1])
... ...
@@ -95,37 +82,37 @@ order <- function(input_data, metadata_ordering = NULL, mtop = 0, mtopg = 0,mtop
95 82
   if(mtop > 0 && mtopg >0)
96 83
   {
97 84
     warning("cannot be used together.\nWe set mtopg = 0")
98
-    mtopg = 0
85
+    mtopg = 0L
99 86
   }
100 87
 
101 88
   if(mtop >0 && mtopp>0)
102 89
   {
103 90
     warning("cannot be used together.\nWe set mtopp = 0")
104
-    mtopp = 0
91
+    mtopp = 0L
105 92
   }
106 93
 
107 94
   if(mtopg >0 && mtopp>0)
108 95
   {
109 96
     warning("cannot be used together.\nWe set mtopp = 0")
110
-    mtopp = 0
97
+    mtopp = 0L
111 98
   }
112 99
 
113 100
   if(rtop > 0 && rtopg >0)
114 101
   {
115 102
     warning("cannot be used together.\nWe set rtopg = 0")
116
-    rtopg = 0
103
+    rtopg = 0L
117 104
   }
118 105
 
119 106
   if(rtop >0 && rtopp>0)
120 107
   {
121 108
     warning("cannot be used together.\nWe set rtopp = 0")
122
-    rtopp = 0
109
+    rtopp = 0L
123 110
   }
124 111
 
125 112
   if(rtopg >0 && rtopp>0)
126 113
   {
127 114
     warning("cannot be used together.\nWe set rtopp = 0")
128
-    rtopp = 0
115
+    rtopp = 0L
129 116
   }
130 117
 
131 118
   if(!is.null(metadata_ordering))
... ...
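A short aside on the 0 -> 0L replacements above; this is base R behaviour, not package code, and the motivation (keeping the top parameters integer after the earlier as.integer() coercion) is presumed:

# Plain 0 is a double literal; 0L is an integer literal. Reassigning mtopg = 0
# would silently demote the value back to double after as.integer() was applied.
typeof(0)        # "double"
typeof(0L)       # "integer"
is.integer(0L)   # TRUE
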
@@ -12,7 +12,7 @@
12 12
 #'
13 13
 #' @param input_data string pointer taken from GMQL function
14 14
 #' @param metadata vector of string made up by metadata attribute
15
-#' @param region vector of string made up by schema field attribute
15
+#' @param regions vector of string made up by schema field attribute
16 16
 #' @param all_but_reg logical value indicating which schema filed attribute you want to exclude.
17 17
 #' If FALSE only the regions you choose is kept in the output of the project operation,
18 18
 #' if TRUE the schema region are all except ones include in region parameter.
... ...
@@ -33,24 +33,24 @@
33 33
 #' @examples
34 34
 #' 
35 35
 #' ## it creates a new dataset called CTCF_NORM_SCORE by preserving all region attributes apart from score,
36
-#' and creating a new region attribute called new_score by dividing the existing score value 
37
-#' of each region by 1000.0 and incrementing it by 100.
38
-#' It also generates, for each sample of the new dataset, 
39
-#' a new metadata attribute called normalized with value 1, which can be used in future selections.
36
+#' ## and creating a new region attribute called new_score by dividing the existing score value 
37
+#' ## of each region by 1000.0 and incrementing it by 100.
38
+#' ## It also generates, for each sample of the new dataset, 
39
+#' ## a new metadata attribute called normalized with value 1, which can be used in future selections.
40 40
 #' 
41 41
 #' initGMQL("gtf")
42 42
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
43 43
 #' input = readDataset(test_path)
44
-#' CTCF_NORM_SCORE = project(input,metadata_update="normalized AS 1",
45
-#' regions_update="new_score AS (score / 1000.0) + 100" , regions=c("score"),all_but_reg=T,)
44
+#' CTCF_NORM_SCORE = project(input,metadata_update="normalized AS 1", regions_update="new_score AS (score / 1000.0) + 100" , regions=c("score"), all_but_reg=TRUE)
46 45
 #' 
47 46
 #' 
48 47
 #' \dontrun{
49
-#' ### it produces an output dataset that contains the same samples as the input dataset. 
50
-#' Each output sample only contains, as region attributes, 
51
-#' the four basic coordinates (chr, left, right, strand) and the specified region attributes 
52
-#' 'variant_classification' and 'variant_type', and as metadata attributes only the specified ones, 
53
-#' i.e. manually_curated__tissue_status and manually_curated__tumor_tag.
48
+#' 
49
+#' ## it produces an output dataset that contains the same samples as the input dataset. 
50
+#' ## Each output sample only contains, as region attributes, 
51
+#' ## the four basic coordinates (chr, left, right, strand) and the specified region attributes 
52
+#' ## 'variant_classification' and 'variant_type', and as metadata attributes only the specified ones, 
53
+#' ## i.e. manually_curated__tissue_status and manually_curated__tumor_tag.
54 54
 #' 
55 55
 #' initGMQL("gtf")
56 56
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
... ...
@@ -76,6 +76,8 @@ project <-function(input_data, metadata = NULL,metadata_update=NULL,all_but_meta
76 76
 
77 77
     if(length(metadata)==0)
78 78
       metadata <- scalaNull("Array[String]")
79
+    
80
+    metadata <- (I(as.character(metadata)))
79 81
   }
80 82
   else
81 83
     metadata <- scalaNull("Array[String]")
... ...
@@ -90,6 +92,9 @@ project <-function(input_data, metadata = NULL,metadata_update=NULL,all_but_meta
90 92
 
91 93
     if(length(regions)==0)
92 94
       regions <- scalaNull("Array[String]")
95
+    
96
+    regions <- (I(as.character(regions)))
97
+    
93 98
   }
94 99
   else
95 100
     regions <- scalaNull("Array[String]")
... ...
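The I(as.character(...)) wrapping added above is illustrated below; the intent (force a plain character vector and mark it "as is" before handing it to rscala) is an assumption, not documented in the package:

# as.character() guarantees a character vector (e.g. never a factor),
# and I() tags it with class "AsIs" so it is passed through unmodified.
x <- factor(c("score", "pvalue"))
y <- I(as.character(x))
class(y)      # "AsIs"
unclass(y)    # "score" "pvalue"
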
@@ -68,6 +68,8 @@ initGMQL <- function(output_format="gtf", remote_processing = FALSE)
68 68
 #' and override value is FALSE an error occures.
69 69
 #' useful only in remote processing
70 70
 #' 
71
+#' @importFrom methods is
72
+#' 
71 73
 #' @return DAGgraph class object. It contains the value associated to the graph used 
72 74
 #' as input for the subsequent GMQL function
73 75
 #'
... ...
@@ -158,17 +160,16 @@ readDataset <- function(dataset, parser = "CustomParser",is_local=TRUE,url=NULL,
158 160
 #'
159 161
 #' Read a GrangesList saving in scala memory that can be referenced in R
160 162
 #'
163
+#'
164
+#' @importFrom S4Vectors metadata
161 165
 #' @param samples GrangesList
166
+#' 
162 167
 #'
163 168
 #' @return DAGgraph class object. It contains the value associated to the graph used 
164 169
 #' as input for the subsequent GMQL function
165 170
 #' 
166 171
 #' @examples
167
-#'
168
-#' \dontrun{
169
-#' 
170
-#' }
171
-#' ""
172
+#' "prova prova"
172 173
 #'
173 174
 #' @export
174 175
 #'
... ...
@@ -177,7 +178,7 @@ read <- function(samples)
177 178
   if(!is(samples,"GRangesList"))
178 179
     stop("only GrangesList")
179 180
 
180
-  meta <- metadata(samples)
181
+  meta <- S4Vectors::metadata(samples)
181 182
   if(is.null(meta)) {
182 183
     warning("GrangesList has no metadata. we provide two metadata for you")
183 184
     meta_matrix <- matrix(c("Provider","Polimi", "Application", "R-GMQL"),ncol = 2,byrow = TRUE)
... ...
@@ -36,7 +36,7 @@
36 36
 #' @examples
37 37
 #' 
38 38
 #' ## it selects from input data samples of patients younger than 70 years old, 
39
-#' based on filtering on sample metadata attribute Patient_age
39
+#' ## based on filtering on sample metadata attribute Patient_age
40 40
 #' 
41 41
 #' initGMQL("gtf")
42 42
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
... ...
@@ -108,6 +108,7 @@ select <- function(input_data, predicate = NULL, region_predicate = NULL, semi_j
108 108
     
109 109
     join_condition_matrix <- .join_condition(semi_join)
110 110
   }
111
+  
111 112
   out <- WrappeR$select(predicate,region_predicate,join_condition_matrix,semi_join_dataset,
112 113
                         semi_join_negation,input_data$value)
113 114
   if(grepl("No",out,ignore.case = TRUE) || grepl("expected",out,ignore.case = TRUE))
... ...
@@ -23,16 +23,16 @@
23 23
 #' @references \url{http://www.bioinformatics.deib.polimi.it/genomic_computing/GMQL/doc/GMQLUserTutorial.pdf}
24 24
 #'
25 25
 #' @examples
26
-#' ### it creates a dataset called full which contains all samples from the datasets 
27
-#' data1 and data2 whose schema is defined by merging data1 and data2 dataset schemas 
28
-#' (union of all the attributes present in the two input datasets).
26
+#' ## it creates a dataset called full which contains all samples from the datasets 
27
+#' ## data1 and data2 whose schema is defined by merging data1 and data2 dataset schemas 
28
+#' ## (union of all the attributes present in the two input datasets).
29 29
 #' 
30 30
 #' initGMQL("gtf")
31 31
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
32 32
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
33 33
 #' data1 = readDataset(test_path)
34 34
 #' data2 = readDataset(test_path2)
35
-#' full = union(r2,r)
35
+#' full = union(data1,data2)
36 36
 #' 
37 37
 #'
38 38
 #' @export
... ...
@@ -31,26 +31,26 @@
31 31
 }
32 32
 
33 33
 # aggregates factory
34
-.aggregates <- function(metadata,class)
34
+.aggregates <- function(meta_data,class)
35 35
 {
36
-  if(!is.list(metadata))
37
-    stop("metadata: invalid input")
36
+  if(!is.list(meta_data))
37
+    stop("meta_data: invalid input")
38 38
 
39
-  if(!all(sapply(metadata, function(x) is(x,class))))
39
+  if(!all(sapply(meta_data, function(x) is(x,class))))
40 40
     stop("All elements must be META_OPERATOR object")
41 41
 
42
-  names <- names(metadata)
42
+  names <- names(meta_data)
43 43
   if(is.null(names))
44 44
   {
45 45
     warning("You did not assign a names to a list.\nWe build names for you")
46
-    names <- sapply(metadata, take_value.META_OPERATOR)
46
+    names <- sapply(meta_data, take_value.META_OPERATOR)
47 47
   }
48 48
   else
49 49
   {
50 50
     if("" %in% names)
51 51
       stop("No partial names assignment is allowed")
52 52
   }
53
-  aggregate_matrix <- t(sapply(metadata, function(x) {
53
+  aggregate_matrix <- t(sapply(meta_data, function(x) {
54 54
 
55 55
     new_value = as.character(x)
56 56
     matrix <- matrix(new_value)
... ...
@@ -34,13 +34,6 @@ if(getRversion() >= "3.1.0")
34 34
 #' PolimiUrl = "http://130.186.13.219/gmql-rest"
35 35
 #' login.GMQL(PolimiUrl)
36 36
 #'
37
-#' \dontrun{
38
-#' 
39
-#' ### login with username and password
40
-#' PolimiUrl = "http://130.186.13.219/gmql-rest"
41
-#' login.GMQL(PolimiUrl,"test101","test")
42
-#' 
43
-#' }
44 37
 #' @export
45 38
 #'
46 39
 login.GMQL <- function(url,username = NULL, password = NULL)
... ...
@@ -100,13 +93,6 @@ login.GMQL <- function(url,username = NULL, password = NULL)
100 93
 #' login.GMQL(PolimiUrl)
101 94
 #' logout.GMQL(PolimiUrl)
102 95
 #'
103
-#' \dontrun{
104
-#' ##### login with username and password, then logout
105
-#' PolimiUrl = "http://130.186.13.219/gmql-rest"
106
-#' login.GMQL(PolimiUrl,"test101","test")
107
-#' logout.GMQL(PolimiUrl)
108
-#' }
109
-#'
110 96
 #' @return None
111 97
 #'
112 98
 #' @export
... ...
@@ -156,8 +142,8 @@ logout.GMQL <- function(url)
156 142
 #'
157 143
 #' @examples
158 144
 #' 
159
-#' ##### this user already exist, it's a test account
160
-#' ##### don't use it
145
+#' ### this user already exist, it's a test account, don't use it!!!
146
+#' 
161 147
 #' PolimiUrl = "http://130.186.13.219/gmql-rest"
162 148
 #' register.GMQL(url = PolimiUrl,"jonh","Doe","jonh@doe.com","JD","JD46")
163 149
 #'
... ...
@@ -22,6 +22,7 @@
22 22
 #' @examples
23 23
 #'
24 24
 #' PolimiUrl = "http://130.186.13.219/gmql-rest"
25
+#' 
25 26
 #' login.GMQL(PolimiUrl)
26 27
 #' list <- showQueries(PolimiUrl)
27 28
 #'
... ...
@@ -104,7 +104,7 @@ DEF <- function(value)
104 104
 #' 
105 105
 #' #### select with condition
106 106
 #' #### the first and the third attribute are DEF the second one is EXACT
107
-#' s = select(input_data = r, semi_join = list("cell_type",EXACT("cell"),attribute_tag), semi_join_dataset = r)
107
+#' s = select(input_data = r, semi_join = list("cell_type",EXACT("cell"),"attribute_tag"), semi_join_dataset = r)
108 108
 #'
109 109
 #' \dontrun{
110 110
 #'
... ...
@@ -152,7 +152,7 @@ EXACT <- function(value)
152 152
 #' 
153 153
 #' #### select with condition
154 154
 #' #### the first and the third attribute are DEF the second one is FULL
155
-#' s = select(input_data = r, semi_join = list("cell_type",FULL("cell"),attribute_tag), semi_join_dataset = c)
155
+#' s = select(input_data = r, semi_join = list("cell_type",FULL("cell"),"attribute_tag"), semi_join_dataset = c)
156 156
 #'
157 157
 #' \dontrun{
158 158
 #'
... ...
@@ -23,7 +23,8 @@
23 23
 #' @examples
24 24
 #'
25 25
 #' #### show dataset when logged as guest
26
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
26
+#' 
27
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
27 28
 #' login.GMQL(url = PolimiUrl)
28 29
 #' list <- showDatasets(PolimiUrl)
29 30
 #'
... ...
@@ -72,7 +73,7 @@ showDatasets <- function(url)
72 73
 #'
73 74
 #' @examples
74 75
 #'
75
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
76
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
76 77
 #' login.GMQL(url = PolimiUrl)
77 78
 #' list <- showSamplesFromDataset(PolimiUrl,"public.GRCh38_ENCODE_BROAD_MAY_2017")
78 79
 #'
... ...
@@ -120,7 +121,7 @@ showSamplesFromDataset <- function(url,datasetName)
120 121
 #' @examples
121 122
 #'
122 123
 #' ### show schema of public dataset
123
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
124
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
124 125
 #' login.GMQL(url = PolimiUrl)
125 126
 #' list <- showSchemaFromDataset(PolimiUrl,"public.GRCh38_ENCODE_BROAD_MAY_2017")
126 127
 #'
... ...
@@ -174,7 +175,7 @@ showSchemaFromDataset <- function(url,datasetName)
174 175
 #' 
175 176
 #' ### upload of GMQL dataset with no schema selection
176 177
 #' test_path <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
177
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
178
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
178 179
 #' login.GMQL(url = PolimiUrl)
179 180
 #' uploadSamples(PolimiUrl,"dataset1",folderPath = test_path)
180 181
 #' }
... ...
@@ -253,10 +254,13 @@ uploadSamples <- function(url,datasetName,folderPath,schemaName=NULL,isGMQL=TRUE
253 254
 #' @examples
254 255
 #'
255 256
 #' \dontrun{
256
-#'
257
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
258
-#' login.GMQL(url = PolimiUrl,"test101","test")
257
+#' 
258
+#' ### this dataset does not exist
259
+#' 
260
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
261
+#' login.GMQL(url = PolimiUrl)
259 262
 #' deleteDataset(PolimiUrl,"job_test1_test101_20170604_180908_RESULT_DS")
263
+#' 
260 264
 #' }
261 265
 #' 
262 266
 #' @export
... ...
@@ -297,9 +301,11 @@ deleteDataset <- function(url,datasetName)
297 301
 #' @examples
298 302
 #'
299 303
 #' #### download dataset in r working directory
300
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
301
-#' login.GMQL(url = PolimiUrl,"test101","test")
302
-#' downloadDataset(PolimiUrl,"dataset_test",path = getwd())
304
+#' #### in this case we try to download public dataset
305
+#' 
306
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
307
+#' login.GMQL(url = PolimiUrl)
308
+#' downloadDataset(PolimiUrl,"public.HG19_BED_ANNOTATION",path = getwd())
303 309
 #'
304 310
 #' @export
305 311
 #'
... ...
@@ -313,11 +319,13 @@ downloadDataset <- function(url,datasetName,path = getwd())
313 319
   #print(content$result)
314 320
   content <- httr::content(req)
315 321
   if(req$status_code !=200)
316
-    stop(content)
317
-
318
-  zip_path = paste0(path,"/",datasetName,".zip")
319
-  writeBin(content,zip_path)
320
-  print("Download Complete")
322
+    print(content)
323
+  else
324
+  {
325
+    zip_path = paste0(path,"/",datasetName,".zip")
326
+    writeBin(content,zip_path)
327
+    print("Download Complete")
328
+  }
321 329
 }
322 330
 
323 331
 #' Download Dataset in GrangesList
... ...
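The status-code check added to downloadDataset above follows the usual httr pattern; a hedged standalone sketch is below (the REST route and dataset name are illustrative, not the package's actual endpoint):

library(httr)
url <- "http://130.186.13.219/gmql-rest"                              # URL used in the examples
req <- GET(paste0(url, "/datasets/public.HG19_BED_ANNOTATION/zip"))   # hypothetical route
if (status_code(req) != 200) {
  print(content(req))                                                 # report the error body
} else {
  zip_path <- file.path(getwd(), "public.HG19_BED_ANNOTATION.zip")
  writeBin(content(req, "raw"), zip_path)                             # write the archive to disk
}
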
@@ -340,11 +348,13 @@ downloadDataset <- function(url,datasetName,path = getwd())
340 348
 #'
341 349
 #' @examples
342 350
 #'
343
-#' #### create grangeslist from dataset in repository
344
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
345
-#' login.GMQL(url = PolimiUrl,"test101","test")
346
-#' downloadDatasetToGrangesList(PolimiUrl,"dataset_test")
347
-#'
351
+#' \dontrun{
352
+#' #### create grangeslist from public dataset HG19_BED_ANNOTATION got from repository
353
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
354
+#' login.GMQL(url = PolimiUrl)
355
+#' downloadDatasetToGrangesList(PolimiUrl,"public.HG19_BED_ANNOTATION")
356
+#' }
357
+#' 
348 358
 #' @export
349 359
 #'
350 360
 downloadDatasetToGrangesList <- function(url,datasetName)
... ...
@@ -396,9 +406,9 @@ downloadDatasetToGrangesList <- function(url,datasetName)
396 406
 #' @examples
397 407
 #'
398 408
 #' ## download metadata with real test login
399
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
400
-#' login.GMQL(url = PolimiUrl,"test101","test")
401
-#' metadataFromSample(PolimiUrl,"job_test1_test101_20170604_180908_RESULT_DS","S_00000")
409
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
410
+#' login.GMQL(url = PolimiUrl)
411
+#' metadataFromSample(PolimiUrl,"public.HG19_BED_ANNOTATION","genes")
402 412
 #'
403 413
 #' @export
404 414
 #'
... ...
@@ -448,10 +458,12 @@ metadataFromSample <- function(url, datasetName,sampleName)
448 458
 #'
449 459
 #' @examples
450 460
 #'
451
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
452
-#' login.GMQL(url = PolimiUrl,"test101","test")
453
-#' regionFromSample(PolimiUrl,"job_test1_test101_20170604_180908_RESULT_DS","S_00000")
454
-#'
461
+#' 
462
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
463
+#' login.GMQL(url = PolimiUrl)
464
+#' regionFromSample(PolimiUrl,"public.HG19_BED_ANNOTATION","genes")
465
+#' 
466
+#' 
455 467
 #' @export
456 468
 #'
457 469
 regionFromSample <- function(url, datasetName,sampleName)
... ...
@@ -466,7 +478,7 @@ regionFromSample <- function(url, datasetName,sampleName)
466 478
   else
467 479
   {
468 480
     list <- showSchemaFromDataset(url,datasetName)
469
-    schema_type <- list$schemaType
481
+    schema_type <- list$type
470 482
 
471 483
     temp <- tempfile("temp") #use temporary files
472 484
     write.table(content,temp,quote = FALSE,sep = '\t',col.names = FALSE,row.names = FALSE)
... ...
@@ -478,9 +490,15 @@ regionFromSample <- function(url, datasetName,sampleName)
478 490
         name <- x$name
479 491
       })
480 492
       df <- data.table::fread(temp,header = FALSE,sep = "\t")
493
+      a <- df[1,2]
494
+      if(is.na(as.numeric(a)))
495
+        df <- df[-1]
481 496
       data.table::setnames(df,vector_field)
482
-      samples <- GenomicRanges::makeGRangesFromDataFrame(df,keep.extra.columns = TRUE,start.field = "left",end.field = "right")
483
-    }
497
+      samples <- GenomicRanges::makeGRangesFromDataFrame(df,keep.extra.columns = TRUE,
498
+                                                         start.field = "left",
499
+                                                         end.field = "right",
500
+                                                         strand.field="strand")
501
+      }
484 502
     unlink(temp)
485 503
     return(samples)
486 504
   }
... ...
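A minimal standalone sketch of the GRanges conversion used in regionFromSample above, with made-up rows (the real code fills df from the sample's region file and names the columns from the dataset schema):

library(GenomicRanges)
df <- data.frame(seqname = c("chr1", "chr1"),
                 left    = c(100L, 500L),
                 right   = c(200L, 650L),
                 strand  = c("+", "-"),
                 score   = c(0.5, 0.8))
gr <- makeGRangesFromDataFrame(df, keep.extra.columns = TRUE,
                               start.field  = "left",
                               end.field    = "right",
                               strand.field = "strand")
gr   # GRanges object with 'score' kept as a metadata column
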
@@ -53,17 +53,17 @@ check.DISTAL <- function(value)
53 53
 #' @examples
54 54
 #' 
55 55
 #' ### Given a dataset HM and one called TSS with a sample including Transcription Start Site annotations,
56
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
57
-#' and takes the first/closest one for each TSS, 
58
-#' provided that such distance is lesser than 120K bases and joined TSS and HM samples are obtained 
59
-#' from the same provider (joinby clause).
56
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
57
+#' ## and takes the first/closest one for each TSS, 
58
+#' ## provided that such distance is lesser than 120K bases and joined TSS and HM samples are obtained 
59
+#' ## from the same provider (joinby clause).
60 60
 #' 
61 61
 #' initGMQL("gtf")
62 62
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
63 63
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
64 64
 #' TSS = readDataset(test_path)
65 65
 #' HM = readDataset(test_path2)
66
-#' join_data = join(TSS,HM,genometric_predicate=list(list(MD("1"),DLE("120000"))),c("provider"),region_output="RIGHT")
66
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DLE(120000))),c("provider"),region_output="RIGHT")
67 67
 #'
68 68
 #' 
69 69
 #' @export
... ...
@@ -95,18 +95,18 @@ DLE <- function(value)
95 95
 #' 
96 96
 #' @examples
97 97
 #' 
98
-#' ### Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
99
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
100
-#' and takes the first/closest one for each TSS, 
101
-#' provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
102
-#' from the same provider (joinby clause).
98
+#' ## Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
99
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
100
+#' ## and takes the first/closest one for each TSS, 
101
+#' ## provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
102
+#' ## from the same provider (joinby clause).
103 103
 #' 
104
-#' #' initGMQL("gtf")
104
+#' initGMQL("gtf")
105 105
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
106 106
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
107 107
 #' TSS = readDataset(test_path)
108 108
 #' HM = readDataset(test_path2)
109
-#' join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DGE("120000"))),c("provider"),region_output="RIGHT")
109
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DGE(120000))),c("provider"),region_output="RIGHT")
110 110
 #'
111 111
 #' @export
112 112
 #'
... ...
@@ -141,19 +141,19 @@ DGE <- function(value)
141 141
 #' 
142 142
 #' @examples
143 143
 #' 
144
-#' HM_TSS = JOIN(MD(1), DLE(120000); output: RIGHT; joinby: provider) TSS HM;
144
+#' 
145 145
 #' ### Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
146
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
147
-#' and takes the first/closest one for each TSS, 
148
-#' provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
149
-#' from the same provider (joinby clause).
146
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
147
+#' ## and takes the first/closest one for each TSS, 
148
+#' ## provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
149
+#' ## from the same provider (joinby clause).
150 150
 #' 
151
-#' #' initGMQL("gtf")
151
+#' initGMQL("gtf")
152 152
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
153 153
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
154 154
 #' TSS = readDataset(test_path)
155 155
 #' HM = readDataset(test_path2)
156
-#' join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DGE("120000"))),c("provider"),region_output="RIGHT")
156
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DGE(120000))),c("provider"),region_output="RIGHT")
157 157
 #'
158 158
 #' @export
159 159
 #'
... ...
@@ -187,19 +187,19 @@ MD <- function(value)
187 187
 #' 
188 188
 #' @examples
189 189
 #' 
190
-#' HM_TSS = JOIN(MD(1), DLE(120000); output: RIGHT; joinby: provider) TSS HM;
190
+#'
191 191
 #' ### Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
192
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
193
-#' and takes the first/closest one for each TSS, 
194
-#' provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
195
-#' from the same provider (joinby clause).
192
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
193
+#' ## and takes the first/closest one for each TSS, 
194
+#' ## provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
195
+#' ## from the same provider (joinby clause).
196 196
 #' 
197
-#' #' initGMQL("gtf")
197
+#' initGMQL("gtf")
198 198
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
199 199
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
200 200
 #' TSS = readDataset(test_path)
201 201
 #' HM = readDataset(test_path2)
202
-#' join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DGE("120000"),UP)),c("provider"),region_output="RIGHT")
202
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DGE(120000),UP())),c("provider"),region_output="RIGHT")
203 203
 #' 
204 204
 #' @export
205 205
 #'
... ...
@@ -232,19 +232,19 @@ as.character.UP <- function(obj) {
232 232
 #' 
233 233
 #' @examples
234 234
 #' 
235
-#' HM_TSS = JOIN(MD(1), DLE(120000); output: RIGHT; joinby: provider) TSS HM;
235
+#' 
236 236
 #' ### Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
237
-#' it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
238
-#' and takes the first/closest one for each TSS, 
239
-#' provided that such distance is greater than 12K bases and joined 'tss' and 'hm' samples are obtained 
240
-#' from the same provider (joinby clause).
237
+#' ## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
238
+#' ## and takes the first/closest one for each TSS, 
239
+#' ## provided that such distance is greater than 12K bases and joined 'tss' and 'hm' samples are obtained 
240
+#' ## from the same provider (joinby clause).
241 241
 #' 
242
-#' #' initGMQL("gtf")
242
+#' initGMQL("gtf")
243 243
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
244 244
 #' test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
245 245
 #' TSS = readDataset(test_path)
246 246
 #' HM = readDataset(test_path2)
247
-#' join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DGE("12000"),DOWN)),c("provider"),region_output="RIGHT")
247
+#' join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DGE(12000),DOWN())),c("provider"),region_output="RIGHT")
248 248
 #' 
249 249
 #' 
250 250
 #' @export
... ...
@@ -83,9 +83,9 @@ take_value.META_OPERATOR <- function(obj){
83 83
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
84 84
 #' exp = readDataset(test_path)
85 85
 #' 
86
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
87
-#' metadata attributes for each of them: sum_score is the sum of score of the sample regions.
88
-#' res = extend(input_data = exp, list(sum_score = SUM(score))
86
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
87
+#' ## metadata attributes for each of them: sum_score is the sum of score of the sample regions.
88
+#' res = extend(input_data = exp, list(sum_score = SUM("score")))
89 89
 #' 
90 90
 #' @export
91 91
 #'
... ...
@@ -123,9 +123,9 @@ SUM <- function(value)
123 123
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
124 124
 #' exp = readDataset(test_path)
125 125
 #' 
126
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
127
-#' metadata attributes for each of them: MinP is the minimum pvalue of the sample regions.
128
-#' res = extend(input_data = exp, list(minP = MIN(pvalue))
126
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
127
+#' ## metadata attributes for each of them: MinP is the minimum pvalue of the sample regions.
128
+#' res = extend(input_data = exp, list(minP = MIN("pvalue")))
129 129
 #' 
130 130
 #' @export
131 131
 #'
... ...
@@ -163,9 +163,9 @@ MIN <- function(value)
163 163
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
164 164
 #' exp = readDataset(test_path)
165 165
 #' 
166
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
167
-#' metadata attributes for each of them: max_score is the maximum score of the sample regions.
168
-#' res = extend(input_data = exp, list(max_score = MAX(score))
166
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
167
+#' ## metadata attributes for each of them: max_score is the maximum score of the sample regions.
168
+#' res = extend(input_data = exp, list(max_score = MAX("score")))
169 169
 #' 
170 170
 #' 
171 171
 #' @export
... ...
@@ -204,10 +204,10 @@ MAX <- function(value)
204 204
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
205 205
 #' exp = readDataset(test_path)
206 206
 #' 
207
-#' ### The following cover operation produces output regions where at least 2 and at most 3 regions of
208
-#' exp overlap, having as resulting region attributes the avg signal of the overlapping regions;
209
-#' the result has one sample for each input cell.
210
-#' res = cover(input_data = exp,2,3, c("cell"), list(avg_signal = AVG(signal)))
207
+#' ## The following cover operation produces output regions where at least 2 and at most 3 regions of
208
+#' ## exp overlap, having as resulting region attributes the avg signal of the overlapping regions;
209
+#' ## the result has one sample for each input cell.
210
+#' res = cover(input_data = exp,2,3, c("cell"), list(avg_signal = AVG("signal")))
211 211
 #'
212 212
 #' @export
213 213
 #'
... ...
@@ -246,10 +246,10 @@ AVG <- function(value)
246 246
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
247 247
 #' data = readDataset(test_path)
248 248
 #' 
249
-#' ### copies all samples of DATA into OUT dataset, and then for each of them adds another 
250
-#' metadata attribute,  allScores, which is the aggregation comma-separated list of all the 
251
-#' distinct values that the attribute  score takes in the sample.
252
-#' out = extend(input_data = data, list(allScore = BAG("score"))
249
+#' ## copies all samples of DATA into OUT dataset, and then for each of them adds another 
250
+#' ## metadata attribute,  allScores, which is the aggregation comma-separated list of all the 
251
+#' ## distinct values that the attribute  score takes in the sample.
252
+#' out = extend(input_data = data, list(allScore = BAG("score")))
253 253
 #'
254 254
 #' @export
255 255
 #'
... ...
@@ -280,14 +280,14 @@ BAG <- function(value)
280 280
 #' 
281 281
 #' @examples
282 282
 #' 
283
-#' ### local with CustomParser
283
+#' ## local with CustomParser
284 284
 #' initGMQL("gtf")
285 285
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
286 286
 #' exp = readDataset(test_path)
287 287
 #' 
288
-#' ### counts the regions in each sample and stores their number as value of the new metadata 
289
-#' RegionCount attribute of the sample.
290
-#' out = extend(input_data = exp, list(RegionCount = COUNT())
288
+#' ## counts the regions in each sample and stores their number as value of the new metadata 
289
+#' ## RegionCount attribute of the sample.
290
+#' out = extend(input_data = exp, list(RegionCount = COUNT()))
291 291
 #'
292 292
 #' @export
293 293
 #'
... ...
@@ -326,9 +326,9 @@ check.COUNT <- function(obj){}
326 326
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
327 327
 #' exp = readDataset(test_path)
328 328
 #' 
329
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
330
-#' metadata attributes for each of them: std_score is the standard deviation score of the sample regions.
331
-#' res = extend(input_data = exp, list(std_score = STD(score))
329
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
330
+#' ## metadata attributes for each of them: std_score is the standard deviation score of the sample regions.
331
+#' res = extend(input_data = exp, list(std_score = STD("score")))
332 332
 #'
333 333
 #' @export
334 334
 #'
... ...
@@ -366,9 +366,9 @@ STD <- function(value)
366 366
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
367 367
 #' exp = readDataset(test_path)
368 368
 #' 
369
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
370
-#' metadata attributes for each of them: m_score is the median score of the sample regions.
371
-#' res = extend(input_data = exp, list(m_score = MEDIAN(score))
369
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
370
+#' ## metadata attributes for each of them: m_score is the median score of the sample regions.
371
+#' res = extend(input_data = exp, list(m_score = MEDIAN("score")))
372 372
 #'
373 373
 #' @export
374 374
 #'
... ...
@@ -406,9 +406,9 @@ MEDIAN <- function(value)
406 406
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
407 407
 #' exp = readDataset(test_path)
408 408
 #' 
409
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
410
-#' metadata attributes for each of them: q1_score is the first quartile of score of the sample regions.
411
-#' res = extend(input_data = exp, list(q1_score = Q1(score))
409
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
410
+#' ## metadata attributes for each of them: q1_score is the first quartile of score of the sample regions.
411
+#' res = extend(input_data = exp, list(q1_score = Q1("score")))
412 412
 #'
413 413
 #'
414 414
 #' @export
... ...
@@ -447,9 +447,9 @@ Q1 <- function(value)
447 447
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
448 448
 #' exp = readDataset(test_path)
449 449
 #' 
450
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
451
-#' metadata attributes for each of them: q2_score is the second quartile of score of the sample regions.
452
-#' res = extend(input_data = exp, list(q2_score = Q2(score))
450
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
451
+#' ## metadata attributes for each of them: q2_score is the second quartile of score of the sample regions.
452
+#' res = extend(input_data = exp, list(q2_score = Q2("score")))
453 453
 #'
454 454
 #' @export
455 455
 #'
... ...
@@ -487,9 +487,9 @@ Q2 <- function(value)
487 487
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
488 488
 #' exp = readDataset(test_path)
489 489
 #' 
490
-#' ### This statement copies all samples of exp into res dataset, and then calculates new 
491
-#' metadata attributes for each of them: q3_score is the third quartile of score of the sample regions.
492
-#' res = extend(input_data = exp, list(q3_score = Q3(score))
490
+#' ## This statement copies all samples of exp into res dataset, and then calculates new 
491
+#' ## metadata attributes for each of them: q3_score is the third quartile of score of the sample regions.
492
+#' res = extend(input_data = exp, list(q3_score = Q3("score")))
493 493
 #' 
494 494
 #' @export
495 495
 #'
... ...
@@ -50,13 +50,13 @@ as.character.ORDER <- function(obj) {
50 50
 #' 
51 51
 #' @examples
52 52
 #' 
53
-#' ### it orders the samples according to the Region_count metadata attribute and takes the two samples 
54
-#' that have the highest count. 
53
+#' ## it orders the samples according to the Region_count metadata attribute and takes the two samples 
54
+#' ## that have the highest count. 
55 55
 #'
56 56
 #' initGMQL("gtf")
57 57
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
58 58
 #' r = readDataset(test_path)
59
-#' o = order(r,list(DESC(Region_Count)), mtop = 2)
59
+#' o = order(r,list(DESC("Region_Count")), mtop = 2)
60 60
 #' 
61 61
 #' @export
62 62
 #'
... ...
@@ -84,14 +84,14 @@ DESC <- function(value)
84 84
 #'
85 85
 #' @examples
86 86
 #' 
87
-#' ### it extracts the first 5 samples on the basis of their region counter 
88
-#' (those with the smaller RegionCount) and then, for each of them, 
89
-#' 7 regions on the basis of their mutation counter (those with the higher MutationCount).
90
-#'
87
+#' ## it get the first 5 samples on the basis of their region counter, 
88
+#' ## those with the smaller RegionCount and then for each of them, 7 regions on the basis of 
89
+#' ## their score, those with the higher score
90
+#' 
91 91
 #' initGMQL("gtf")
92 92
 #' test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
93 93
 #' r = readDataset(test_path)
94
-#' o = order(r,list(ASC(Region_Count)), mtop = 5,regions_ordering = list(DESC(MutationCount)),rtop=7)
94
+#' o = order(r,list(ASC("Region_Count")), mtop = 5,regions_ordering = list(DESC("score")),rtop=7)
95 95
 #' 
96 96
 #' @export
97 97
 #'
... ...
@@ -19,7 +19,7 @@
19 19
 #'
20 20
 #' @examples
21 21
 #'
22
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
22
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
23 23
 #' login.GMQL(PolimiUrl)
24 24
 #' list_jobs <- showJobs(PolimiUrl)
25 25
 #'
... ...
@@ -58,7 +58,7 @@ showJobs <- function(url)
58 58
 #'
59 59
 #' \dontrun{
60 60
 #' ## login with test user
61
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
61
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
62 62
 #' login.GMQL(PolimiUrl,"test101","test")
63 63
 #' ## list all jobs
64 64
 #' list_jobs <- showJobs(PolimiUrl)
... ...
@@ -100,7 +100,7 @@ showJobLog <- function(url,job_id)
100 100
 #' @examples
101 101
 #'
102 102
 #' \dontrun{
103
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
103
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
104 104
 #' login.GMQL(PolimiUrl,"test101","test")
105 105
 #' list_jobs <- showJobs(PolimiUrl)
106 106
 #' jobs_1 <- list_jobs$jobs[[1]]
... ...
@@ -138,7 +138,7 @@ stopJob <- function(url,job_id)
138 138
 #'
139 139
 #' @examples
140 140
 #' \dontrun{
141
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
141
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
142 142
 #' login.GMQL(PolimiUrl,"test101","test")
143 143
 #' list_jobs <- showJobs(PolimiUrl)
144 144
 #' jobs_1 <- list_jobs$jobs[[1]]
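Editorial note: taken together, the job functions touched in these hunks form a simple monitoring flow. A hedged sketch, not part of this commit; it assumes the extracted jobs_1 value is a valid job id for the other calls, and uses only the endpoint, credentials, and signatures shown above:

## sketch only: login, list jobs, then trace/log (and optionally stop) the first one
PolimiUrl = "http://130.186.13.219/gmql-rest"
login.GMQL(PolimiUrl, "test101", "test")
list_jobs <- showJobs(PolimiUrl)   # list the user's jobs
jobs_1 <- list_jobs$jobs[[1]]      # take the first job id
traceJob(PolimiUrl, jobs_1)        # query its status
showJobLog(PolimiUrl, jobs_1)      # print its log
## stopJob(PolimiUrl, jobs_1)      # uncomment to abort the job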
... ...
@@ -183,7 +183,7 @@ traceJob <- function(url, job_id)
183 183
 #'
184 184
 #' @examples
185 185
 #'
186
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
186
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
187 187
 #' login.GMQL(PolimiUrl)
188 188
 #' runQuery(PolimiUrl, "query_1", "DATA_SET_VAR = SELECT() HG19_TCGA_dnaseq;
189 189
 #' MATERIALIZE DATA_SET_VAR INTO RESULT_DS;", output_gtf = FALSE)
... ...
@@ -231,7 +231,7 @@ runQuery <- function(url,fileName,query,output_gtf = TRUE)
231 231
 #'
232 232
 #' ## run query: output GTF
233 233
 #'
234
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
234
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
235 235
 #' login.GMQL(PolimiUrl)
236 236
 #' test_path <- system.file("example",package = "GMQL")
237 237
 #' test_query <- file.path(test_path, "query1.txt")
... ...
@@ -239,7 +239,6 @@ runQuery <- function(url,fileName,query,output_gtf = TRUE)
239 239
 #'
240 240
 #' ## run query: output GDM (tabulated)
241 241
 #'
242
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
243 242
 #' login.GMQL(PolimiUrl)
244 243
 #' test_path <- system.file("example",package = "GMQL")
245 244
 #' test_query <- file.path(test_path, "query1.txt")
... ...
@@ -271,7 +270,7 @@ runQuery.fromfile <- function(url,fileName,filePath,output_gtf = TRUE)
271 270
 #'
272 271
 #' @examples
273 272
 #'
274
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
273
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
275 274
 #' login.GMQL(PolimiUrl)
276 275
 #' compileQuery(PolimiUrl, "DATA_SET_VAR = SELECT() HG19_TCGA_dnaseq;
277 276
 #' MATERIALIZE DATA_SET_VAR INTO RESULT_DS;")
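Editorial note: a hedged sketch, not part of this commit, chaining the two operations documented here: compile the query text first, then execute the same text with runQuery(). The variable name query is introduced only for illustration:

## sketch only: compile, then run, the same GMQL query against the new endpoint
PolimiUrl = "http://130.186.13.219/gmql-rest"
login.GMQL(PolimiUrl)
query <- "DATA_SET_VAR = SELECT() HG19_TCGA_dnaseq; MATERIALIZE DATA_SET_VAR INTO RESULT_DS;"
compileQuery(PolimiUrl, query)                             # syntax/semantic check only
runQuery(PolimiUrl, "query_1", query, output_gtf = FALSE)  # execute and materialize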
... ...
@@ -305,10 +304,9 @@ compileQuery <- function(url ,query)
305 304
 #' @details
306 305
 #' If an error occurs, a specific error message is printed
307 306
 #'
308
-#'
309 307
 #' @examples
310 308
 #'
311
-#' PolimiUrl = "http://genomic.elet.polimi.it/gmql-rest"
309
+#' PolimiUrl = "http://130.186.13.219/gmql-rest"
312 310
 #' login.GMQL(PolimiUrl)
313 311
 #' test_path <- system.file("example",package = "GMQL")
314 312
 #' test_query <- file.path(test_path, "query1.txt")
... ...
@@ -13,6 +13,6 @@ RnwWeave: Sweave
13 13
 LaTeX: pdfLaTeX
14 14
 
15 15
 BuildType: Package
16
-PackageUseDevtools: Yes
17 16
 PackageInstallArgs: --no-multiarch --with-keep.source
17
+PackageCheckArgs: --no-build-vignettes
18 18
 PackageRoxygenize: rd,collate,namespace
... ...
@@ -19,13 +19,13 @@ It defines an ascending order for the input value
19 19
 }
20 20
 \examples{
21 21
 
22
-### it extracts the first 5 samples on the basis of their region counter 
23
-(those with the smaller RegionCount) and then, for each of them, 
24
-7 regions on the basis of their mutation counter (those with the higher MutationCount).
22
+## it extracts the first 5 samples on the basis of their region counter 
23
+## (those with the smallest Region_Count) and then, for each of them, 7 regions 
24
+## on the basis of their score (those with the highest score).
25 25
 
26 26
 initGMQL("gtf")
27 27
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
28 28
 r = readDataset(test_path)
29
-o = order(r,list(ASC(Region_Count)), mtop = 5,regions_ordering = list(DESC(MutationCount)),rtop=7)
29
+o = order(r,list(ASC("Region_Count")), mtop = 5,regions_ordering = list(DESC("score")),rtop=7)
30 30
 
31 31
 }
... ...
@@ -25,10 +25,10 @@ initGMQL("gtf")
25 25
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
26 26
 exp = readDataset(test_path)
27 27
 
28
-### The following cover operation produces output regions where at least 2 and at most 3 regions of
29
-exp overlap, having as resulting region attributes the avg signal of the overlapping regions;
30
-the result has one sample for each input cell.
31
-res = cover(input_data = exp,2,3, c("cell"), list(avg_signal = AVG(signal)))
28
+## The following cover operation produces output regions where at least 2 and at most 3 regions of
29
+## exp overlap, having as resulting region attribute the average signal (avg_signal) of the overlapping regions;
30
+## the result has one sample for each input cell.
31
+res = cover(input_data = exp,2,3, c("cell"), list(avg_signal = AVG("signal")))
32 32
 
33 33
 }
34 34
 \seealso{
... ...
@@ -26,10 +26,10 @@ initGMQL("gtf")
26 26
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
27 27
 data = readDataset(test_path)
28 28
 
29
-### copies all samples of DATA into OUT dataset, and then for each of them adds another 
30
-metadata attribute,  allScores, which is the aggregation comma-separated list of all the 
31
-distinct values that the attribute  score takes in the sample.
32
-out = extend(input_data = data, list(allScore = BAG("score"))
29
+## copies all samples of DATA into the OUT dataset, and then for each of them adds another 
30
+## metadata attribute, allScore, which is the comma-separated list of all the 
31
+## distinct values that the attribute score takes in the sample.
32
+out = extend(input_data = data, list(allScore = BAG("score")))
33 33
 
34 34
 }
35 35
 \seealso{
... ...
@@ -17,14 +17,14 @@ performing all the type conversion needed
17 17
 }
18 18
 \examples{
19 19
 
20
-### local with CustomParser
20
+## local with CustomParser
21 21
 initGMQL("gtf")
22 22
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
23 23
 exp = readDataset(test_path)
24 24
 
25
-### counts the regions in each sample and stores their number as value of the new metadata 
26
-RegionCount attribute of the sample.
27
-out = extend(input_data = exp, list(RegionCount = COUNT())
25
+## counts the regions in each sample and stores their number as the value of the new metadata 
26
+## RegionCount attribute of the sample.
27
+out = extend(input_data = exp, list(RegionCount = COUNT()))
28 28
 
29 29
 }
30 30
 \seealso{
... ...
@@ -19,12 +19,12 @@ It defines a descending order for the input value
19 19
 }
20 20
 \examples{
21 21
 
22
-### it orders the samples according to the Region_count metadata attribute and takes the two samples 
23
-that have the highest count. 
22
+## it orders the samples according to the Region_Count metadata attribute and takes the two samples 
23
+## that have the highest count. 
24 24
 
25 25
 initGMQL("gtf")
26 26
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
27 27
 r = readDataset(test_path)
28
-o = order(r,list(DESC(Region_Count)), mtop = 2)
28
+o = order(r,list(DESC("Region_Count")), mtop = 2)
29 29
 
30 30
 }
... ...
@@ -21,18 +21,18 @@ that their distance from the anchor region is greater than, or equal to, 'value'
21 21
 }
22 22
 \examples{
23 23
 
24
+## Given a dataset HM and one called TSS with a sample including Transcription Start Site annotations,
25
+## it searches for those regions of HM that are at a minimal distance from a transcription start site (TSS) 
26
-and takes the first/closest one for each TSS, 
27
+## provided that such distance is greater than 120K bases and the joined TSS and HM samples are obtained 
28
-from the same provider (joinby clause).
24
+## Given a dataset 'hm' and one called 'tss' with a sample including Transcription Start Site annotations,
25
+## it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
26
+## and takes the first/closest one for each TSS, 
27
+## provided that such distance is greater than 120K bases and joined 'tss' and 'hm' samples are obtained 
28
+## from the same provider (joinby clause).
29 29
 
30
-#' initGMQL("gtf")
30
+initGMQL("gtf")
31 31
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
32 32
 test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
33 33
 TSS = readDataset(test_path)
34 34
 HM = readDataset(test_path2)
35
-join_data = join(tss,hm,genometric_predicate=list(list(MD("1"),DGE("120000"))),c("provider"),region_output="RIGHT")
35
+join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DGE(120000))),c("provider"),region_output="RIGHT")
36 36
 
37 37
 }
38 38
 \seealso{
... ...
@@ -25,17 +25,17 @@ while DLE(0) searched for experiment regions adjacent to, or overlapping, the an
25 25
 \examples{
26 26
 
27 27
 ### Given a dataset HM and one called TSS with a sample including Transcription Start Site annotations,
28
-it searches for those regions of hm that are at a minimal distance from a transcription start site (TSS) 
29
-and takes the first/closest one for each TSS, 
30
-provided that such distance is lesser than 120K bases and joined TSS and HM samples are obtained 
31
-from the same provider (joinby clause).
28
+## it searches for those regions of HM that are at a minimal distance from a transcription start site (TSS) 
29
+## and takes the first/closest one for each TSS, 
30
+## provided that such distance is less than 120K bases and the joined TSS and HM samples are obtained 
31
+## from the same provider (joinby clause).
32 32
 
33 33
 initGMQL("gtf")
34 34
 test_path <- system.file("example","DATA_SET_VAR_GTF",package = "GMQL")
35 35
 test_path2 <- system.file("example","DATA_SET_VAR_GDM",package = "GMQL")
36 36
 TSS = readDataset(test_path)
37 37
 HM = readDataset(test_path2)
38
-join_data = join(TSS,HM,genometric_predicate=list(list(MD("1"),DLE("120000"))),c("provider"),region_output="RIGHT")
38
+join_data = join(TSS,HM,genometric_predicate=list(list(MD(1),DLE(120000))),c("provider"),region_output="RIGHT")
39 39
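Editorial note: a final hedged sketch, not part of this commit. Assuming a genometric predicate may list more than one distal condition, DGE and DLE can be combined to bound the distance to a window; the bounds and the variable name join_window are illustrative only, while TSS and HM are the datasets loaded in the examples above:

## sketch only: closest region (MD(1)) whose distance from the anchor
## lies between 20000 and 120000 bases, matched within the same provider
join_window = join(TSS, HM,
                   genometric_predicate = list(list(MD(1), DGE(20000), DLE(120000))),
                   c("provider"), region_output = "RIGHT")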