@@ -180,7 +180,7 @@ setMethod("getJRDD", signature(rdd = "PipelinedRDD"),
180
180
}
181
181
# Save the serialization flag after we create a RRDD
182
182
rdd@env$serializedMode <- serializedMode
183
- rdd @ env $ jrdd_val <- callJMethod(rddRef , " asJavaRDD" ) # rddRef$asJavaRDD()
183
+ rdd @ env $ jrdd_val <- callJMethod(rddRef , " asJavaRDD" )
184
184
rdd@env$jrdd_val
185
185
})
186
186
@@ -225,7 +225,7 @@ setMethod("cache",
225
225
# '
226
226
# ' Persist this RDD with the specified storage level. For details of the
227
227
# ' supported storage levels, refer to
228
- # ' http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence.
228
+ # '\url{http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence}.
229
229
# '
230
230
# ' @param x The RDD to persist
231
231
# ' @param newLevel The new storage level to be assigned
@@ -382,11 +382,13 @@ setMethod("collectPartition",
382
382
# ' \code{collectAsMap} returns a named list as a map that contains all of the elements
383
383
# ' in a key-value pair RDD.
384
384
# ' @examples
385
+ # nolint start
385
386
# '\dontrun{
386
387
# ' sc <- sparkR.init()
387
388
# ' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)), 2L)
388
389
# ' collectAsMap(rdd) # list(`1` = 2, `3` = 4)
389
390
# '}
391
+ # nolint end
390
392
# ' @rdname collect-methods
391
393
# ' @aliases collectAsMap,RDD-method
392
394
# ' @noRd
@@ -442,11 +444,13 @@ setMethod("length",
442
444
# ' @return list of (value, count) pairs, where count is number of each unique
443
445
# ' value in rdd.
444
446
# ' @examples
447
+ # nolint start
445
448
# '\dontrun{
446
449
# ' sc <- sparkR.init()
447
450
# ' rdd <- parallelize(sc, c(1,2,3,2,1))
448
451
# ' countByValue(rdd) # (1,2L), (2,2L), (3,1L)
449
452
# '}
453
+ # nolint end
450
454
# ' @rdname countByValue
451
455
# ' @aliases countByValue,RDD-method
452
456
# ' @noRd
@@ -597,11 +601,13 @@ setMethod("mapPartitionsWithIndex",
597
601
# ' @param x The RDD to be filtered.
598
602
# ' @param f A unary predicate function.
599
603
# ' @examples
604
+ # nolint start
600
605
# '\dontrun{
601
606
# ' sc <- sparkR.init()
602
607
# ' rdd <- parallelize(sc, 1:10)
603
608
# ' unlist(collect(filterRDD(rdd, function (x) { x < 3 }))) # c(1, 2)
604
609
# '}
610
+ # nolint end
605
611
# ' @rdname filterRDD
606
612
# ' @aliases filterRDD,RDD,function-method
607
613
# ' @noRd
@@ -756,11 +762,13 @@ setMethod("foreachPartition",
756
762
# ' @param x The RDD to take elements from
757
763
# ' @param num Number of elements to take
758
764
# ' @examples
765
+ # nolint start
759
766
# '\dontrun{
760
767
# ' sc <- sparkR.init()
761
768
# ' rdd <- parallelize(sc, 1:10)
762
769
# ' take(rdd, 2L) # list(1, 2)
763
770
# '}
771
+ # nolint end
764
772
# ' @rdname take
765
773
# ' @aliases take,RDD,numeric-method
766
774
# ' @noRd
@@ -824,11 +832,13 @@ setMethod("first",
824
832
# ' @param x The RDD to remove duplicates from.
825
833
# ' @param numPartitions Number of partitions to create.
826
834
# ' @examples
835
+ # nolint start
827
836
# '\dontrun{
828
837
# ' sc <- sparkR.init()
829
838
# ' rdd <- parallelize(sc, c(1,2,2,3,3,3))
830
839
# ' sort(unlist(collect(distinct(rdd)))) # c(1, 2, 3)
831
840
# '}
841
+ # nolint end
832
842
# ' @rdname distinct
833
843
# ' @aliases distinct,RDD-method
834
844
# ' @noRd
@@ -974,11 +984,13 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical",
974
984
# ' @param x The RDD.
975
985
# ' @param func The function to be applied.
976
986
# ' @examples
987
+ # nolint start
977
988
# '\dontrun{
978
989
# ' sc <- sparkR.init()
979
990
# ' rdd <- parallelize(sc, list(1, 2, 3))
980
991
# ' collect(keyBy(rdd, function(x) { x*x })) # list(list(1, 1), list(4, 2), list(9, 3))
981
992
# '}
993
+ # nolint end
982
994
# ' @rdname keyBy
983
995
# ' @aliases keyBy,RDD
984
996
# ' @noRd
@@ -1113,11 +1125,13 @@ setMethod("saveAsTextFile",
1113
1125
# ' @param numPartitions Number of partitions to create.
1114
1126
# ' @return An RDD where all elements are sorted.
1115
1127
# ' @examples
1128
+ # nolint start
1116
1129
# '\dontrun{
1117
1130
# ' sc <- sparkR.init()
1118
1131
# ' rdd <- parallelize(sc, list(3, 2, 1))
1119
1132
# ' collect(sortBy(rdd, function(x) { x })) # list (1, 2, 3)
1120
1133
# '}
1134
+ # nolint end
1121
1135
# ' @rdname sortBy
1122
1136
# ' @aliases sortBy,RDD,RDD-method
1123
1137
# ' @noRd
@@ -1188,11 +1202,13 @@ takeOrderedElem <- function(x, num, ascending = TRUE) {
1188
1202
# ' @param num Number of elements to return.
1189
1203
# ' @return The first N elements from the RDD in ascending order.
1190
1204
# ' @examples
1205
+ # nolint start
1191
1206
# '\dontrun{
1192
1207
# ' sc <- sparkR.init()
1193
1208
# ' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
1194
1209
# ' takeOrdered(rdd, 6L) # list(1, 2, 3, 4, 5, 6)
1195
1210
# '}
1211
+ # nolint end
1196
1212
# ' @rdname takeOrdered
1197
1213
# ' @aliases takeOrdered,RDD,RDD-method
1198
1214
# ' @noRd
@@ -1209,11 +1225,13 @@ setMethod("takeOrdered",
1209
1225
# ' @return The top N elements from the RDD.
1210
1226
# ' @rdname top
1211
1227
# ' @examples
1228
+ # nolint start
1212
1229
# '\dontrun{
1213
1230
# ' sc <- sparkR.init()
1214
1231
# ' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
1215
1232
# ' top(rdd, 6L) # list(10, 9, 7, 6, 5, 4)
1216
1233
# '}
1234
+ # nolint end
1217
1235
# ' @aliases top,RDD,RDD-method
1218
1236
# ' @noRd
1219
1237
setMethod("top",
@@ -1261,6 +1279,7 @@ setMethod("fold",
1261
1279
# ' @rdname aggregateRDD
1262
1280
# ' @seealso reduce
1263
1281
# ' @examples
1282
+ # nolint start
1264
1283
# '\dontrun{
1265
1284
# ' sc <- sparkR.init()
1266
1285
# ' rdd <- parallelize(sc, list(1, 2, 3, 4))
@@ -1269,6 +1288,7 @@ setMethod("fold",
1269
1288
# ' combOp <- function(x, y) { list(x[[1]] + y[[1]], x[[2]] + y[[2]]) }
1270
1289
# ' aggregateRDD(rdd, zeroValue, seqOp, combOp) # list(10, 4)
1271
1290
# '}
1291
+ # nolint end
1272
1292
# ' @aliases aggregateRDD,RDD,RDD-method
1273
1293
# ' @noRd
1274
1294
setMethod("aggregateRDD",
@@ -1367,12 +1387,14 @@ setMethod("setName",
1367
1387
# ' @return An RDD with zipped items.
1368
1388
# ' @seealso zipWithIndex
1369
1389
# ' @examples
1390
+ # nolint start
1370
1391
# '\dontrun{
1371
1392
# ' sc <- sparkR.init()
1372
1393
# ' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
1373
1394
# ' collect(zipWithUniqueId(rdd))
1374
1395
# ' # list(list("a", 0), list("b", 3), list("c", 1), list("d", 4), list("e", 2))
1375
1396
# '}
1397
+ # nolint end
1376
1398
# ' @rdname zipWithUniqueId
1377
1399
# ' @aliases zipWithUniqueId,RDD
1378
1400
# ' @noRd
@@ -1408,12 +1430,14 @@ setMethod("zipWithUniqueId",
1408
1430
# ' @return An RDD with zipped items.
1409
1431
# ' @seealso zipWithUniqueId
1410
1432
# ' @examples
1433
+ # nolint start
1411
1434
# '\dontrun{
1412
1435
# ' sc <- sparkR.init()
1413
1436
# ' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
1414
1437
# ' collect(zipWithIndex(rdd))
1415
1438
# ' # list(list("a", 0), list("b", 1), list("c", 2), list("d", 3), list("e", 4))
1416
1439
# '}
1440
+ # nolint end
1417
1441
# ' @rdname zipWithIndex
1418
1442
# ' @aliases zipWithIndex,RDD
1419
1443
# ' @noRd
@@ -1454,12 +1478,14 @@ setMethod("zipWithIndex",
1454
1478
# ' @return An RDD created by coalescing all elements within
1455
1479
# ' each partition into a list.
1456
1480
# ' @examples
1481
+ # nolint start
1457
1482
# '\dontrun{
1458
1483
# ' sc <- sparkR.init()
1459
1484
# ' rdd <- parallelize(sc, as.list(1:4), 2L)
1460
1485
# ' collect(glom(rdd))
1461
1486
# ' # list(list(1, 2), list(3, 4))
1462
1487
# '}
1488
+ # nolint end
1463
1489
# ' @rdname glom
1464
1490
# ' @aliases glom,RDD
1465
1491
# ' @noRd
@@ -1519,13 +1545,15 @@ setMethod("unionRDD",
1519
1545
# ' @param other Another RDD to be zipped.
1520
1546
# ' @return An RDD zipped from the two RDDs.
1521
1547
# ' @examples
1548
+ # nolint start
1522
1549
# '\dontrun{
1523
1550
# ' sc <- sparkR.init()
1524
1551
# ' rdd1 <- parallelize(sc, 0:4)
1525
1552
# ' rdd2 <- parallelize(sc, 1000:1004)
1526
1553
# ' collect(zipRDD(rdd1, rdd2))
1527
1554
# ' # list(list(0, 1000), list(1, 1001), list(2, 1002), list(3, 1003), list(4, 1004))
1528
1555
# '}
1556
+ # nolint end
1529
1557
# ' @rdname zipRDD
1530
1558
# ' @aliases zipRDD,RDD
1531
1559
# ' @noRd
@@ -1557,12 +1585,14 @@ setMethod("zipRDD",
1557
1585
# ' @param other An RDD.
1558
1586
# ' @return A new RDD which is the Cartesian product of these two RDDs.
1559
1587
# ' @examples
1588
+ # nolint start
1560
1589
# '\dontrun{
1561
1590
# ' sc <- sparkR.init()
1562
1591
# ' rdd <- parallelize(sc, 1:2)
1563
1592
# ' sortByKey(cartesian(rdd, rdd))
1564
1593
# ' # list(list(1, 1), list(1, 2), list(2, 1), list(2, 2))
1565
1594
# '}
1595
+ # nolint end
1566
1596
# ' @rdname cartesian
1567
1597
# ' @aliases cartesian,RDD,RDD-method
1568
1598
# ' @noRd
@@ -1587,13 +1617,15 @@ setMethod("cartesian",
1587
1617
# ' @param numPartitions Number of the partitions in the result RDD.
1588
1618
# ' @return An RDD with the elements from this that are not in other.
1589
1619
# ' @examples
1620
+ # nolint start
1590
1621
# '\dontrun{
1591
1622
# ' sc <- sparkR.init()
1592
1623
# ' rdd1 <- parallelize(sc, list(1, 1, 2, 2, 3, 4))
1593
1624
# ' rdd2 <- parallelize(sc, list(2, 4))
1594
1625
# ' collect(subtract(rdd1, rdd2))
1595
1626
# ' # list(1, 1, 3)
1596
1627
# '}
1628
+ # nolint end
1597
1629
# ' @rdname subtract
1598
1630
# ' @aliases subtract,RDD
1599
1631
# ' @noRd
@@ -1619,13 +1651,15 @@ setMethod("subtract",
1619
1651
# ' @param numPartitions The number of partitions in the result RDD.
1620
1652
# ' @return An RDD which is the intersection of these two RDDs.
1621
1653
# ' @examples
1654
+ # nolint start
1622
1655
# '\dontrun{
1623
1656
# ' sc <- sparkR.init()
1624
1657
# ' rdd1 <- parallelize(sc, list(1, 10, 2, 3, 4, 5))
1625
1658
# ' rdd2 <- parallelize(sc, list(1, 6, 2, 3, 7, 8))
1626
1659
# ' collect(sortBy(intersection(rdd1, rdd2), function(x) { x }))
1627
1660
# ' # list(1, 2, 3)
1628
1661
# '}
1662
+ # nolint end
1629
1663
# ' @rdname intersection
1630
1664
# ' @aliases intersection,RDD
1631
1665
# ' @noRd
@@ -1653,6 +1687,7 @@ setMethod("intersection",
1653
1687
# ' Assumes that all the RDDs have the *same number of partitions*, but
1654
1688
# ' does *not* require them to have the same number of elements in each partition.
1655
1689
# ' @examples
1690
+ # nolint start
1656
1691
# '\dontrun{
1657
1692
# ' sc <- sparkR.init()
1658
1693
# ' rdd1 <- parallelize(sc, 1:2, 2L) # 1, 2
@@ -1662,6 +1697,7 @@ setMethod("intersection",
1662
1697
# ' func = function(x, y, z) { list(list(x, y, z))} ))
1663
1698
# ' # list(list(1, c(1,2), c(1,2,3)), list(2, c(3,4), c(4,5,6)))
1664
1699
# '}
1700
+ # nolint end
1665
1701
# ' @rdname zipRDD
1666
1702
# ' @aliases zipPartitions,RDD
1667
1703
# ' @noRd
0 commit comments