From 2292bd98a3c064f6bc79c9cdd025b2a14748186a Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Tue, 29 Nov 2016 13:50:24 +0000
Subject: [PATCH] [SPARK-18615][DOCS] Switch to multi-line doc to avoid a genjavadoc bug for backticks

## What changes were proposed in this pull request?

Currently, backticks in a single-line doc comment are not marked down to `<code>..</code>` but are printed as they are (`` `..` ``). For example, take the line below:

```scala
/** Return an RDD with the pairs from `this` whose keys are not in `other`. */
```

We can work around this by switching to the multi-line style:

```scala
/**
 * Return an RDD with the pairs from `this` whose keys are not in `other`.
 */
```

- javadoc

  - **Before**

    ![2016-11-29 10 39 14](https://cloud.githubusercontent.com/assets/6477701/20693606/e64c8f90-b622-11e6-8dfc-4a029216e23d.png)

  - **After**

    ![2016-11-29 10 39 08](https://cloud.githubusercontent.com/assets/6477701/20693607/e7280d36-b622-11e6-8502-d2e21cd5556b.png)

- scaladoc (this one looks fine either way)

  - **Before**

    ![2016-11-29 10 38 22](https://cloud.githubusercontent.com/assets/6477701/20693640/12c18aa8-b623-11e6-901a-693e2f6f8066.png)

  - **After**

    ![2016-11-29 10 40 05](https://cloud.githubusercontent.com/assets/6477701/20693642/14eb043a-b623-11e6-82ac-7cd0000106d1.png)

I suspect this is related to SPARK-16153 and the genjavadoc issue `typesafehub/genjavadoc#85`.

## How was this patch tested?

I found the affected comments via

```
grep -r "\/\*\*.*\`" . | grep .scala
```

and then checked whether each one appears in the public API documentation, using docs built manually (`jekyll build`) with Java 7.

Author: hyukjinkwon

Closes #16050 from HyukjinKwon/javadoc-markdown.
---
 .../scala/org/apache/spark/SparkConf.scala         |  4 +++-
 .../apache/spark/api/java/JavaDoubleRDD.scala      |  4 +++-
 .../apache/spark/api/java/JavaPairRDD.scala        | 12 ++++++++---
 .../org/apache/spark/api/java/JavaRDD.scala        |  4 +++-
 .../apache/spark/rdd/PairRDDFunctions.scala        |  8 ++++++--
 .../main/scala/org/apache/spark/rdd/RDD.scala      |  8 ++++++--
 .../spark/graphx/impl/EdgeRDDImpl.scala            |  4 +++-
 .../apache/spark/graphx/impl/GraphImpl.scala       | 12 ++++++++---
 .../spark/graphx/impl/VertexRDDImpl.scala          |  4 +++-
 .../org/apache/spark/ml/linalg/Matrices.scala      | 16 +++++++++++----
 .../scala/org/apache/spark/ml/Pipeline.scala       |  4 +++-
 .../spark/ml/attribute/AttributeGroup.scala        |  4 +++-
 .../spark/ml/attribute/attributes.scala            | 20 ++++++++++++++-----
 .../classification/LogisticRegression.scala        |  4 +++-
 .../GeneralizedLinearRegression.scala              |  4 +++-
 .../spark/mllib/feature/ChiSqSelector.scala        |  8 ++++++--
 .../apache/spark/mllib/linalg/Matrices.scala       | 16 +++++++++++----
 .../linalg/distributed/BlockMatrix.scala           |  4 +++-
 .../linalg/distributed/CoordinateMatrix.scala      |  4 +++-
 .../linalg/distributed/IndexedRowMatrix.scala      |  4 +++-
 .../apache/spark/mllib/stat/Statistics.scala       |  8 ++++++--
 .../scala/org/apache/spark/sql/Encoder.scala       |  4 +++-
 .../apache/spark/sql/types/ArrayType.scala         |  4 +++-
 .../apache/spark/streaming/StateSpec.scala         |  8 ++++++--
 24 files changed, 129 insertions(+), 43 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 0c1c68de89f81..d78b9f1b29685 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -378,7 +378,9 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria
     settings.entrySet().asScala.map(x => (x.getKey, x.getValue)).toArray
   }
 
-  /** Get all parameters that start with `prefix` */
+  /**
+   * Get all parameters that start with `prefix`
+   */
   def getAllWithPrefix(prefix: String): Array[(String, String)] = {
     getAll.filter { case (k, v) => k.startsWith(prefix) }
       .map { case (k, v) => (k.substring(prefix.length), v) }
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
index a32a4b28c1731..b71af0d42cdb0 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -45,7 +45,9 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double])
 
   import JavaDoubleRDD.fromRDD
 
-  /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
+  /**
+   * Persist this RDD with the default storage level (`MEMORY_ONLY`).
+   */
   def cache(): JavaDoubleRDD = fromRDD(srdd.cache())
 
   /**
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index d7e3a1b1be48c..766aea213a972 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -54,7 +54,9 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
 
   // Common RDD functions
 
-  /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
+  /**
+   * Persist this RDD with the default storage level (`MEMORY_ONLY`).
+   */
   def cache(): JavaPairRDD[K, V] = new JavaPairRDD[K, V](rdd.cache())
 
   /**
@@ -454,13 +456,17 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
     fromRDD(rdd.subtractByKey(other))
   }
 
-  /** Return an RDD with the pairs from `this` whose keys are not in `other`. */
+  /**
+   * Return an RDD with the pairs from `this` whose keys are not in `other`.
+   */
   def subtractByKey[W](other: JavaPairRDD[K, W], numPartitions: Int): JavaPairRDD[K, V] = {
     implicit val ctag: ClassTag[W] = fakeClassTag
     fromRDD(rdd.subtractByKey(other, numPartitions))
   }
 
-  /** Return an RDD with the pairs from `this` whose keys are not in `other`. */
+  /**
+   * Return an RDD with the pairs from `this` whose keys are not in `other`.
+   */
   def subtractByKey[W](other: JavaPairRDD[K, W], p: Partitioner): JavaPairRDD[K, V] = {
     implicit val ctag: ClassTag[W] = fakeClassTag
     fromRDD(rdd.subtractByKey(other, p))
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
index 94e26e687c66b..41b5cab601c36 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
@@ -34,7 +34,9 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
 
   // Common RDD functions
 
-  /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
+  /**
+   * Persist this RDD with the default storage level (`MEMORY_ONLY`).
+   */
   def cache(): JavaRDD[T] = wrapRDD(rdd.cache())
 
   /**
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index aad99e3eb2c5b..ec12b9963e7c2 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -914,14 +914,18 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
     subtractByKey(other, self.partitioner.getOrElse(new HashPartitioner(self.partitions.length)))
   }
 
-  /** Return an RDD with the pairs from `this` whose keys are not in `other`. */
+  /**
+   * Return an RDD with the pairs from `this` whose keys are not in `other`.
+   */
   def subtractByKey[W: ClassTag](
       other: RDD[(K, W)],
       numPartitions: Int): RDD[(K, V)] = self.withScope {
     subtractByKey(other, new HashPartitioner(numPartitions))
   }
 
-  /** Return an RDD with the pairs from `this` whose keys are not in `other`. */
+  /**
+   * Return an RDD with the pairs from `this` whose keys are not in `other`.
+   */
   def subtractByKey[W: ClassTag](other: RDD[(K, W)], p: Partitioner): RDD[(K, V)] = self.withScope {
     new SubtractedRDD[K, V, W](self, other, p)
   }
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index f723fcb837f88..d285e917b8a67 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -195,10 +195,14 @@ abstract class RDD[T: ClassTag](
     }
   }
 
-  /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
+  /**
+   * Persist this RDD with the default storage level (`MEMORY_ONLY`).
+   */
   def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
 
-  /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
+  /**
+   * Persist this RDD with the default storage level (`MEMORY_ONLY`).
+   */
   def cache(): this.type = persist()
 
   /**
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeRDDImpl.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeRDDImpl.scala
index faa985594ec08..376c7b06f9d2b 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeRDDImpl.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgeRDDImpl.scala
@@ -63,7 +63,9 @@ class EdgeRDDImpl[ED: ClassTag, VD: ClassTag] private[graphx] (
     this
   }
 
-  /** Persists the edge partitions using `targetStorageLevel`, which defaults to MEMORY_ONLY. */
+  /**
+   * Persists the edge partitions using `targetStorageLevel`, which defaults to MEMORY_ONLY.
+   */
   override def cache(): this.type = {
     partitionsRDD.persist(targetStorageLevel)
     this
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
index 3810110099993..5d2a53782b55d 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
@@ -277,7 +277,9 @@ class GraphImpl[VD: ClassTag, ED: ClassTag] protected (
 
 object GraphImpl {
 
-  /** Create a graph from edges, setting referenced vertices to `defaultVertexAttr`. */
+  /**
+   * Create a graph from edges, setting referenced vertices to `defaultVertexAttr`.
+   */
   def apply[VD: ClassTag, ED: ClassTag](
       edges: RDD[Edge[ED]],
       defaultVertexAttr: VD,
@@ -286,7 +288,9 @@ object GraphImpl {
     fromEdgeRDD(EdgeRDD.fromEdges(edges), defaultVertexAttr, edgeStorageLevel, vertexStorageLevel)
   }
 
-  /** Create a graph from EdgePartitions, setting referenced vertices to `defaultVertexAttr`. */
+  /**
+   * Create a graph from EdgePartitions, setting referenced vertices to `defaultVertexAttr`.
+   */
   def fromEdgePartitions[VD: ClassTag, ED: ClassTag](
       edgePartitions: RDD[(PartitionID, EdgePartition[ED, VD])],
       defaultVertexAttr: VD,
@@ -296,7 +300,9 @@ object GraphImpl {
       vertexStorageLevel)
   }
 
-  /** Create a graph from vertices and edges, setting missing vertices to `defaultVertexAttr`. */
+  /**
+   * Create a graph from vertices and edges, setting missing vertices to `defaultVertexAttr`.
+   */
   def apply[VD: ClassTag, ED: ClassTag](
       vertices: RDD[(VertexId, VD)],
       edges: RDD[Edge[ED]],
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexRDDImpl.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexRDDImpl.scala
index d314522de9916..3c6f22d97360d 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexRDDImpl.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexRDDImpl.scala
@@ -63,7 +63,9 @@ class VertexRDDImpl[VD] private[graphx] (
     this
   }
 
-  /** Persists the vertex partitions at `targetStorageLevel`, which defaults to MEMORY_ONLY. */
+  /**
+   * Persists the vertex partitions at `targetStorageLevel`, which defaults to MEMORY_ONLY.
+   */
   override def cache(): this.type = {
     partitionsRDD.persist(targetStorageLevel)
     this
diff --git a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala
index 4d4b06b0952bd..d9ffdeb797fb8 100644
--- a/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala
+++ b/mllib-local/src/main/scala/org/apache/spark/ml/linalg/Matrices.scala
@@ -85,11 +85,15 @@ sealed trait Matrix extends Serializable {
   @Since("2.0.0")
   def copy: Matrix
 
-  /** Transpose the Matrix. Returns a new `Matrix` instance sharing the same underlying data. */
+  /**
+   * Transpose the Matrix. Returns a new `Matrix` instance sharing the same underlying data.
+   */
   @Since("2.0.0")
   def transpose: Matrix
 
-  /** Convenience method for `Matrix`-`DenseMatrix` multiplication. */
+  /**
+   * Convenience method for `Matrix`-`DenseMatrix` multiplication.
+   */
   @Since("2.0.0")
   def multiply(y: DenseMatrix): DenseMatrix = {
     val C: DenseMatrix = DenseMatrix.zeros(numRows, y.numCols)
@@ -97,13 +101,17 @@ sealed trait Matrix extends Serializable {
     C
   }
 
-  /** Convenience method for `Matrix`-`DenseVector` multiplication. For binary compatibility. */
+  /**
+   * Convenience method for `Matrix`-`DenseVector` multiplication. For binary compatibility.
+   */
   @Since("2.0.0")
   def multiply(y: DenseVector): DenseVector = {
     multiply(y.asInstanceOf[Vector])
   }
 
-  /** Convenience method for `Matrix`-`Vector` multiplication. */
+  /**
+   * Convenience method for `Matrix`-`Vector` multiplication.
+   */
   @Since("2.0.0")
   def multiply(y: Vector): DenseVector = {
     val output = new DenseVector(new Array[Double](numRows))
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
index 38176b96ba2ed..08e9cb9ba8668 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Pipeline.scala
@@ -216,7 +216,9 @@ object Pipeline extends MLReadable[Pipeline] {
     }
   }
 
-  /** Methods for `MLReader` and `MLWriter` shared between [[Pipeline]] and [[PipelineModel]] */
+  /**
+   * Methods for `MLReader` and `MLWriter` shared between [[Pipeline]] and [[PipelineModel]]
+   */
   private[ml] object SharedReadWrite {
 
     import org.json4s.JsonDSL._
diff --git a/mllib/src/main/scala/org/apache/spark/ml/attribute/AttributeGroup.scala b/mllib/src/main/scala/org/apache/spark/ml/attribute/AttributeGroup.scala
index 527cb2d547b63..21a246e454c83 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/attribute/AttributeGroup.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/attribute/AttributeGroup.scala
@@ -239,7 +239,9 @@ object AttributeGroup {
     }
   }
 
-  /** Creates an attribute group from a `StructField` instance. */
+  /**
+   * Creates an attribute group from a `StructField` instance.
+   */
   def fromStructField(field: StructField): AttributeGroup = {
     require(field.dataType == new VectorUDT)
     if (field.metadata.contains(ML_ATTR)) {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
index cc7e8bc301ad3..7fbfee75e96a9 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
@@ -109,7 +109,9 @@ sealed abstract class Attribute extends Serializable {
     StructField(name.get, DoubleType, nullable = false, newMetadata)
   }
 
-  /** Converts to a `StructField`. */
+  /**
+   * Converts to a `StructField`.
+   */
   def toStructField(): StructField = toStructField(Metadata.empty)
 
   override def toString: String = toMetadataImpl(withType = true).toString
@@ -369,12 +371,16 @@ class NominalAttribute private[ml] (
   override def withIndex(index: Int): NominalAttribute = copy(index = Some(index))
   override def withoutIndex: NominalAttribute = copy(index = None)
 
-  /** Copy with new values and empty `numValues`. */
+  /**
+   * Copy with new values and empty `numValues`.
+   */
   def withValues(values: Array[String]): NominalAttribute = {
     copy(numValues = None, values = Some(values))
   }
 
-  /** Copy with new values and empty `numValues`. */
+  /**
+   * Copy with new values and empty `numValues`.
+   */
   @varargs
   def withValues(first: String, others: String*): NominalAttribute = {
     copy(numValues = None, values = Some((first +: others).toArray))
@@ -385,12 +391,16 @@ class NominalAttribute private[ml] (
     copy(values = None)
   }
 
-  /** Copy with a new `numValues` and empty `values`. */
+  /**
+   * Copy with a new `numValues` and empty `values`.
+   */
   def withNumValues(numValues: Int): NominalAttribute = {
     copy(numValues = Some(numValues), values = None)
   }
 
-  /** Copy without the `numValues`. */
+  /**
+   * Copy without the `numValues`.
+   */
   def withoutNumValues: NominalAttribute = copy(numValues = None)
 
   /**
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index ec582266e6a47..d3ae62e243302 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -1105,7 +1105,9 @@ sealed trait LogisticRegressionTrainingSummary extends LogisticRegressionSummar
  */
 sealed trait LogisticRegressionSummary extends Serializable {
 
-  /** Dataframe output by the model's `transform` method. */
+  /**
+   * Dataframe output by the model's `transform` method.
+   */
   def predictions: DataFrame
 
   /** Field in "predictions" which gives the probability of each class as a vector. */
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
index e718cda2623a0..770a2571bb9c2 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
@@ -886,7 +886,9 @@ class GeneralizedLinearRegressionSummary private[regression] (
   protected val model: GeneralizedLinearRegressionModel =
     origModel.copy(ParamMap.empty).setPredictionCol(predictionCol)
 
-  /** Predictions output by the model's `transform` method. */
+  /**
+   * Predictions output by the model's `transform` method.
+   */
   @Since("2.0.0") @transient val predictions: DataFrame = model.transform(dataset)
 
   private[regression] lazy val family: Family = Family.fromName(model.getFamily)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index f9156b642785f..05ad2492f8c43 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -255,10 +255,14 @@ class ChiSqSelector @Since("2.1.0") () extends Serializable {
 
 private[spark] object ChiSqSelector {
 
-  /** String name for `numTopFeatures` selector type. */
+  /**
+   * String name for `numTopFeatures` selector type.
+   */
   val NumTopFeatures: String = "numTopFeatures"
 
-  /** String name for `percentile` selector type. */
+  /**
+   * String name for `percentile` selector type.
+   */
   val Percentile: String = "percentile"
 
   /** String name for `fpr` selector type. */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala
index 542a69b3ef8cf..6c39fe5d84865 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala
@@ -91,11 +91,15 @@ sealed trait Matrix extends Serializable {
   @Since("1.2.0")
   def copy: Matrix
 
-  /** Transpose the Matrix. Returns a new `Matrix` instance sharing the same underlying data. */
+  /**
+   * Transpose the Matrix. Returns a new `Matrix` instance sharing the same underlying data.
+   */
   @Since("1.3.0")
   def transpose: Matrix
 
-  /** Convenience method for `Matrix`-`DenseMatrix` multiplication. */
+  /**
+   * Convenience method for `Matrix`-`DenseMatrix` multiplication.
+   */
   @Since("1.2.0")
   def multiply(y: DenseMatrix): DenseMatrix = {
     val C: DenseMatrix = DenseMatrix.zeros(numRows, y.numCols)
@@ -103,13 +107,17 @@ sealed trait Matrix extends Serializable {
     C
   }
 
-  /** Convenience method for `Matrix`-`DenseVector` multiplication. For binary compatibility. */
+  /**
+   * Convenience method for `Matrix`-`DenseVector` multiplication. For binary compatibility.
+   */
   @Since("1.2.0")
   def multiply(y: DenseVector): DenseVector = {
     multiply(y.asInstanceOf[Vector])
   }
 
-  /** Convenience method for `Matrix`-`Vector` multiplication. */
+  /**
+   * Convenience method for `Matrix`-`Vector` multiplication.
+   */
   @Since("1.4.0")
   def multiply(y: Vector): DenseVector = {
     val output = new DenseVector(new Array[Double](numRows))
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala
index 9e75217410d36..ff81a2f03e2a8 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrix.scala
@@ -295,7 +295,9 @@ class BlockMatrix @Since("1.3.0") (
     new IndexedRowMatrix(rows)
   }
 
-  /** Collect the distributed matrix on the driver as a `DenseMatrix`. */
+  /**
+   * Collect the distributed matrix on the driver as a `DenseMatrix`.
+   */
   @Since("1.3.0")
   def toLocalMatrix(): Matrix = {
     require(numRows() < Int.MaxValue, "The number of rows of this matrix should be less than " +
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.scala
index d2c5b14a5b128..26ca1ef9be870 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/CoordinateMatrix.scala
@@ -101,7 +101,9 @@ class CoordinateMatrix @Since("1.0.0") (
     toIndexedRowMatrix().toRowMatrix()
   }
 
-  /** Converts to BlockMatrix. Creates blocks of `SparseMatrix` with size 1024 x 1024. */
+  /**
+   * Converts to BlockMatrix. Creates blocks of `SparseMatrix` with size 1024 x 1024.
+   */
   @Since("1.3.0")
   def toBlockMatrix(): BlockMatrix = {
     toBlockMatrix(1024, 1024)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.scala
index 590e959daa1f4..d7255d527f036 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrix.scala
@@ -90,7 +90,9 @@ class IndexedRowMatrix @Since("1.0.0") (
     new RowMatrix(rows.map(_.vector), 0L, nCols)
   }
 
-  /** Converts to BlockMatrix. Creates blocks of `SparseMatrix` with size 1024 x 1024. */
+  /**
+   * Converts to BlockMatrix. Creates blocks of `SparseMatrix` with size 1024 x 1024.
+   */
   @Since("1.3.0")
   def toBlockMatrix(): BlockMatrix = {
     toBlockMatrix(1024, 1024)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
index 7ba9b292969e7..5ebbfb2b6298d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/Statistics.scala
@@ -176,7 +176,9 @@ object Statistics {
     ChiSqTest.chiSquaredFeatures(data)
   }
 
-  /** Java-friendly version of `chiSqTest()` */
+  /**
+   * Java-friendly version of `chiSqTest()`
+   */
   @Since("1.5.0")
   def chiSqTest(data: JavaRDD[LabeledPoint]): Array[ChiSqTestResult] = chiSqTest(data.rdd)
 
@@ -218,7 +220,9 @@ object Statistics {
     KolmogorovSmirnovTest.testOneSample(data, distName, params: _*)
   }
 
-  /** Java-friendly version of `kolmogorovSmirnovTest()` */
+  /**
+   * Java-friendly version of `kolmogorovSmirnovTest()`
+   */
   @Since("1.5.0")
   @varargs
   def kolmogorovSmirnovTest(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
index b9f8c46443021..68ea47cedac9a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
@@ -77,6 +77,8 @@ trait Encoder[T] extends Serializable {
   /** Returns the schema of encoding this type of object as a Row. */
   def schema: StructType
 
-  /** A ClassTag that can be used to construct and Array to contain a collection of `T`. */
+  /**
+   * A ClassTag that can be used to construct and Array to contain a collection of `T`.
+   */
   def clsTag: ClassTag[T]
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
index 5d70ef01373f5..d409271fbc6b5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
@@ -31,7 +31,9 @@ import org.apache.spark.sql.catalyst.util.ArrayData
  */
 @InterfaceStability.Stable
 object ArrayType extends AbstractDataType {
-  /** Construct a [[ArrayType]] object with the given element type. The `containsNull` is true. */
+  /**
+   * Construct a [[ArrayType]] object with the given element type. The `containsNull` is true.
+   */
   def apply(elementType: DataType): ArrayType = ArrayType(elementType, containsNull = true)
 
   override private[sql] def defaultConcreteType: DataType = ArrayType(NullType, containsNull = true)
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala b/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
index c3b28bd516da5..dcd698c860d8b 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
@@ -70,10 +70,14 @@ import org.apache.spark.util.ClosureCleaner
 @Experimental
 sealed abstract class StateSpec[KeyType, ValueType, StateType, MappedType] extends Serializable {
 
-  /** Set the RDD containing the initial states that will be used by `mapWithState` */
+  /**
+   * Set the RDD containing the initial states that will be used by `mapWithState`
+   */
   def initialState(rdd: RDD[(KeyType, StateType)]): this.type
 
-  /** Set the RDD containing the initial states that will be used by `mapWithState` */
+  /**
+   * Set the RDD containing the initial states that will be used by `mapWithState`
+   */
   def initialState(javaPairRDD: JavaPairRDD[KeyType, StateType]): this.type
 
   /**
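A side note for reviewers: the `grep` in the testing section only lists candidate lines. Below is a minimal Scala sketch of the same audit, illustrative only and not part of this patch (the object name, regex, and traversal are my own), which reports a file and line number for every single-line doc comment containing backticks:

```scala
import java.io.File
import scala.io.Source

// Illustrative audit sketch (not part of this patch): list single-line doc
// comments (/** ... */ on one physical line) that contain backticks, since
// genjavadoc renders such backticks literally in the generated javadoc.
object SingleLineDocAudit {
  // Matches a doc comment that opens and closes on the same line and
  // contains at least one backtick, e.g. /** Uses `prefix` */.
  private val singleLineDocWithBackticks = """/\*\*.*`.*\*/""".r

  // Recursively collect all .scala files under the given directory.
  private def scalaFiles(dir: File): Seq[File] = {
    val children = Option(dir.listFiles).map(_.toSeq).getOrElse(Seq.empty)
    children.filter(f => f.isFile && f.getName.endsWith(".scala")) ++
      children.filter(_.isDirectory).flatMap(scalaFiles)
  }

  def main(args: Array[String]): Unit = {
    val root = new File(args.headOption.getOrElse("."))
    scalaFiles(root).foreach { file =>
      val source = Source.fromFile(file, "UTF-8")
      try {
        source.getLines().zipWithIndex.foreach { case (line, i) =>
          if (singleLineDocWithBackticks.findFirstIn(line).isDefined) {
            println(s"${file.getPath}:${i + 1}: ${line.trim}")
          }
        }
      } finally {
        source.close()
      }
    }
  }
}
```

Run from the repository root (e.g. `scala SingleLineDocAudit .`), this should give the same hits as the raw `grep`, plus line numbers. Like the `grep`, it only flags candidates; whether each hit actually appears in the public API documentation still has to be verified against a manually built site.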