[MINOR] Fix typos in comments and testcase name of code
## What changes were proposed in this pull request?

This PR fixes typos in code comments and in a test case name.

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11481 from dongjoon-hyun/minor_fix_typos_in_code.
dongjoon-hyun authored and srowen committed Mar 3, 2016
1 parent 52035d1 commit 941b270
Showing 19 changed files with 22 additions and 22 deletions.
@@ -28,7 +28,7 @@ import org.apache.spark._
* of them will be combined together, showed in one line.
*/
private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
- // Carrige return
+ // Carriage return
val CR = '\r'
// Update period of progress bar, in milliseconds
val UPDATE_PERIOD = 200L
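The carriage return named in the fixed comment is what lets a console progress bar repaint itself on a single line instead of scrolling. A minimal standalone sketch of the technique (hypothetical demo code, not the ConsoleProgressBar implementation):

```scala
object CarriageReturnDemo {
  // '\r' moves the cursor back to column 0 without starting a new line,
  // so the next print overwrites the previous progress text.
  val CR = '\r'

  def main(args: Array[String]): Unit = {
    for (percent <- 0 to 100 by 5) {
      val bar = "=" * (percent / 5)
      System.err.print(s"$CR[Stage 0: $bar> $percent%]")
      Thread.sleep(50)
    }
    System.err.println()
  }
}
```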
4 changes: 2 additions & 2 deletions core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala
@@ -26,11 +26,11 @@ package org.apache.sparktest
*/
class ImplicitSuite {

- // We only want to test if `implict` works well with the compiler, so we don't need a real
+ // We only want to test if `implicit` works well with the compiler, so we don't need a real
// SparkContext.
def mockSparkContext[T]: org.apache.spark.SparkContext = null

- // We only want to test if `implict` works well with the compiler, so we don't need a real RDD.
+ // We only want to test if `implicit` works well with the compiler, so we don't need a real RDD.
def mockRDD[T]: org.apache.spark.rdd.RDD[T] = null

def testRddToPairRDDFunctions(): Unit = {
2 changes: 1 addition & 1 deletion dev/run-tests.py
@@ -563,7 +563,7 @@ def main():

# backwards compatibility checks
if build_tool == "sbt":
- # Note: compatiblity tests only supported in sbt for now
+ # Note: compatibility tests only supported in sbt for now
detect_binary_inop_with_mima()

# run the test suites
@@ -228,7 +228,7 @@ public Vector predictRaw(Vector features) {
* Create a copy of the model.
* The copy is shallow, except for the embedded paramMap, which gets a deep copy.
* <p>
- * This is used for the defaul implementation of [[transform()]].
+ * This is used for the default implementation of [[transform()]].
*
* In Java, we have to make this method public since Java does not understand Scala's protected
* modifier.
2 changes: 1 addition & 1 deletion examples/src/main/python/mllib/naive_bayes_example.py
@@ -47,7 +47,7 @@ def parseLine(line):
# $example on$
data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)

- # Split data aproximately into training (60%) and test (40%)
+ # Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=0)

# Train a naive Bayes model.
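The word "approximately" in the corrected comment is deliberate: randomSplit assigns each record independently at random, so the 60/40 proportions hold only in expectation, not exactly. A small Scala sketch of the same call (hypothetical data, not the example's own):

```scala
import org.apache.spark.{SparkConf, SparkContext}

object RandomSplitSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("RandomSplitSketch").setMaster("local[2]"))
    val data = sc.parallelize(1 to 1000)
    // Weights are proportions, not exact counts; each element lands in a split
    // independently, so expect roughly 600/400 rather than exactly that.
    val Array(training, test) = data.randomSplit(Array(0.6, 0.4), seed = 0L)
    println(s"training=${training.count()} test=${test.count()}")
    sc.stop()
  }
}
```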
2 changes: 1 addition & 1 deletion examples/src/main/python/mllib/ranking_metrics_example.py
@@ -47,7 +47,7 @@ def parseLine(line):
# Instantiate regression metrics to compare predicted and actual ratings
metrics = RegressionMetrics(scoreAndLabels)

- # Root mean sqaured error
+ # Root mean squared error
print("RMSE = %s" % metrics.rootMeanSquaredError)

# R-squared
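For reference, the two regression metrics printed here have the standard definitions (with predictions \(\hat{y}_i\), labels \(y_i\), and label mean \(\bar{y}\)):

```latex
\[
\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(\hat{y}_i - y_i\right)^2},
\qquad
R^2 = 1 - \frac{\sum_{i=1}^{n}\left(\hat{y}_i - y_i\right)^2}{\sum_{i=1}^{n}\left(y_i - \bar{y}\right)^2}.
\]
```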
2 changes: 1 addition & 1 deletion examples/src/main/python/mllib/word2vec.py
@@ -16,7 +16,7 @@
#

# This example uses text8 file from http://mattmahoney.net/dc/text8.zip
- # The file was downloadded, unziped and split into multiple lines using
+ # The file was downloaded, unzipped and split into multiple lines using
#
# wget http://mattmahoney.net/dc/text8.zip
# unzip text8.zip
@@ -30,7 +30,7 @@ import breeze.linalg.{DenseVector, Vector}
* org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
*/
object LocalFileLR {
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val rand = new Random(42)

case class DataPoint(x: Vector[Double], y: Double)
@@ -35,7 +35,7 @@ import org.apache.spark._
* org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
*/
object SparkHdfsLR {
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val rand = new Random(42)

case class DataPoint(x: Vector[Double], y: Double)
@@ -36,7 +36,7 @@ import org.apache.spark._
*/
object SparkLR {
val N = 10000 // Number of data points
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val R = 0.7 // Scaling factor
val ITERATIONS = 5
val rand = new Random(42)
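For context, LocalFileLR, SparkHdfsLR, and SparkLR (the three examples whose "Numer of dimensions" comment is fixed above) are didactic batch gradient-descent implementations of logistic regression over D-dimensional points. With labels \(y_i \in \{-1, +1\}\), the log-loss gradient such an implementation accumulates each iteration has the usual textbook form (a reference formula, not a quote of the example code; \(\eta\) is the learning rate):

```latex
\[
\nabla L(w) \;=\; \sum_{i=1}^{N} \left( \frac{1}{1 + e^{-y_i\,(w \cdot x_i)}} - 1 \right) y_i\, x_i,
\qquad
w \leftarrow w - \eta\, \nabla L(w).
\]
```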
@@ -60,10 +60,10 @@ object RDDRelation {
// Write out an RDD as a parquet file with overwrite mode.
df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")

- // Read in parquet file. Parquet files are self-describing so the schmema is preserved.
+ // Read in parquet file. Parquet files are self-describing so the schema is preserved.
val parquetFile = sqlContext.read.parquet("pair.parquet")

- // Queries can be run using the DSL on parequet files just like the original RDD.
+ // Queries can be run using the DSL on parquet files just like the original RDD.
parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)

// These files can also be registered as tables.
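"Self-describing" here means the schema is stored inside the Parquet files themselves, so a reader recovers it without any external metadata. A minimal sketch under that assumption (hypothetical standalone job reusing the example's key/value column names, and assuming "pair.parquet" was already written as above):

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

object ParquetSchemaSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("ParquetSchemaSketch").setMaster("local[2]"))
    val sqlContext = new SQLContext(sc)
    // The schema comes from the Parquet file metadata, not from anything we supply here.
    val parquetDF = sqlContext.read.parquet("pair.parquet")
    parquetDF.printSchema()
    // DataFrame queries run on the Parquet-backed data just like on the original data.
    parquetDF.filter(parquetDF("key") === 1).show()
    sc.stop()
  }
}
```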
@@ -44,7 +44,7 @@ object TwitterPopularTags {
val filters = args.takeRight(args.length - 4)

// Set the system properties so that Twitter4j library used by twitter stream
- // can use them to generat OAuth credentials
+ // can use them to generate OAuth credentials
System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
System.setProperty("twitter4j.oauth.accessToken", accessToken)
@@ -21,7 +21,7 @@ import org.apache.spark.SparkFunSuite

class EdgeSuite extends SparkFunSuite {
test ("compare") {
- // decending order
+ // descending order
val testEdges: Array[Edge[Int]] = Array(
Edge(0x7FEDCBA987654321L, -0x7FEDCBA987654321L, 1),
Edge(0x2345L, 0x1234L, 1),
@@ -541,7 +541,7 @@ object PrefixSpan extends Logging {
}

/**
- * Represents a frequence sequence.
+ * Represents a frequent sequence.
* @param sequence a sequence of itemsets stored as an Array of Arrays
* @param freq frequency
* @tparam Item item type
2 changes: 1 addition & 1 deletion project/SparkBuild.scala
@@ -688,7 +688,7 @@ object Unidoc {
"-noqualifier", "java.lang"
),

- // Use GitHub repository for Scaladoc source linke
+ // Use GitHub repository for Scaladoc source links
unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}",

scalacOptions in (ScalaUnidoc, unidoc) ++= Seq(
2 changes: 1 addition & 1 deletion python/pyspark/mllib/fpm.py
@@ -127,7 +127,7 @@ class PrefixSpanModel(JavaModelWrapper):

@since("1.6.0")
def freqSequences(self):
"""Gets frequence sequences"""
"""Gets frequent sequences"""
return self.call("getFreqSequences").map(lambda x: PrefixSpan.FreqSequence(x[0], x[1]))


@@ -72,7 +72,7 @@ import org.apache.spark.annotation.DeveloperApi
* all variables defined by that code. To extract the result of an
* interpreted line to show the user, a second "result object" is created
* which imports the variables exported by the above object and then
- * exports members called "$eval" and "$print". To accomodate user expressions
+ * exports members called "$eval" and "$print". To accommodate user expressions
* that read from variables or methods defined in previous statements, "import"
* statements are used.
*
@@ -1515,7 +1515,7 @@ import org.apache.spark.annotation.DeveloperApi
exprTyper.symbolOfLine(code)

/**
- * Constucts type information based on the provided expression's final
+ * Constructs type information based on the provided expression's final
* result or the definition provided.
*
* @param expr The expression or definition
@@ -29,7 +29,7 @@ private case object OracleDialect extends JdbcDialect {
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
// Handle NUMBER fields that have no precision/scale in special way
- // because JDBC ResultSetMetaData converts this to 0 procision and -127 scale
+ // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale
// For more details, please see
// https://github.com/apache/spark/pull/8780#issuecomment-145598968
// and
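The corrected comment refers to Oracle reporting NUMBER columns declared without precision/scale through JDBC as precision 0 and scale -127; getCatalystType is the JdbcDialect hook where such cases get mapped to a usable Catalyst type. A hedged sketch of a custom dialect doing so (illustrative only, with made-up type choices; not Spark's actual OracleDialect):

```scala
import java.sql.Types

import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._

case object UnscaledNumberDialect extends JdbcDialect {
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle")

  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
    // NUMBER with no declared precision/scale shows up with precision (size) 0;
    // map it to a wide decimal so values are not silently truncated.
    if (sqlType == Types.NUMERIC && size == 0) Some(DecimalType(38, 10)) else None
  }
}
```

A dialect like this would be registered with JdbcDialects.registerDialect before reading from the source, so the mapping takes effect for matching JDBC URLs.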
@@ -23,7 +23,7 @@ import org.apache.spark.sql.execution.streaming.{CompositeOffset, LongOffset, Of
trait OffsetSuite extends SparkFunSuite {
/** Creates test to check all the comparisons of offsets given a `one` that is less than `two`. */
def compare(one: Offset, two: Offset): Unit = {
test(s"comparision $one <=> $two") {
test(s"comparison $one <=> $two") {
assert(one < two)
assert(one <= two)
assert(one <= one)
