From b1177aa20863292eed3e692e4d50bcaf3c98c77c Mon Sep 17 00:00:00 2001
From: David Rabinowitz
Date: Mon, 27 Apr 2020 09:00:02 -0700
Subject: [PATCH] prepare release 0.15.1-beta

---
 CHANGES.md |  4 ++++
 README.md  | 14 +++++++-------
 build.sbt  |  2 +-
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 83b532d46..cd0f333bd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,9 @@
 # Release Notes
 
+## 0.15.1-beta - 2020-04-27
+* PR #158: Users can now add the `spark.datasource.bigquery` prefix to the configuration options in order to support Spark's `--conf` command line flag
+* PR #160: View materialization is performed only on action, fixing a bug where view materialization was done too early
+
 ## 0.15.0-beta - 2020-04-20
 * PR #150: Reading `DataFrame`s should be quicker, especially in interactive usage such as in notebooks
 * PR #154: Upgraded to the BigQuery Storage v1 API
diff --git a/README.md b/README.md
index 8dd773910..1563a379f 100644
--- a/README.md
+++ b/README.md
@@ -76,8 +76,8 @@ repository. It can be used using the `--packages` option or the
 
 | Scala version | Connector Artifact |
 | --- | --- |
-| Scala 2.11 | `com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.0-beta` |
-| Scala 2.12 | `com.google.cloud.spark:spark-bigquery-with-dependencies_2.12:0.15.0-beta` |
+| Scala 2.11 | `com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.1-beta` |
+| Scala 2.12 | `com.google.cloud.spark:spark-bigquery-with-dependencies_2.12:0.15.1-beta` |
 
 ## Hello World Example
 
@@ -533,7 +533,7 @@ using the following code:
 ```python
 from pyspark.sql import SparkSession
 spark = SparkSession.builder\
-  .config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.0-beta")\
+  .config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.1-beta")\
   .getOrCreate()
 df = spark.read.format("bigquery")\
   .option("table","dataset.table")\
@@ -543,7 +543,7 @@ df = spark.read.format("bigquery")\
 **Scala:**
 ```scala
 val spark = SparkSession.builder
-  .config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.0-beta")
+  .config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.1-beta")
   .getOrCreate()
 val df = spark.read.format("bigquery")
   .option("table","dataset.table")
@@ -552,7 +552,7 @@ val df = spark.read.format("bigquery")
 
 In case the Spark cluster is using Scala 2.12 (it's optional for Spark 2.4.x,
 mandatory in 3.0.x), then the relevant package is
-com.google.cloud.spark:spark-bigquery-with-dependencies_**2.12**:0.15.0-beta. In
+com.google.cloud.spark:spark-bigquery-with-dependencies_**2.12**:0.15.1-beta. In
 order to know which Scala version is used, please run the following code:
 
 **Python:**
@@ -576,14 +576,14 @@ To include the connector in your project:
 <dependency>
   <groupId>com.google.cloud.spark</groupId>
   <artifactId>spark-bigquery-with-dependencies_${scala.version}</artifactId>
-  <version>0.15.0-beta</version>
+  <version>0.15.1-beta</version>
 </dependency>
 ```
 
 ### SBT
 
 ```sbt
-libraryDependencies += "com.google.cloud.spark" %% "spark-bigquery-with-dependencies" % "0.15.0-beta"
+libraryDependencies += "com.google.cloud.spark" %% "spark-bigquery-with-dependencies" % "0.15.1-beta"
 ```
 
 ## Building the Connector
diff --git a/build.sbt b/build.sbt
index 56c02f6b5..28482355b 100644
--- a/build.sbt
+++ b/build.sbt
@@ -4,7 +4,7 @@ lazy val sparkVersion = "2.4.0"
 
 lazy val commonSettings = Seq(
   organization := "com.google.cloud.spark",
-  version := "0.15.1-beta-SNAPSHOT",
+  version := "0.15.1-beta",
   scalaVersion := scala211Version,
   crossScalaVersions := Seq(scala211Version, scala212Version)
 )
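
Not part of the patch above, but for readers of the changelog: a minimal sketch of how the two 0.15.1-beta changes surface in user code. It assumes the connector options `viewsEnabled` and `materializationDataset` documented in the connector's README; the dataset and view names are hypothetical.

```python
from pyspark.sql import SparkSession

# Sketch only (not part of this patch); dataset/view names are hypothetical.
spark = (
    SparkSession.builder
    .config("spark.jars.packages",
            "com.google.cloud.spark:spark-bigquery-with-dependencies_2.11:0.15.1-beta")
    # PR #158: connector options can now carry the spark.datasource.bigquery prefix,
    # so the same settings can also be passed on the command line, e.g.
    #   --conf spark.datasource.bigquery.viewsEnabled=true
    .config("spark.datasource.bigquery.viewsEnabled", "true")
    .config("spark.datasource.bigquery.materializationDataset", "my_temp_dataset")
    .getOrCreate()
)

# PR #160: defining a DataFrame over a view no longer materializes it immediately;
# materialization is deferred until an action (count, show, collect, ...) runs.
df = spark.read.format("bigquery").option("table", "my_dataset.my_view").load()
print(df.count())  # the view is materialized here, not at load() time
```

The changelog only states that the prefix enables Spark's `--conf` flag; treating `SparkSession.builder.config` with the same prefixed keys as equivalent is an assumption in this sketch.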