Add hourly partitioning to BigQuery table (upstream PR) (#3707) (#6702)
* Add TODO for checking the Go library for HOUR support; update description and validation value.

* Remove TODO from code and run gofmt.

* Test table creation with HOUR instead of DAY.

* Update google/resource_bigquery_table.go

* Add testAccBigQueryTableHourlyTimePartitioning test.

Co-authored-by: fpopic <filip.popic@gmail.com>
Signed-off-by: Modular Magician <magic-modules@google.com>

modular-magician and fpopic committed Jun 25, 2020
1 parent d690c16 commit 05c20a4
Showing 3 changed files with 97 additions and 6 deletions.
.changelog/3707.txt (3 additions, 0 deletions)
@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigquery: Added `"HOUR"` option for `google_bigquery_table` time partitioning (`type`)
+```
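For reference, a minimal Terraform configuration exercising the new option might look like the sketch below. It is adapted from the acceptance-test config added by this commit; the `example_*` names and the `ts` column are illustrative, not part of the change itself.

```hcl
resource "google_bigquery_dataset" "example" {
  dataset_id = "example_dataset"
}

resource "google_bigquery_table" "example" {
  dataset_id = google_bigquery_dataset.example.dataset_id
  table_id   = "example_table"

  # With this change, "HOUR" passes provider validation alongside "DAY".
  time_partitioning {
    type  = "HOUR"
    field = "ts" # optional; omit to partition by ingestion time instead
  }

  schema = <<EOF
[
  {
    "name": "ts",
    "type": "TIMESTAMP"
  }
]
EOF
}
```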
google/resource_bigquery_table.go (4 additions, 4 deletions)
@@ -336,13 +336,13 @@ func resourceBigQueryTable() *schema.Resource {
				Description: `Number of milliseconds for which to keep the storage for a partition.`,
			},

-			// Type: [Required] The only type supported is DAY, which will generate
-			// one partition per day based on data loading time.
+			// Type: [Required] The supported types are DAY and HOUR, which will generate
+			// one partition per day or hour based on data loading time.
			"type": {
				Type:         schema.TypeString,
				Required:     true,
-				Description:  `The only type supported is DAY, which will generate one partition per day based on data loading time.`,
-				ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
+				Description:  `The supported types are DAY and HOUR, which will generate one partition per day or hour based on data loading time.`,
+				ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR"}, false),
			},

// Field: [Optional] The field used to determine how to create a time-based
google/resource_bigquery_table_test.go (90 additions, 2 deletions)
@@ -20,7 +20,7 @@ func TestAccBigQueryTable_Basic(t *testing.T) {
CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
Steps: []resource.TestStep{
{
-				Config: testAccBigQueryTable(datasetID, tableID),
+				Config: testAccBigQueryTableDailyTimePartitioning(datasetID, tableID),
},
{
ResourceName: "google_bigquery_table.test",
@@ -64,6 +64,37 @@ func TestAccBigQueryTable_Kms(t *testing.T) {
})
}

+func TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) {
+	t.Parallel()
+
+	datasetID := fmt.Sprintf("tf_test_%s", randString(t, 10))
+	tableID := fmt.Sprintf("tf_test_%s", randString(t, 10))
+
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID),
+			},
+			{
+				ResourceName:      "google_bigquery_table.test",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccBigQueryTableUpdated(datasetID, tableID),
+			},
+			{
+				ResourceName:      "google_bigquery_table.test",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}

func TestAccBigQueryTable_HivePartitioning(t *testing.T) {
t.Parallel()
bucketName := testBucketName(t)
@@ -261,7 +292,7 @@ func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.St
}
}

-func testAccBigQueryTable(datasetID, tableID string) string {
+func testAccBigQueryTableDailyTimePartitioning(datasetID, tableID string) string {
return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
dataset_id = "%s"
@@ -318,6 +349,63 @@ EOH
`, datasetID, tableID)
}

+func testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID string) string {
+	return fmt.Sprintf(`
+resource "google_bigquery_dataset" "test" {
+  dataset_id = "%s"
+}
+resource "google_bigquery_table" "test" {
+  table_id   = "%s"
+  dataset_id = google_bigquery_dataset.test.dataset_id
+  time_partitioning {
+    type                     = "HOUR"
+    field                    = "ts"
+    require_partition_filter = true
+  }
+  clustering = ["some_int", "some_string"]
+  schema     = <<EOH
+[
+  {
+    "name": "ts",
+    "type": "TIMESTAMP"
+  },
+  {
+    "name": "some_string",
+    "type": "STRING"
+  },
+  {
+    "name": "some_int",
+    "type": "INTEGER"
+  },
+  {
+    "name": "city",
+    "type": "RECORD",
+    "fields": [
+      {
+        "name": "id",
+        "type": "INTEGER"
+      },
+      {
+        "name": "coord",
+        "type": "RECORD",
+        "fields": [
+          {
+            "name": "lon",
+            "type": "FLOAT"
+          }
+        ]
+      }
+    ]
+  }
+]
+EOH
+}
+`, datasetID, tableID)
+}

func testAccBigQueryTableKms(cryptoKeyName, datasetID, tableID string) string {
return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
