From b16e13e9cf57ec2fc5ed48eab5595e603c9614bf Mon Sep 17 00:00:00 2001
From: VitaliiMaltsev <39538064+VitaliiMaltsev@users.noreply.github.com>
Date: Wed, 27 Apr 2022 14:01:26 +0300
Subject: [PATCH 001/152] Redshift Destination: update spec (#12100)
* Redshift Destination: update spec
* update spec.json
* update links in spec.json
* added more links to spec.json | refactoring
* updated docs with standard connector template
* added hyperlink to documentation for part_size field
---
.../src/main/resources/spec.json | 28 ++--
docs/integrations/destinations/redshift.md | 158 ++++++++++--------
2 files changed, 101 insertions(+), 85 deletions(-)
diff --git a/airbyte-integrations/connectors/destination-redshift/src/main/resources/spec.json b/airbyte-integrations/connectors/destination-redshift/src/main/resources/spec.json
index 360372f2ca8956..243259955ddf86 100644
--- a/airbyte-integrations/connectors/destination-redshift/src/main/resources/spec.json
+++ b/airbyte-integrations/connectors/destination-redshift/src/main/resources/spec.json
@@ -49,22 +49,22 @@
"title": "Default Schema"
},
"s3_bucket_name": {
- "title": "S3 Bucket Name",
+ "title": "S3 Bucket Name (Optional)",
"type": "string",
- "description": "The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.",
+ "description": "The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.",
"examples": ["airbyte.staging"]
},
"s3_bucket_path": {
- "title": "S3 Bucket Path",
+ "title": "S3 Bucket Path (Optional)",
"type": "string",
- "description": "The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory.",
+ "description": "The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.",
"examples": ["data_sync/test"]
},
"s3_bucket_region": {
- "title": "S3 Bucket Region",
+ "title": "S3 Bucket Region (Optional)",
"type": "string",
"default": "",
- "description": "The region of the S3 staging bucket to use if utilising a copy strategy.",
+ "description": "The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.",
"enum": [
"",
"us-east-1",
@@ -94,14 +94,14 @@
},
"access_key_id": {
"type": "string",
- "description": "The Access Key Id granting allow one to access the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket.",
- "title": "S3 Key Id",
+ "description": "This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.",
+ "title": "S3 Key Id (Optional)",
"airbyte_secret": true
},
"secret_access_key": {
"type": "string",
- "description": "The corresponding secret to the above access key id.",
- "title": "S3 Access Key",
+ "description": "The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.",
+ "title": "S3 Access Key (Optional)",
"airbyte_secret": true
},
"part_size": {
@@ -109,13 +109,13 @@
"minimum": 10,
"maximum": 100,
"examples": ["10"],
- "description": "Optional. Increase this if syncing tables larger than 100GB. Only relevant for COPY. Files are streamed to S3 in parts. This determines the size of each part, in MBs. As S3 has a limit of 10,000 parts per file, part size affects the table size. This is 10MB by default, resulting in a default limit of 100GB tables. Note, a larger part size will result in larger memory requirements. A rule of thumb is to multiply the part size by 10 to get the memory requirement. Modify this with care.",
- "title": "Stream Part Size"
+ "description": "Increase this if syncing tables larger than 100GB. Only relevant for COPY. Files are streamed to S3 in parts. This determines the size of each part, in MBs. As S3 has a limit of 10,000 parts per file, part size affects the table size. This is 10MB by default, resulting in a default limit of 100GB tables. Note: a larger part size will result in larger memory requirements. A rule of thumb is to multiply the part size by 10 to get the memory requirement. Modify this with care. See docs for details.",
+ "title": "Stream Part Size (Optional)"
},
"purge_staging_data": {
- "title": "Purge Staging Files and Tables",
+ "title": "Purge Staging Files and Tables (Optional)",
"type": "boolean",
- "description": "Whether to delete the staging files from S3 after completing the sync. See the docs for details. Only relevant for COPY. Defaults to true.",
+ "description": "Whether to delete the staging files from S3 after completing the sync. See docs for details.",
"default": true
}
}
diff --git a/docs/integrations/destinations/redshift.md b/docs/integrations/destinations/redshift.md
index 8339f8e7c8ecee..6dd424fdaa3db4 100644
--- a/docs/integrations/destinations/redshift.md
+++ b/docs/integrations/destinations/redshift.md
@@ -1,124 +1,139 @@
# Redshift
-## Overview
+This page guides you through the process of setting up the Redshift destination connector.
+
+## Prerequisites
The Airbyte Redshift destination allows you to sync data to Redshift.
This Redshift destination connector has two replication strategies:
1. INSERT: Replicates data via SQL INSERT queries. This is built on top of the destination-jdbc code base and is configured to rely on JDBC 4.2 standard drivers provided by Amazon via Mulesoft [here](https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42) as described in Redshift documentation [here](https://docs.aws.amazon.com/redshift/latest/mgmt/jdbc20-install.html). **Not recommended for production workloads as this does not scale well**.
-2. COPY: Replicates data by first uploading data to an S3 bucket and issuing a COPY command. This is the recommended loading approach described by Redshift [best practices](https://docs.aws.amazon.com/redshift/latest/dg/c_loading-data-best-practices.html). Requires an S3 bucket and credentials.
-
-Airbyte automatically picks an approach depending on the given configuration - if S3 configuration is present, Airbyte will use the COPY strategy and vice versa.
-
-We recommend users use INSERT for testing, to avoid any additional setup, and switch to COPY for production workloads.
-
-### Sync overview
-
-#### Output schema
-
-Each stream will be output into its own raw table in Redshift. Each table will contain 3 columns:
-
-* `_airbyte_ab_id`: a uuid assigned by Airbyte to each event that is processed. The column type in Redshift is `VARCHAR`.
-* `_airbyte_emitted_at`: a timestamp representing when the event was pulled from the data source. The column type in Redshift is `TIMESTAMP WITH TIME ZONE`.
-* `_airbyte_data`: a json blob representing with the event data. The column type in Redshift is `VARCHAR` but can be be parsed with JSON functions.
-
-#### Features
-| Feature | Supported?\(Yes/No\) | Notes |
-| :--- | :--- | :--- |
-| Full Refresh Sync | Yes | |
-| Incremental - Append Sync | Yes | |
-| Incremental - Deduped History | Yes | |
-| Namespaces | Yes | |
-| SSL Support | Yes | |
+For INSERT strategy:
+* **Host**
+* **Port**
+* **Username**
+* **Password**
+* **Schema**
+* **Database**
+ * This database needs to exist within the cluster provided.
-#### Target Database
+2. COPY: Replicates data by first uploading data to an S3 bucket and issuing a COPY command. This is the recommended loading approach described by Redshift [best practices](https://docs.aws.amazon.com/redshift/latest/dg/c_loading-data-best-practices.html). Requires an S3 bucket and credentials.
-You will need to choose an existing database or create a new database that will be used to store synced data from Airbyte.
+Airbyte automatically picks an approach depending on the given configuration - if S3 configuration is present, Airbyte will use the COPY strategy and vice versa.
-## Getting started
+For COPY strategy (a sample configuration is sketched after the parameter lists below):
-### Requirements
+* **S3 Bucket Name**
+ * See [this](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) to create an S3 bucket.
+* **S3 Bucket Region**
+ * Place the S3 bucket and the Redshift cluster in the same region to save on networking costs.
+* **Access Key Id**
+ * See [this](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) on how to generate an access key.
+ * We recommend creating an Airbyte-specific user. This user will require [read and write permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) to objects in the staging bucket.
+* **Secret Access Key**
+ * Corresponding key to the above key id.
+* **Part Size**
+ * Affects the size limit of an individual Redshift table. Optional. Increase this if syncing tables larger than 100GB. Files are streamed to S3 in parts. This determines the size of each part, in MBs. As S3 has a limit of 10,000 parts per file, part size affects the table size. This is 10MB by default, resulting in a default table limit of 100GB. Note, a larger part size will result in larger memory requirements. A rule of thumb is to multiply the part size by 10 to get the memory requirement. Modify this with care.
-1. Active Redshift cluster
-2. Allow connections from Airbyte to your Redshift cluster \(if they exist in separate VPCs\)
-3. A staging S3 bucket with credentials \(for the COPY strategy\).
+Optional parameters:
+* **Bucket Path**
+ * The directory within the S3 bucket to place the staging data. For example, if you set this to `yourFavoriteSubdirectory`, we will place the staging data inside `s3://yourBucket/yourFavoriteSubdirectory`. If not provided, defaults to the root directory.
+* **Purge Staging Data**
+ * Whether to delete the staging files from S3 after completing the sync. Specifically, the connector will create CSV files named `bucketPath/namespace/streamName/syncDate_epochMillis_randomUuid.csv` containing three columns (`ab_id`, `data`, `emitted_at`). Normally these files are deleted after the `COPY` command completes; if you want to keep them for other purposes, set `purge_staging_data` to `false`.
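+
+A minimal sketch of the COPY-related portion of a destination configuration, using the field names and example values from `spec.json` above (the connection fields such as Host, Port, and Database are omitted here, and the credential values are placeholders to replace with your own):
+
+```json
+{
+  "s3_bucket_name": "airbyte.staging",
+  "s3_bucket_path": "data_sync/test",
+  "s3_bucket_region": "us-east-1",
+  "access_key_id": "<your-access-key-id>",
+  "secret_access_key": "<your-secret-access-key>",
+  "part_size": 10,
+  "purge_staging_data": true
+}
+```
+
+If these S3 fields are left empty, the connector falls back to the INSERT strategy, as described above.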
-:::info
-Even if your Airbyte instance is running on a server in the same VPC as your Redshift cluster, you may need to place them in the **same security group** to allow connections between the two.
+## Step 1: Set up Redshift
-:::
+1. [Log in](https://aws.amazon.com/console/) to AWS Management console.
+ If you don't have an AWS account already, you’ll need to [create](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/) one in order to use the API.
+2. Go to the AWS Redshift service
+3. [Create](https://docs.aws.amazon.com/ses/latest/dg/event-publishing-redshift-cluster.html) and activate an AWS Redshift cluster if you don't have one ready.
+4. (Optional) [Allow](https://aws.amazon.com/premiumsupport/knowledge-center/cannot-connect-redshift-cluster/) connections from Airbyte to your Redshift cluster \(if they exist in separate VPCs\)
+5. (Optional) [Create](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) a staging S3 bucket \(for the COPY strategy\).
-### Setup guide
+## Step 2: Set up the destination connector in Airbyte
-#### 1. Make sure your cluster is active and accessible from the machine running Airbyte
+**For Airbyte Cloud:**
-This is dependent on your networking setup. The easiest way to verify if Airbyte is able to connect to your Redshift cluster is via the check connection tool in the UI. You can check AWS Redshift documentation with a tutorial on how to properly configure your cluster's access [here](https://docs.aws.amazon.com/redshift/latest/gsg/rs-gsg-authorize-cluster-access.html)
+1. [Log into your Airbyte Cloud](https://cloud.airbyte.io/workspaces) account.
+2. In the left navigation bar, click **Destinations**. In the top-right corner, click **+ new destination**.
+3. On the destination setup page, select **Redshift** from the Destination type dropdown and enter a name for this connector.
+4. Fill in all the required fields to use the INSERT or COPY strategy.
+5. Click `Set up destination`.
-#### 2. Fill up connection info
+**For Airbyte OSS:**
-Next is to provide the necessary information on how to connect to your cluster such as the `host` whcih is part of the connection string or Endpoint accessible [here](https://docs.aws.amazon.com/redshift/latest/gsg/rs-gsg-connect-to-cluster.html#rs-gsg-how-to-get-connection-string) without the `port` and `database` name \(it typically includes the cluster-id, region and end with `.redshift.amazonaws.com`\).
+1. Go to the local Airbyte page.
+2. In the left navigation bar, click **Destinations**. In the top-right corner, click **+ new destination**.
+3. On the destination setup page, select **Redshift** from the Destination type dropdown and enter a name for this connector.
+4. Fill in all the required fields to use the INSERT or COPY strategy.
+5. Click `Set up destination`.
-You should have all the requirements needed to configure Redshift as a destination in the UI. You'll need the following information to configure the destination:
-* **Host**
-* **Port**
-* **Username**
-* **Password**
-* **Schema**
-* **Database**
- * This database needs to exist within the cluster provided.
+## Supported sync modes
-#### 2a. Fill up S3 info \(for COPY strategy\)
+The Redshift destination connector supports the following [sync modes](https://docs.airbyte.com/cloud/core-concepts/#connection-sync-mode):
+- Full Refresh
+- Incremental - Append Sync
+- Incremental - Deduped History
-Provide the required S3 info.
+## Performance considerations
-* **S3 Bucket Name**
- * See [this](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) to create an S3 bucket.
-* **S3 Bucket Region**
- * Place the S3 bucket and the Redshift cluster in the same region to save on networking costs.
-* **Access Key Id**
- * See [this](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) on how to generate an access key.
- * We recommend creating an Airbyte-specific user. This user will require [read and write permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) to objects in the staging bucket.
-* **Secret Access Key**
- * Corresponding key to the above key id.
-* **Part Size**
- * Affects the size limit of an individual Redshift table. Optional. Increase this if syncing tables larger than 100GB. Files are streamed to S3 in parts. This determines the size of each part, in MBs. As S3 has a limit of 10,000 parts per file, part size affects the table size. This is 10MB by default, resulting in a default table limit of 100GB. Note, a larger part size will result in larger memory requirements. A rule of thumb is to multiply the part size by 10 to get the memory requirement. Modify this with care.
+Synchronization performance depends on the amount of data to be transferred.
+Cluster scaling issues can be resolved directly using the cluster settings in the AWS Redshift console.
-Optional parameters:
-* **Bucket Path**
- * The directory within the S3 bucket to place the staging data. For example, if you set this to `yourFavoriteSubdirectory`, we will place the staging data inside `s3://yourBucket/yourFavoriteSubdirectory`. If not provided, defaults to the root directory.
-* **Purge Staging Data**
- * Whether to delete the staging files from S3 after completing the sync. Specifically, the connector will create CSV files named `bucketPath/namespace/streamName/syncDate_epochMillis_randomUuid.csv` containing three columns (`ab_id`, `data`, `emitted_at`). Normally these files are deleted after the `COPY` command completes; if you want to keep them for other purposes, set `purge_staging_data` to `false`.
+## Connector-specific features & highlights
-## Notes about Redshift Naming Conventions
+### Notes about Redshift Naming Conventions
From [Redshift Names & Identifiers](https://docs.aws.amazon.com/redshift/latest/dg/r_names.html):
-### Standard Identifiers
+#### Standard Identifiers
* Begin with an ASCII single-byte alphabetic character or underscore character, or a UTF-8 multibyte character two to four bytes long.
* Subsequent characters can be ASCII single-byte alphanumeric characters, underscores, or dollar signs, or UTF-8 multibyte characters two to four bytes long.
* Be between 1 and 127 bytes in length, not including quotation marks for delimited identifiers.
* Contain no quotation marks and no spaces.
-### Delimited Identifiers
+#### Delimited Identifiers
Delimited identifiers \(also known as quoted identifiers\) begin and end with double quotation marks \("\). If you use a delimited identifier, you must use the double quotation marks for every reference to that object. The identifier can contain any standard UTF-8 printable characters other than the double quotation mark itself. Therefore, you can create column or table names that include otherwise illegal characters, such as spaces or the percent symbol. ASCII letters in delimited identifiers are case-insensitive and are folded to lowercase. To use a double quotation mark in a string, you must precede it with another double quotation mark character.
Therefore, the Airbyte Redshift destination will create tables and schemas using Unquoted identifiers when possible, or fall back to Quoted Identifiers if the names contain special characters.
-## Data Size Limitations
+### Data Size Limitations
Redshift specifies a maximum limit of 65535 bytes to store the raw JSON record data. Thus, when a row is too big to fit, the Redshift destination fails to load such data and currently ignores that record.
See [docs](https://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html)
-## Encryption
+### Encryption
All Redshift connections are encrypted using SSL
+### Output schema
+
+Each stream will be output into its own raw table in Redshift. Each table will contain 3 columns (a sample row is sketched after this list):
+
+* `_airbyte_ab_id`: a UUID assigned by Airbyte to each event that is processed. The column type in Redshift is `VARCHAR`.
+* `_airbyte_emitted_at`: a timestamp representing when the event was pulled from the data source. The column type in Redshift is `TIMESTAMP WITH TIME ZONE`.
+* `_airbyte_data`: a JSON blob representing the event data. The column type in Redshift is `VARCHAR` but can be parsed with JSON functions.
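+
+For illustration only, a single row in such a raw table might look like the following sketch (the values are hypothetical; `_airbyte_data` holds the source record serialized as a JSON string):
+
+```json
+{
+  "_airbyte_ab_id": "5a2c0a2b-1f74-4c0e-9d3c-7d1a2b3c4d5e",
+  "_airbyte_emitted_at": "2022-04-27T14:01:26Z",
+  "_airbyte_data": "{\"id\": 1, \"name\": \"example\"}"
+}
+```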
+
+## Data type mapping
+
+| Redshift Type | Airbyte Type | Notes |
+| :--- | :--- | :--- |
+| `boolean` | `boolean` | |
+| `int` | `integer` | |
+| `float` | `number` | |
+| `varchar` | `string` | |
+| `date/varchar` | `date` | |
+| `time/varchar` | `time` | |
+| `timestamptz/varchar` | `timestamp_with_timezone` | |
+| `varchar` | `array` | |
+| `varchar` | `object` | |
+
## Changelog
| Version | Date | Pull Request | Subject |
@@ -142,3 +157,4 @@ All Redshift connections are encrypted using SSL
| 0.3.12 | 2021-07-21 | [3555](https://github.com/airbytehq/airbyte/pull/3555) | Enable partial checkpointing for halfway syncs |
| 0.3.11 | 2021-07-20 | [4874](https://github.com/airbytehq/airbyte/pull/4874) | allow `additionalProperties` in connector spec |
+
From 9f577bb027199320066d7f69da0845f873ea974c Mon Sep 17 00:00:00 2001
From: noahkawasaki-airbyte
<103465980+noahkawasaki-airbyte@users.noreply.github.com>
Date: Wed, 27 Apr 2022 07:07:54 -0700
Subject: [PATCH 002/152] Update specs and definitions files for
destination-postgres 0.3.19 (#12317)
* Generate specs and definitions files after destination-postgres 0.3.19
* Bump destination-postgres-strict-encrypt to 0.1.5
---
.../src/main/resources/seed/destination_definitions.yaml | 2 +-
.../init/src/main/resources/seed/destination_specs.yaml | 2 +-
.../destination-postgres-strict-encrypt/Dockerfile | 2 +-
.../src/test/resources/expected_spec.json | 6 ++++++
4 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/airbyte-config/init/src/main/resources/seed/destination_definitions.yaml b/airbyte-config/init/src/main/resources/seed/destination_definitions.yaml
index bcf75228f11348..e9f195a598225e 100644
--- a/airbyte-config/init/src/main/resources/seed/destination_definitions.yaml
+++ b/airbyte-config/init/src/main/resources/seed/destination_definitions.yaml
@@ -167,7 +167,7 @@
- name: Postgres
destinationDefinitionId: 25c5221d-dce2-4163-ade9-739ef790f503
dockerRepository: airbyte/destination-postgres
- dockerImageTag: 0.3.18
+ dockerImageTag: 0.3.19
documentationUrl: https://docs.airbyte.io/integrations/destinations/postgres
icon: postgresql.svg
- name: Pulsar
diff --git a/airbyte-config/init/src/main/resources/seed/destination_specs.yaml b/airbyte-config/init/src/main/resources/seed/destination_specs.yaml
index af9b9c4fb3d5ff..dfa8a93846274e 100644
--- a/airbyte-config/init/src/main/resources/seed/destination_specs.yaml
+++ b/airbyte-config/init/src/main/resources/seed/destination_specs.yaml
@@ -3047,7 +3047,7 @@
supported_destination_sync_modes:
- "overwrite"
- "append"
-- dockerImage: "airbyte/destination-postgres:0.3.18"
+- dockerImage: "airbyte/destination-postgres:0.3.19"
spec:
documentationUrl: "https://docs.airbyte.io/integrations/destinations/postgres"
connectionSpecification:
diff --git a/airbyte-integrations/connectors/destination-postgres-strict-encrypt/Dockerfile b/airbyte-integrations/connectors/destination-postgres-strict-encrypt/Dockerfile
index 1c32dea0e209b0..0c472d5343f89d 100644
--- a/airbyte-integrations/connectors/destination-postgres-strict-encrypt/Dockerfile
+++ b/airbyte-integrations/connectors/destination-postgres-strict-encrypt/Dockerfile
@@ -16,5 +16,5 @@ ENV APPLICATION destination-postgres-strict-encrypt
COPY --from=build /airbyte /airbyte
-LABEL io.airbyte.version=0.1.4
+LABEL io.airbyte.version=0.1.5
LABEL io.airbyte.name=airbyte/destination-postgres-strict-encrypt
diff --git a/airbyte-integrations/connectors/destination-postgres-strict-encrypt/src/test/resources/expected_spec.json b/airbyte-integrations/connectors/destination-postgres-strict-encrypt/src/test/resources/expected_spec.json
index 10e1c1251a4402..8ba1678fcb554a 100644
--- a/airbyte-integrations/connectors/destination-postgres-strict-encrypt/src/test/resources/expected_spec.json
+++ b/airbyte-integrations/connectors/destination-postgres-strict-encrypt/src/test/resources/expected_spec.json
@@ -54,6 +54,12 @@
"airbyte_secret": true,
"order": 5
},
+ "jdbc_url_params": {
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "title": "JDBC URL Params",
+ "type": "string",
+ "order": 7
+ },
"tunnel_method": {
"type": "object",
"title": "SSH Tunnel Method",
From b70a6fbd87aff1c960e35f25109942ca05daa4bd Mon Sep 17 00:00:00 2001
From: LiRen Tu
Date: Wed, 27 Apr 2022 07:28:58 -0700
Subject: [PATCH 003/152] Format java code (#12401)
---
.../databricks/DatabricksConstants.java | 3 +-
.../destination/jdbc/SqlOperations.java | 47 ++++++++++---------
.../jdbc/copy/CopyConsumerFactory.java | 42 ++++++++---------
.../destination/jdbc/copy/StreamCopier.java | 1 +
.../staging/StagingOperations.java | 2 +-
.../LocalJsonDestinationAcceptanceTest.java | 1 -
.../MariaDbTestDataComparator.java | 24 ++++++----
...bColumnstoreDestinationAcceptanceTest.java | 5 +-
.../MeiliSearchDestinationAcceptanceTest.java | 5 +-
.../MongodbDestinationAcceptanceTest.java | 5 +-
.../mqtt/MqttDestinationAcceptanceTest.java | 5 +-
.../destination/oracle/OracleOperations.java | 22 ++++-----
.../oracle/OracleTestDataComparator.java | 30 +++++++-----
.../SshOracleDestinationAcceptanceTest.java | 4 +-
...ryptedOracleDestinationAcceptanceTest.java | 5 +-
15 files changed, 100 insertions(+), 101 deletions(-)
diff --git a/airbyte-integrations/connectors/destination-databricks/src/main/java/io/airbyte/integrations/destination/databricks/DatabricksConstants.java b/airbyte-integrations/connectors/destination-databricks/src/main/java/io/airbyte/integrations/destination/databricks/DatabricksConstants.java
index f3d014d63726bc..4a5c1a4b146af0 100644
--- a/airbyte-integrations/connectors/destination-databricks/src/main/java/io/airbyte/integrations/destination/databricks/DatabricksConstants.java
+++ b/airbyte-integrations/connectors/destination-databricks/src/main/java/io/airbyte/integrations/destination/databricks/DatabricksConstants.java
@@ -15,7 +15,6 @@ public class DatabricksConstants {
"delta.autoOptimize.optimizeWrite = true",
"delta.autoOptimize.autoCompact = true");
- private DatabricksConstants() {
- }
+ private DatabricksConstants() {}
}
diff --git a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/SqlOperations.java b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/SqlOperations.java
index 3d4eea93012b52..37212fcff9e1d1 100644
--- a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/SqlOperations.java
+++ b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/SqlOperations.java
@@ -21,7 +21,7 @@ public interface SqlOperations {
/**
* Create a schema with provided name if it does not already exist.
*
- * @param database Database that the connector is syncing
+ * @param database Database that the connector is syncing
* @param schemaName Name of schema.
* @throws Exception exception
*/
@@ -30,7 +30,7 @@ public interface SqlOperations {
/**
* Denotes whether the schema exists in destination database
*
- * @param database Database that the connector is syncing
+ * @param database Database that the connector is syncing
* @param schemaName Name of schema.
* @return true if the schema exists in destination database, false if it doesn't
*/
@@ -41,9 +41,9 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
/**
* Create a table with provided name in provided schema if it does not already exist.
*
- * @param database Database that the connector is syncing
+ * @param database Database that the connector is syncing
* @param schemaName Name of schema
- * @param tableName Name of table
+ * @param tableName Name of table
* @throws Exception exception
*/
void createTableIfNotExists(JdbcDatabase database, String schemaName, String tableName) throws Exception;
@@ -51,9 +51,9 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
/**
* Query to create a table with provided name in provided schema if it does not already exist.
*
- * @param database Database that the connector is syncing
+ * @param database Database that the connector is syncing
* @param schemaName Name of schema
- * @param tableName Name of table
+ * @param tableName Name of table
* @return query
*/
String createTableQuery(JdbcDatabase database, String schemaName, String tableName);
@@ -62,7 +62,7 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
* Drop the table if it exists.
*
* @param schemaName Name of schema
- * @param tableName Name of table
+ * @param tableName Name of table
* @throws Exception exception
*/
void dropTableIfExists(JdbcDatabase database, String schemaName, String tableName) throws Exception;
@@ -70,9 +70,9 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
/**
* Query to remove all records from a table. Assumes the table exists.
*
- * @param database Database that the connector is syncing
+ * @param database Database that the connector is syncing
* @param schemaName Name of schema
- * @param tableName Name of table
+ * @param tableName Name of table
* @return Query
*/
String truncateTableQuery(JdbcDatabase database, String schemaName, String tableName);
@@ -80,20 +80,21 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
/**
* Insert records into table. Assumes the table exists.
*
- * @param database Database that the connector is syncing
- * @param records Records to insert.
+ * @param database Database that the connector is syncing
+ * @param records Records to insert.
* @param schemaName Name of schema
- * @param tableName Name of table
+ * @param tableName Name of table
* @throws Exception exception
*/
void insertRecords(JdbcDatabase database, List records, String schemaName, String tableName) throws Exception;
/**
- * Query to copy all records from source table to destination table. Both tables must be in the specified schema. Assumes both table exist.
+ * Query to copy all records from source table to destination table. Both tables must be in the
+ * specified schema. Assumes both table exist.
*
- * @param database Database that the connector is syncing
- * @param schemaName Name of schema
- * @param sourceTableName Name of source table
+ * @param database Database that the connector is syncing
+ * @param schemaName Name of schema
+ * @param sourceTableName Name of source table
* @param destinationTableName Name of destination table
* @return Query
*/
@@ -103,7 +104,7 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
* Given an arbitrary number of queries, execute a transaction.
*
* @param database Database that the connector is syncing
- * @param queries Queries to execute
+ * @param queries Queries to execute
* @throws Exception exception
*/
void executeTransaction(JdbcDatabase database, List queries) throws Exception;
@@ -120,19 +121,21 @@ default boolean isSchemaExists(final JdbcDatabase database, final String schemaN
*/
boolean isSchemaRequired();
-
/**
- * The method is responsible for executing some specific DB Engine logic in onClose method. We can override this method to execute specific logic
- * e.g. to handle any necessary migrations in the destination, etc.
+ * The method is responsible for executing some specific DB Engine logic in onClose method. We can
+ * override this method to execute specific logic e.g. to handle any necessary migrations in the
+ * destination, etc.
*
- * In next example you can see how migration from VARCHAR to SUPER column is handled for the Redshift destination:
+ * In next example you can see how migration from VARCHAR to SUPER column is handled for the
+ * Redshift destination:
*
* @param database - Database that the connector is interacting with
- * @param schemaNames - schemas will be discovered
+ * @param schemaNames - schemas will be discovered
* @see io.airbyte.integrations.destination.redshift.RedshiftSqlOperations#onDestinationCloseOperations
*/
default void onDestinationCloseOperations(JdbcDatabase database, Set schemaNames) {
// do nothing
LOGGER.info("No onDestinationCloseOperations required for this destination.");
}
+
}
diff --git a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/CopyConsumerFactory.java b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/CopyConsumerFactory.java
index 9970402d67870f..2fb4d0b3bf3df1 100644
--- a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/CopyConsumerFactory.java
+++ b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/CopyConsumerFactory.java
@@ -35,13 +35,13 @@ public class CopyConsumerFactory {
private static final Logger LOGGER = LoggerFactory.getLogger(CopyConsumerFactory.class);
public static AirbyteMessageConsumer create(final Consumer outputRecordCollector,
- final JdbcDatabase database,
- final SqlOperations sqlOperations,
- final ExtendedNameTransformer namingResolver,
- final T config,
- final ConfiguredAirbyteCatalog catalog,
- final StreamCopierFactory streamCopierFactory,
- final String defaultSchema) {
+ final JdbcDatabase database,
+ final SqlOperations sqlOperations,
+ final ExtendedNameTransformer namingResolver,
+ final T config,
+ final ConfiguredAirbyteCatalog catalog,
+ final StreamCopierFactory streamCopierFactory,
+ final String defaultSchema) {
final Map pairToCopier = createWriteConfigs(
namingResolver,
config,
@@ -65,12 +65,12 @@ public static AirbyteMessageConsumer create(final Consumer o
}
private static Map createWriteConfigs(final ExtendedNameTransformer namingResolver,
- final T config,
- final ConfiguredAirbyteCatalog catalog,
- final StreamCopierFactory streamCopierFactory,
- final String defaultSchema,
- final JdbcDatabase database,
- final SqlOperations sqlOperations) {
+ final T config,
+ final ConfiguredAirbyteCatalog catalog,
+ final StreamCopierFactory streamCopierFactory,
+ final String defaultSchema,
+ final JdbcDatabase database,
+ final SqlOperations sqlOperations) {
final Map pairToCopier = new HashMap<>();
final String stagingFolder = UUID.randomUUID().toString();
for (final var configuredStream : catalog.getStreams()) {
@@ -89,8 +89,8 @@ private static OnStartFunction onStartFunction(final Map recordWriterFunction(final Map pairToCopier,
- final SqlOperations sqlOperations,
- final Map pairToIgnoredRecordCount) {
+ final SqlOperations sqlOperations,
+ final Map pairToIgnoredRecordCount) {
return (AirbyteStreamNameNamespacePair pair, List records) -> {
final var fileName = pairToCopier.get(pair).prepareStagingFile();
for (final AirbyteRecordMessage recordMessage : records) {
@@ -117,9 +117,9 @@ private static CheckAndRemoveRecordWriter removeStagingFilePrinter(final Map pairToCopier,
- final JdbcDatabase database,
- final SqlOperations sqlOperations,
- final Map pairToIgnoredRecordCount) {
+ final JdbcDatabase database,
+ final SqlOperations sqlOperations,
+ final Map pairToIgnoredRecordCount) {
return (hasFailed) -> {
pairToIgnoredRecordCount
.forEach((pair, count) -> LOGGER.warn("A total of {} record(s) of data from stream {} were invalid and were ignored.", count, pair));
@@ -128,9 +128,9 @@ private static OnCloseFunction onCloseFunction(final Map pairToCopier,
- boolean hasFailed,
- final JdbcDatabase db,
- final SqlOperations sqlOperations)
+ boolean hasFailed,
+ final JdbcDatabase db,
+ final SqlOperations sqlOperations)
throws Exception {
Exception firstException = null;
List streamCopiers = new ArrayList<>(pairToCopier.values());
diff --git a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/StreamCopier.java b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/StreamCopier.java
index 93eb78cadafc28..d655bea2f147e9 100644
--- a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/StreamCopier.java
+++ b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/jdbc/copy/StreamCopier.java
@@ -77,4 +77,5 @@ public interface StreamCopier {
* @return current staging file name
*/
String getCurrentFile();
+
}
diff --git a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/staging/StagingOperations.java b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/staging/StagingOperations.java
index 5af382004d7536..e2a1b799e48ca9 100644
--- a/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/staging/StagingOperations.java
+++ b/airbyte-integrations/connectors/destination-jdbc/src/main/java/io/airbyte/integrations/destination/staging/StagingOperations.java
@@ -11,7 +11,7 @@
import java.util.UUID;
import org.joda.time.DateTime;
-public interface StagingOperations extends SqlOperations {
+public interface StagingOperations extends SqlOperations {
String getStageName(String namespace, String streamName);
diff --git a/airbyte-integrations/connectors/destination-local-json/src/test-integration/java/io/airbyte/integrations/destination/local_json/LocalJsonDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-local-json/src/test-integration/java/io/airbyte/integrations/destination/local_json/LocalJsonDestinationAcceptanceTest.java
index aa17f0a82513cc..63e7dd55d6c686 100644
--- a/airbyte-integrations/connectors/destination-local-json/src/test-integration/java/io/airbyte/integrations/destination/local_json/LocalJsonDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-local-json/src/test-integration/java/io/airbyte/integrations/destination/local_json/LocalJsonDestinationAcceptanceTest.java
@@ -14,7 +14,6 @@
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
-
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
diff --git a/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariaDbTestDataComparator.java b/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariaDbTestDataComparator.java
index 45b6f093f5f94b..d10d4349dc23e7 100644
--- a/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariaDbTestDataComparator.java
+++ b/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariaDbTestDataComparator.java
@@ -1,22 +1,26 @@
+/*
+ * Copyright (c) 2021 Airbyte, Inc., all rights reserved.
+ */
+
package io.airbyte.integrations.destination.mariadb_columnstore;
import io.airbyte.integrations.destination.ExtendedNameTransformer;
import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-
import java.util.ArrayList;
import java.util.List;
public class MariaDbTestDataComparator extends AdvancedTestDataComparator {
- private final ExtendedNameTransformer namingResolver = new MariadbColumnstoreNameTransformer();
+ private final ExtendedNameTransformer namingResolver = new MariadbColumnstoreNameTransformer();
+
+ @Override
+ protected List resolveIdentifier(final String identifier) {
+ final List result = new ArrayList<>();
+ final String resolved = namingResolver.getIdentifier(identifier);
+ result.add(identifier);
+ result.add(resolved);
- @Override
- protected List resolveIdentifier(final String identifier) {
- final List result = new ArrayList<>();
- final String resolved = namingResolver.getIdentifier(identifier);
- result.add(identifier);
- result.add(resolved);
+ return result;
+ }
- return result;
- }
}
diff --git a/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariadbColumnstoreDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariadbColumnstoreDestinationAcceptanceTest.java
index 442e684de020cb..8098ab53ae4564 100644
--- a/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariadbColumnstoreDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-mariadb-columnstore/src/test-integration/java/io/airbyte/integrations/destination/mariadb_columnstore/MariadbColumnstoreDestinationAcceptanceTest.java
@@ -13,13 +13,10 @@
import io.airbyte.integrations.base.JavaBaseConstants;
import io.airbyte.integrations.destination.ExtendedNameTransformer;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
+import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import java.sql.SQLException;
-import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
-
-import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.MariaDBContainer;
diff --git a/airbyte-integrations/connectors/destination-meilisearch/src/test-integration/java/io/airbyte/integrations/destination/meilisearch/MeiliSearchDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-meilisearch/src/test-integration/java/io/airbyte/integrations/destination/meilisearch/MeiliSearchDestinationAcceptanceTest.java
index 66cd9a83e29bf5..bd94430bebdb1d 100644
--- a/airbyte-integrations/connectors/destination-meilisearch/src/test-integration/java/io/airbyte/integrations/destination/meilisearch/MeiliSearchDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-meilisearch/src/test-integration/java/io/airbyte/integrations/destination/meilisearch/MeiliSearchDestinationAcceptanceTest.java
@@ -14,15 +14,14 @@
import io.airbyte.commons.stream.MoreStreams;
import io.airbyte.commons.text.Names;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
+import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
+import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
-
-import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.utility.DockerImageName;
diff --git a/airbyte-integrations/connectors/destination-mongodb/src/test-integration/java/io/airbyte/integrations/destination/mongodb/MongodbDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-mongodb/src/test-integration/java/io/airbyte/integrations/destination/mongodb/MongodbDestinationAcceptanceTest.java
index ac93c4e54a5df5..a04fdd273ec6fc 100644
--- a/airbyte-integrations/connectors/destination-mongodb/src/test-integration/java/io/airbyte/integrations/destination/mongodb/MongodbDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-mongodb/src/test-integration/java/io/airbyte/integrations/destination/mongodb/MongodbDestinationAcceptanceTest.java
@@ -12,11 +12,10 @@
import io.airbyte.commons.json.Jsons;
import io.airbyte.db.mongodb.MongoDatabase;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
-import java.util.ArrayList;
-import java.util.List;
-
import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
+import java.util.ArrayList;
+import java.util.List;
import org.bson.Document;
import org.testcontainers.containers.MongoDBContainer;
diff --git a/airbyte-integrations/connectors/destination-mqtt/src/test-integration/java/io/airbyte/integrations/destination/mqtt/MqttDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-mqtt/src/test-integration/java/io/airbyte/integrations/destination/mqtt/MqttDestinationAcceptanceTest.java
index 8e216112d4622b..d4f9b381187c86 100644
--- a/airbyte-integrations/connectors/destination-mqtt/src/test-integration/java/io/airbyte/integrations/destination/mqtt/MqttDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-mqtt/src/test-integration/java/io/airbyte/integrations/destination/mqtt/MqttDestinationAcceptanceTest.java
@@ -13,6 +13,8 @@
import com.hivemq.testcontainer.junit5.HiveMQTestContainerExtension;
import io.airbyte.commons.json.Jsons;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
+import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
+import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
@@ -23,9 +25,6 @@
import java.util.List;
import java.util.Map;
import java.util.UUID;
-
-import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
import org.eclipse.paho.client.mqttv3.MqttException;
diff --git a/airbyte-integrations/connectors/destination-oracle/src/main/java/io/airbyte/integrations/destination/oracle/OracleOperations.java b/airbyte-integrations/connectors/destination-oracle/src/main/java/io/airbyte/integrations/destination/oracle/OracleOperations.java
index b32cf07cbb4568..a133dfb5285f34 100644
--- a/airbyte-integrations/connectors/destination-oracle/src/main/java/io/airbyte/integrations/destination/oracle/OracleOperations.java
+++ b/airbyte-integrations/connectors/destination-oracle/src/main/java/io/airbyte/integrations/destination/oracle/OracleOperations.java
@@ -94,9 +94,9 @@ public String truncateTableQuery(final JdbcDatabase database, final String schem
@Override
public void insertRecords(final JdbcDatabase database,
- final List records,
- final String schemaName,
- final String tempTableName)
+ final List records,
+ final String schemaName,
+ final String tempTableName)
throws Exception {
final String tableName = String.format("%s.%s", schemaName, tempTableName);
final String columns = String.format("(%s, %s, %s)",
@@ -107,11 +107,11 @@ public void insertRecords(final JdbcDatabase database,
// Adapted from SqlUtils.insertRawRecordsInSingleQuery to meet some needs specific to Oracle syntax
private static void insertRawRecordsInSingleQuery(final String tableName,
- final String columns,
- final String recordQueryComponent,
- final JdbcDatabase jdbcDatabase,
- final List records,
- final Supplier uuidSupplier)
+ final String columns,
+ final String recordQueryComponent,
+ final JdbcDatabase jdbcDatabase,
+ final List records,
+ final Supplier uuidSupplier)
throws SQLException {
if (records.isEmpty()) {
return;
@@ -152,9 +152,9 @@ private static void insertRawRecordsInSingleQuery(final String tableName,
@Override
public String copyTableQuery(final JdbcDatabase database,
- final String schemaName,
- final String sourceTableName,
- final String destinationTableName) {
+ final String schemaName,
+ final String sourceTableName,
+ final String destinationTableName) {
return String.format("INSERT INTO %s.%s SELECT * FROM %s.%s\n", schemaName, destinationTableName, schemaName, sourceTableName);
}
diff --git a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/OracleTestDataComparator.java b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/OracleTestDataComparator.java
index 5be791e75410a4..0ddb650fe2c6db 100644
--- a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/OracleTestDataComparator.java
+++ b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/OracleTestDataComparator.java
@@ -1,25 +1,29 @@
+/*
+ * Copyright (c) 2021 Airbyte, Inc., all rights reserved.
+ */
+
package io.airbyte.integrations.destination.oracle;
import io.airbyte.integrations.destination.ExtendedNameTransformer;
import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-
import java.util.ArrayList;
import java.util.List;
public class OracleTestDataComparator extends AdvancedTestDataComparator {
- private final ExtendedNameTransformer namingResolver = new OracleNameTransformer();
+ private final ExtendedNameTransformer namingResolver = new OracleNameTransformer();
- @Override
- protected List resolveIdentifier(final String identifier) {
- final List result = new ArrayList<>();
- final String resolved = namingResolver.getIdentifier(identifier);
- result.add(identifier);
- result.add(resolved);
- if (!resolved.startsWith("\"")) {
- result.add(resolved.toLowerCase());
- result.add(resolved.toUpperCase());
- }
- return result;
+ @Override
+ protected List resolveIdentifier(final String identifier) {
+ final List result = new ArrayList<>();
+ final String resolved = namingResolver.getIdentifier(identifier);
+ result.add(identifier);
+ result.add(resolved);
+ if (!resolved.startsWith("\"")) {
+ result.add(resolved.toLowerCase());
+ result.add(resolved.toUpperCase());
}
+ return result;
+ }
+
}
diff --git a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/SshOracleDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/SshOracleDestinationAcceptanceTest.java
index ee48ebcbcc7481..133a44263c2511 100644
--- a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/SshOracleDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/SshOracleDestinationAcceptanceTest.java
@@ -17,13 +17,11 @@
import io.airbyte.integrations.base.ssh.SshTunnel;
import io.airbyte.integrations.destination.ExtendedNameTransformer;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
+import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
-
-import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import org.testcontainers.containers.Network;
public abstract class SshOracleDestinationAcceptanceTest extends DestinationAcceptanceTest {
diff --git a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/UnencryptedOracleDestinationAcceptanceTest.java b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/UnencryptedOracleDestinationAcceptanceTest.java
index fd404bee795564..1342c57dafd375 100644
--- a/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/UnencryptedOracleDestinationAcceptanceTest.java
+++ b/airbyte-integrations/connectors/destination-oracle/src/test-integration/java/io/airbyte/integrations/destination/oracle/UnencryptedOracleDestinationAcceptanceTest.java
@@ -17,13 +17,10 @@
import io.airbyte.db.jdbc.JdbcUtils;
import io.airbyte.integrations.destination.ExtendedNameTransformer;
import io.airbyte.integrations.standardtest.destination.DestinationAcceptanceTest;
+import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import java.sql.SQLException;
-import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
-
-import io.airbyte.integrations.standardtest.destination.comparator.AdvancedTestDataComparator;
-import io.airbyte.integrations.standardtest.destination.comparator.TestDataComparator;
import org.junit.Test;
public class UnencryptedOracleDestinationAcceptanceTest extends DestinationAcceptanceTest {
From eea6d1a95ed5d83715b96140a982c3439948a1a9 Mon Sep 17 00:00:00 2001
From: Serhii Lazebnyi <53845333+lazebnyi@users.noreply.github.com>
Date: Wed, 27 Apr 2022 17:42:01 +0300
Subject: [PATCH 004/152] Source Instagram: Deleted read_insights scope from
OAuth (#12344)
* Deleted read_insights scope from oauth
* Deleted read_insights scope from test
---
.../io/airbyte/oauth/flows/facebook/InstagramOAuthFlow.java | 2 +-
.../io/airbyte/oauth/flows/facebook/InstagramOAuthFlowTest.java | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/airbyte-oauth/src/main/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlow.java b/airbyte-oauth/src/main/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlow.java
index c330b42980a6f7..48f72b58144a11 100644
--- a/airbyte-oauth/src/main/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlow.java
+++ b/airbyte-oauth/src/main/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlow.java
@@ -12,7 +12,7 @@
// Instagram Graph API require Facebook API User token
public class InstagramOAuthFlow extends FacebookMarketingOAuthFlow {
- private static final String SCOPES = "ads_management,instagram_basic,instagram_manage_insights,read_insights";
+ private static final String SCOPES = "ads_management,instagram_basic,instagram_manage_insights";
public InstagramOAuthFlow(final ConfigRepository configRepository, final HttpClient httpClient) {
super(configRepository, httpClient);
diff --git a/airbyte-oauth/src/test/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlowTest.java b/airbyte-oauth/src/test/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlowTest.java
index f4ed295a230056..31cb39bc935f98 100644
--- a/airbyte-oauth/src/test/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlowTest.java
+++ b/airbyte-oauth/src/test/java/io/airbyte/oauth/flows/facebook/InstagramOAuthFlowTest.java
@@ -20,7 +20,7 @@ protected BaseOAuthFlow getOAuthFlow() {
@Override
protected String getExpectedConsentUrl() {
- return "https://www.facebook.com/v12.0/dialog/oauth?client_id=test_client_id&redirect_uri=https%3A%2F%2Fairbyte.io&state=state&scope=ads_management%2Cinstagram_basic%2Cinstagram_manage_insights%2Cread_insights";
+ return "https://www.facebook.com/v12.0/dialog/oauth?client_id=test_client_id&redirect_uri=https%3A%2F%2Fairbyte.io&state=state&scope=ads_management%2Cinstagram_basic%2Cinstagram_manage_insights";
}
@Override
From 3ece0c4774fdbf57ed472dc0e8c5ea945b5e60f8 Mon Sep 17 00:00:00 2001
From: Edmundo Ruiz Ghanem <168664+edmundito@users.noreply.github.com>
Date: Wed, 27 Apr 2022 10:43:46 -0400
Subject: [PATCH 005/152] Replace DeleteModal with Confirmation Modal (#12275)
When delete is confirmed, navigate away from route
---
.../ConfirmationModal/ConfirmationModal.tsx | 4 +-
.../components/DeleteBlock/DeleteBlock.tsx | 25 +++++++--
.../DeleteBlock/components/DeleteModal.tsx | 52 -------------------
.../ConfirmationModalService.tsx | 1 +
4 files changed, 24 insertions(+), 58 deletions(-)
delete mode 100644 airbyte-webapp/src/components/DeleteBlock/components/DeleteModal.tsx
diff --git a/airbyte-webapp/src/components/ConfirmationModal/ConfirmationModal.tsx b/airbyte-webapp/src/components/ConfirmationModal/ConfirmationModal.tsx
index 2747f956b9aaf7..5bc37008896083 100644
--- a/airbyte-webapp/src/components/ConfirmationModal/ConfirmationModal.tsx
+++ b/airbyte-webapp/src/components/ConfirmationModal/ConfirmationModal.tsx
@@ -29,6 +29,7 @@ export interface ConfirmationModalProps {
text: string;
submitButtonText: string;
onSubmit: () => void;
+ submitButtonDataId?: string;
}
export const ConfirmationModal: React.FC = ({
@@ -37,6 +38,7 @@ export const ConfirmationModal: React.FC = ({
text,
onSubmit,
submitButtonText,
+ submitButtonDataId,
}) => (
}>
@@ -45,7 +47,7 @@ export const ConfirmationModal: React.FC = ({
-