diff --git a/.changelog/643.txt b/.changelog/643.txt
new file mode 100644
index 000000000..055bc6f58
--- /dev/null
+++ b/.changelog/643.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+harness_platform_pipeline - Added support to import pipeline entity from git.
+```
\ No newline at end of file
diff --git a/.changelog/665.txt b/.changelog/665.txt
new file mode 100644
index 000000000..c90385a48
--- /dev/null
+++ b/.changelog/665.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+Fixed harness_platform_file_store_folder create resource plugin crash when a service account token was used to create the resource
+```
diff --git a/.changelog/668.txt b/.changelog/668.txt
new file mode 100644
index 000000000..c77d2475d
--- /dev/null
+++ b/.changelog/668.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/harness_platform_user: Limit the user creation call to 1 at a time.
+```
diff --git a/.changelog/669.txt b/.changelog/669.txt
new file mode 100644
index 000000000..74b24df2c
--- /dev/null
+++ b/.changelog/669.txt
@@ -0,0 +1,8 @@
+```release-note:enhancement
+data_source_monitored_service_test.go Added tests for multiple health sources such as Prometheus, Datadog etc.
+resource_monitored_service.go Added version field and renamed MonitoredServiceSpec to MonitoredService
+resource_monitored_service_test.go renamed MonitoredServiceSpec to MonitoredService
+utils.go Deserializer updated with new health sources such as azure, signalFx, loki and sumologic
+platform_monitored_service.md Added docs for health sources such as azure, signalFx, loki and sumologic
+resource.tf Added examples for all newly added health sources, datadog and prometheus
+```
\ No newline at end of file
diff --git a/.changelog/670.txt b/.changelog/670.txt
new file mode 100644
index 000000000..fd1d6aa0a
--- /dev/null
+++ b/.changelog/670.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+Fixed policy-set api to correctly enable/disable policy-sets
+```
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3bdfffe04..fde8e0679 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,26 @@
+# 0.24.5 (August 29,2023)
+
+BUG FIXES:
+
+* Fixed policy-set api to correctly enable/disable policy-sets ([#670](https://github.com/harness/terraform-provider-harness/issues/670))
+
+# 0.24.4 (August 29,2023)
+
+ENHANCEMENTS:
+
+* data_source_monitored_service_test.go Added tests for multiple health sources such as Prometheus, Datadog etc.
+resource_monitored_service.go Added version field and renamed MonitoredServiceSpec to MonitoredService
+resource_monitored_service_test.go renamed MonitoredServiceSpec to MonitoredService
+utils.go Deserializer updated with new health sources such as azure, signalFx, loki and sumologic
+platform_monitored_service.md Added docs for health sources such as azure, signalFx, loki and sumologic
+resource.tf Added examples for all newly added health sources, datadog and prometheus ([#669](https://github.com/harness/terraform-provider-harness/issues/669))
+* harness_platform_pipeline - Added support to import pipeline entity from git. ([#643](https://github.com/harness/terraform-provider-harness/issues/643))
+* resource/harness_platform_user: Limit the user creation call to 1 at a time. ([#668](https://github.com/harness/terraform-provider-harness/issues/668))
+
+BUG FIXES:
+
+* Fixed harness_platform_file_store_folder create resource plugin crash when a service account token was used to create the resource ([#665](https://github.com/harness/terraform-provider-harness/issues/665))
+
# 0.24.3 (August 22,2023)
BUG FIXES:
diff --git a/docs/data-sources/platform_pipeline.md b/docs/data-sources/platform_pipeline.md
index 73574d0f5..328b27580 100644
--- a/docs/data-sources/platform_pipeline.md
+++ b/docs/data-sources/platform_pipeline.md
@@ -60,5 +60,3 @@ Read-Only:
- `file_path` (String) File path of the Entity in the repository.
- `repo_name` (String) Name of the repository.
- `store_type` (String) Specifies whether the Entity is to be stored in Git or not. Possible values: INLINE, REMOTE.
-
-
diff --git a/docs/resources/platform_monitored_service.md b/docs/resources/platform_monitored_service.md
index e8ea3c6ee..fede13b38 100644
--- a/docs/resources/platform_monitored_service.md
+++ b/docs/resources/platform_monitored_service.md
@@ -13,11 +13,11 @@ Resource for creating a monitored service.
## Example Usage
```terraform
+#Sample template for Elastic Search Log Health Source
resource "harness_platform_monitored_service" "example" {
- account_id = "account_id"
- org_id = "default"
- project_id = "default_project"
- identifier = "Terraform"
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
request {
name = "name"
type = "Application"
@@ -29,27 +29,35 @@ resource "harness_platform_monitored_service" "example" {
name = "name"
identifier = "identifier"
type = "ElasticSearch"
+ version = "v2"
spec = jsonencode({
connectorRef = "connectorRef"
- feature = "feature"
- queries = [
- {
- name = "name"
- query = "query"
- index = "index"
- serviceInstanceIdentifier = "serviceInstanceIdentifier"
- timeStampIdentifier = "timeStampIdentifier"
- timeStampFormat = "timeStampFormat"
- messageIdentifier = "messageIdentifier"
+ queryDefinitions = [
+ {
+ name = "name"
+ query = "query"
+ index = "index"
+ groupName = "Logs_Group"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
},
{
- name = "name2"
- query = "query2"
- index = "index2"
- serviceInstanceIdentifier = "serviceInstanceIdentifier2"
- timeStampIdentifier = "timeStampIdentifier2"
- timeStampFormat = "timeStampFormat2"
- messageIdentifier = "messageIdentifier2"
+ name = "name2"
+ query = "query2"
+ index = "index2"
+ groupName = "Logs_Group"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
}
]
})
@@ -71,6 +79,468 @@ resource "harness_platform_monitored_service" "example" {
notification_rule_ref = "notification_rule_ref1"
enabled = false
}
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Sumologic Metrics Health Source
+resource "harness_platform_monitored_service" "example1" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologicmetrics"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_cpu"
+ identifier = "metric_cpu"
+ query = "metric=cpu"
+ groupName = "g1"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "metric=memory"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Sumologic Log Health Source
+resource "harness_platform_monitored_service" "example2" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologic"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "log1"
+ identifier = "log1"
+ query = "*"
+ groupName = "Logs Group"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Splunk Signal FX Health Source
+resource "harness_platform_monitored_service" "example3" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "signalfxmetrics"
+ identifier = "signalfxmetrics"
+ type = "SplunkSignalFXMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_infra_cpu"
+ identifier = "metric_infra_cpu"
+ query = "***"
+ groupName = "g"
+ riskProfile = {
+ riskCategory = "Errors"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER",
+ "ACT_WHEN_LOWER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Grafana Loki Log Health Source
+resource "harness_platform_monitored_service" "example4" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "Test"
+ identifier = "Test"
+ type = "GrafanaLokiLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "Demo"
+ identifier = "Demo"
+ query = "{job=~\".+\"}"
+ groupName = "Log_Group"
+ queryParams = {
+ serviceInstanceField = "job"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Azure Metrics Health Source
+resource "harness_platform_monitored_service" "example5" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "azure metrics verify step"
+ identifier = "azure_metrics_verify_step"
+ type = "AzureMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric"
+ identifier = "metric"
+ query = "default"
+ groupName = "g1"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Azure Log Health Source
+resource "harness_platform_monitored_service" "example6" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "Demo azure"
+ identifier = "Demo_azure"
+ type = "AzureLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ queryParams = {
+ serviceInstanceField = "Name",
+ timeStampIdentifier = "StartedTime",
+ messageIdentifier = "Image",
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Prometheus Metrics Health Source
+resource "harness_platform_monitored_service" "example7" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "prometheus metrics verify step"
+ identifier = "prometheus_metrics"
+ type = "Prometheus"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ metricDefinitions = [
+ {
+ identifier = "Prometheus_Metric",
+ metricName = "Prometheus Metric",
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod_name"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ query = "count(up{group=\"cv\",group=\"cv\"})"
+ groupName = "met"
+ isManualQuery = true
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Datadog Metrics Health Source
+resource "harness_platform_monitored_service" "example8" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "ddm"
+ identifier = "ddm"
+ type = "DatadogMetrics"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ feature = "Datadog Cloud Metrics"
+ metricDefinitions = [
+ {
+ metricName = "metric"
+ metricPath = "M1"
+ identifier = "metric"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = true
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ },
+ {
+ metricName = "dashboard_metric_cpu"
+ identifier = "metric_cpu"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = false
+ dashboardName = "dashboard"
+ metricPath = "M1"
+ groupingQuery = "avg:kubernetes.cpu.limits{*} by {host}.rollup(avg, 60)"
+ metric = "kubernetes.cpu.limits"
+ aggregation = "avg"
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
}
}
```
@@ -104,16 +574,16 @@ Required:
Optional:
+- `change_sources` (Block Set) Set of change sources for the monitored service. (see [below for nested schema](#nestedblock--request--change_sources))
- `dependencies` (Block Set) Dependencies of the monitored service. (see [below for nested schema](#nestedblock--request--dependencies))
- `description` (String) Description for the monitored service.
+- `enabled` (Boolean, Deprecated) Enable or disable the monitored service.
- `environment_ref_list` (List of String) Environment reference list for the monitored service.
+- `health_sources` (Block Set) Set of health sources for the monitored service. (see [below for nested schema](#nestedblock--request--health_sources))
- `notification_rule_refs` (Block List) Notification rule references for the monitored service. (see [below for nested schema](#nestedblock--request--notification_rule_refs))
-- `enabled` (Boolean) Enable or disable the monitored service. Enabled field is deprecated.
- `tags` (Set of String) Tags for the monitored service. comma-separated key value string pairs.
- `template_ref` (String) Template reference for the monitored service.
- `version_label` (String) Template version label for the monitored service.
-- `change_sources` (Block Set) Set of change sources for the monitored service. (see [below for nested schema](#nestedblock--request--change_sources))
-- `health_sources` (Block Set) Set of health sources for the monitored service. (see [below for nested schema](#nestedblock--request--health_sources))
### Nested Schema for `request.change_sources`
@@ -131,6 +601,19 @@ Optional:
- `spec` (String) Specification of the change source. Depends on the type of the change source.
+
+### Nested Schema for `request.dependencies`
+
+Required:
+
+- `monitored_service_identifier` (String) Monitored service identifier of the dependency.
+- `type` (String) Type of the service dependency.
+
+Optional:
+
+- `dependency_metadata` (String) Dependency metadata for the monitored service.
+
+
### Nested Schema for `request.health_sources`
@@ -141,18 +624,9 @@ Required:
- `spec` (String) Specification of the health source. Depends on the type of the health source.
- `type` (String) Type of the health source.
-
-
-### Nested Schema for `request.dependencies`
-
-Required:
-
-- `monitored_service_identifier` (String) Monitored service identifier of the dependency.
-- `type` (String) Type of the service dependency.
-
Optional:
-- `dependency_metadata` (String) Dependency metadata for the monitored service.
+- `version` (String) Version of the health source.
diff --git a/docs/resources/platform_pipeline.md b/docs/resources/platform_pipeline.md
index ae3bccbd2..c8b6e1b4f 100644
--- a/docs/resources/platform_pipeline.md
+++ b/docs/resources/platform_pipeline.md
@@ -115,6 +115,29 @@ resource "harness_platform_pipeline" "example" {
type: StageRollback
EOT
}
+
+### Importing Pipeline from Git
+resource "harness_platform_organization" "test" {
+ identifier = "identifier"
+ name = "name"
+}
+resource "harness_platform_pipeline" "test" {
+ identifier = "gitx"
+ org_id = "default"
+ project_id = "V"
+ name = "gitx"
+ import_from_git = true
+ git_import_info {
+ branch_name = "main"
+ file_path = ".harness/gitx.yaml"
+ connector_ref = "account.DoNotDeleteGithub"
+ repo_name = "open-repo"
+ }
+ pipeline_import_request {
+ pipeline_name = "gitx"
+ pipeline_description = "Pipeline Description"
+ }
+}
```
@@ -126,15 +149,18 @@ resource "harness_platform_pipeline" "example" {
- `name` (String) Name of the resource.
- `org_id` (String) Unique identifier of the organization.
- `project_id` (String) Unique identifier of the project.
-- `yaml` (String) YAML of the pipeline. In YAML, to reference an entity at the organization scope, prefix 'org' to the expression: org.{identifier}. To reference an entity at the account scope, prefix 'account` to the expression: account.{identifier}. For eg, to reference a connector with identifier 'connectorId' at the organization scope in a stage mention it as connectorRef: org.connectorId.
### Optional
- `description` (String) Description of the resource.
- `git_details` (Block List, Max: 1) Contains parameters related to creating an Entity for Git Experience. (see [below for nested schema](#nestedblock--git_details))
+- `git_import_info` (Block List, Max: 1) Contains Git Information for importing entities from Git (see [below for nested schema](#nestedblock--git_import_info))
+- `import_from_git` (Boolean) Flag to set if importing from Git
+- `pipeline_import_request` (Block List, Max: 1) Contains parameters for importing a pipeline (see [below for nested schema](#nestedblock--pipeline_import_request))
- `tags` (Set of String) Tags to associate with the resource.
- `template_applied` (Boolean) If true, returns Pipeline YAML with Templates applied on it.
- `template_applied_pipeline_yaml` (String) Pipeline YAML after resolving Templates (returned as a String).
+- `yaml` (String) YAML of the pipeline. In YAML, to reference an entity at the organization scope, prefix 'org' to the expression: org.{identifier}. To reference an entity at the account scope, prefix 'account` to the expression: account.{identifier}. For eg, to reference a connector with identifier 'connectorId' at the organization scope in a stage mention it as connectorRef: org.connectorId.
### Read-Only
@@ -155,6 +181,26 @@ Optional:
- `repo_name` (String) Name of the repository.
- `store_type` (String) Specifies whether the Entity is to be stored in Git or not. Possible values: INLINE, REMOTE.
+
+
+### Nested Schema for `git_import_info`
+
+Optional:
+
+- `branch_name` (String) Name of the branch.
+- `connector_ref` (String) Identifier of the Harness Connector used for importing entity from Git To reference a connector at the organization scope, prefix 'org' to the expression: org.{identifier}. To reference a connector at the account scope, prefix 'account` to the expression: account.{identifier}.
+- `file_path` (String) File path of the Entity in the repository.
+- `repo_name` (String) Name of the repository.
+
+
+
+### Nested Schema for `pipeline_import_request`
+
+Optional:
+
+- `pipeline_description` (String) Description of the pipeline.
+- `pipeline_name` (String) Name of the pipeline.
+
## Import
Import is supported using the following syntax:
diff --git a/examples/resources/harness_platform_monitored_service/resource.tf b/examples/resources/harness_platform_monitored_service/resource.tf
index 754f4406a..512053c0f 100644
--- a/examples/resources/harness_platform_monitored_service/resource.tf
+++ b/examples/resources/harness_platform_monitored_service/resource.tf
@@ -1,3 +1,4 @@
+#Sample template for Elastic Search Log Health Source
resource "harness_platform_monitored_service" "example" {
org_id = "org_id"
project_id = "project_id"
@@ -13,29 +14,38 @@ resource "harness_platform_monitored_service" "example" {
name = "name"
identifier = "identifier"
type = "ElasticSearch"
+ version = "v2"
spec = jsonencode({
connectorRef = "connectorRef"
- feature = "feature"
- queries = [
+ queryDefinitions = [
{
- name = "name"
- query = "query"
- index = "index"
- serviceInstanceIdentifier = "serviceInstanceIdentifier"
- timeStampIdentifier = "timeStampIdentifier"
- timeStampFormat = "timeStampFormat"
- messageIdentifier = "messageIdentifier"
+ name = "name"
+ query = "query"
+ index = "index"
+ groupName = "Logs_Group"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
},
{
- name = "name2"
- query = "query2"
- index = "index2"
- serviceInstanceIdentifier = "serviceInstanceIdentifier2"
- timeStampIdentifier = "timeStampIdentifier2"
- timeStampFormat = "timeStampFormat2"
- messageIdentifier = "messageIdentifier2"
+ name = "name2"
+ query = "query2"
+ index = "index2"
+ groupName = "Logs_Group"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
}
- ] })
+ ]
+ })
}
change_sources {
name = "csName1"
@@ -57,4 +67,464 @@ resource "harness_platform_monitored_service" "example" {
template_ref = "template_ref"
version_label = "version_label"
}
+}
+#Sample template for Sumologic Metrics Health Source
+resource "harness_platform_monitored_service" "example1" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologicmetrics"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_cpu"
+ identifier = "metric_cpu"
+ query = "metric=cpu"
+ groupName = "g1"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "metric=memory"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Sumologic Log Health Source
+resource "harness_platform_monitored_service" "example2" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologic"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "log1"
+ identifier = "log1"
+ query = "*"
+ groupName = "Logs Group"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Splunk Signal FX Health Source
+resource "harness_platform_monitored_service" "example3" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "signalfxmetrics"
+ identifier = "signalfxmetrics"
+ type = "SplunkSignalFXMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_infra_cpu"
+ identifier = "metric_infra_cpu"
+ query = "***"
+ groupName = "g"
+ riskProfile = {
+ riskCategory = "Errors"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER",
+ "ACT_WHEN_LOWER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Grafana Loki Log Health Source
+resource "harness_platform_monitored_service" "example4" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "Test"
+ identifier = "Test"
+ type = "GrafanaLokiLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "Demo"
+ identifier = "Demo"
+ query = "{job=~\".+\"}"
+ groupName = "Log_Group"
+ queryParams = {
+ serviceInstanceField = "job"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+#Sample template for Azure Metrics Health Source
+resource "harness_platform_monitored_service" "example5" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "azure metrics verify step"
+ identifier = "azure_metrics_verify_step"
+ type = "AzureMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric"
+ identifier = "metric"
+ query = "default"
+ groupName = "g1"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Azure Log Health Source
+resource "harness_platform_monitored_service" "example6" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "Demo azure"
+ identifier = "Demo_azure"
+ type = "AzureLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ queryParams = {
+ serviceInstanceField = "Name",
+ timeStampIdentifier = "StartedTime",
+ messageIdentifier = "Image",
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Prometheus Metrics Health Source
+resource "harness_platform_monitored_service" "example7" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "prometheus metrics verify step"
+ identifier = "prometheus_metrics"
+ type = "Prometheus"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ metricDefinitions = [
+ {
+ identifier = "Prometheus_Metric",
+ metricName = "Prometheus Metric",
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod_name"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ query = "count(up{group=\"cv\",group=\"cv\"})"
+ groupName = "met"
+ isManualQuery = true
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+#Sample template for Datadog Metrics Health Source
+resource "harness_platform_monitored_service" "example8" {
+ org_id = "org_id"
+ project_id = "project_id"
+ identifier = "identifier"
+ request {
+ name = "name"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "ddm"
+ identifier = "ddm"
+ type = "DatadogMetrics"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ feature = "Datadog Cloud Metrics"
+ metricDefinitions = [
+ {
+ metricName = "metric"
+ metricPath = "M1"
+ identifier = "metric"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = true
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ },
+ {
+ metricName = "dashboard_metric_cpu"
+ identifier = "metric_cpu"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = false
+ dashboardName = "dashboard"
+ metricPath = "M1"
+ groupingQuery = "avg:kubernetes.cpu.limits{*} by {host}.rollup(avg, 60)"
+ metric = "kubernetes.cpu.limits"
+ aggregation = "avg"
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "pod"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
}
\ No newline at end of file
diff --git a/examples/resources/harness_platform_pipeline/resource.tf b/examples/resources/harness_platform_pipeline/resource.tf
index d760ed864..9c4c86805 100644
--- a/examples/resources/harness_platform_pipeline/resource.tf
+++ b/examples/resources/harness_platform_pipeline/resource.tf
@@ -100,3 +100,26 @@ resource "harness_platform_pipeline" "example" {
type: StageRollback
EOT
}
+
+### Importing Pipeline from Git
+resource "harness_platform_organization" "test" {
+ identifier = "identifier"
+ name = "name"
+}
+resource "harness_platform_pipeline" "test" {
+ identifier = "gitx"
+ org_id = "default"
+ project_id = "V"
+ name = "gitx"
+ import_from_git = true
+ git_import_info {
+ branch_name = "main"
+ file_path = ".harness/gitx.yaml"
+ connector_ref = "account.DoNotDeleteGithub"
+ repo_name = "open-repo"
+ }
+ pipeline_import_request {
+ pipeline_name = "gitx"
+ pipeline_description = "Pipeline Description"
+ }
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 60a6fe563..7a46117c8 100644
--- a/go.mod
+++ b/go.mod
@@ -5,8 +5,8 @@ go 1.18
require (
github.com/antihax/optional v1.0.0
github.com/docker/docker v24.0.5+incompatible
- github.com/harness/harness-go-sdk v0.3.44
- github.com/harness/harness-openapi-go-client v0.0.17
+ github.com/harness/harness-go-sdk v0.3.47
+ github.com/harness/harness-openapi-go-client v0.0.18
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-retryablehttp v0.7.4
github.com/hashicorp/terraform-plugin-sdk/v2 v2.27.0
@@ -45,11 +45,11 @@ require (
github.com/hashicorp/terraform-json v0.17.1 // indirect
github.com/hashicorp/terraform-plugin-go v0.18.0 // indirect
github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
- github.com/hashicorp/terraform-registry-address v0.2.1 // indirect
+ github.com/hashicorp/terraform-registry-address v0.2.2 // indirect
github.com/hashicorp/terraform-svchost v0.1.1 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
github.com/jhump/protoreflect v1.6.1 // indirect
- github.com/jinzhu/copier v0.3.5 // indirect
+ github.com/jinzhu/copier v0.4.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
@@ -75,12 +75,13 @@ require (
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.12.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895 // indirect
- google.golang.org/grpc v1.56.2 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+ google.golang.org/grpc v1.57.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.3.0 // indirect
)
-// replace github.com/harness/harness-go-sdk => ../harness-go-sdk
+// replace github.com/harness/harness-go-sdk => ../harness-go-sdk
+
// replace github.com/harness/harness-openapi-go-client => ../harness-openapi-go-client
diff --git a/go.sum b/go.sum
index 38706f29a..878ac7796 100644
--- a/go.sum
+++ b/go.sum
@@ -46,10 +46,10 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/harness/harness-go-sdk v0.3.44 h1:DMcLBqtRiuQGeBT/cqx6jax4oe0NrDJ9Fbjspz7NNdM=
-github.com/harness/harness-go-sdk v0.3.44/go.mod h1:CPXydorp4zd5Dz2u2FXiHyWL4yd5PQafOMN69cgPSvk=
-github.com/harness/harness-openapi-go-client v0.0.17 h1:EZneIyi6sV+dlTgXbawxdVD0OoDmG3mnGHEJbwslRzc=
-github.com/harness/harness-openapi-go-client v0.0.17/go.mod h1:u0vqYb994BJGotmEwJevF4L3BNAdU9i8ui2d22gmLPA=
+github.com/harness/harness-go-sdk v0.3.47 h1:2XwMDY33ygt1Zxyloyy6/s/ARk0o4J54lD9dRv2h534=
+github.com/harness/harness-go-sdk v0.3.47/go.mod h1:CPXydorp4zd5Dz2u2FXiHyWL4yd5PQafOMN69cgPSvk=
+github.com/harness/harness-openapi-go-client v0.0.18 h1:gPhLOSOwjmZJ3aLjJiBbWPOMSBm8h72wbVSfx19eWZM=
+github.com/harness/harness-openapi-go-client v0.0.18/go.mod h1:u0vqYb994BJGotmEwJevF4L3BNAdU9i8ui2d22gmLPA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -90,8 +90,8 @@ github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9T
github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.27.0 h1:I8efBnjuDrgPjNF1MEypHy48VgcTIUY4X6rOFunrR3Y=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.27.0/go.mod h1:cUEP4ly/nxlHy5HzD6YRrHydtlheGvGRJDhiWqqVik4=
-github.com/hashicorp/terraform-registry-address v0.2.1 h1:QuTf6oJ1+WSflJw6WYOHhLgwUiQ0FrROpHPYFtwTYWM=
-github.com/hashicorp/terraform-registry-address v0.2.1/go.mod h1:BSE9fIFzp0qWsJUUyGquo4ldV9k2n+psif6NYkBRS3Y=
+github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno=
+github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo=
github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ=
github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc=
github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
@@ -100,8 +100,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8=
github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
-github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
-github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
+github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -274,11 +274,11 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895 h1:co8AMhI481nhd3WBfW2mq5msyQHNBcGn7G9GCEqz45k=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
-google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/internal/service/platform/environment_service_overrides/resource_environment_service_overrides.go b/internal/service/platform/environment_service_overrides/resource_environment_service_overrides.go
index 81f4cf67b..8624b9338 100644
--- a/internal/service/platform/environment_service_overrides/resource_environment_service_overrides.go
+++ b/internal/service/platform/environment_service_overrides/resource_environment_service_overrides.go
@@ -63,10 +63,17 @@ func resourceEnvironmentServiceOverridesRead(ctx context.Context, d *schema.Reso
OrgIdentifier: helpers.BuildField(d, "org_id"),
ProjectIdentifier: helpers.BuildField(d, "project_id"),
})
+
if err != nil {
return helpers.HandleReadApiError(err, d, httpResp)
}
+ if resp.Data == nil || len(resp.Data.Content) == 0 {
+ d.SetId("")
+ d.MarkNewResource()
+ return nil
+ }
+
readEnvironmentServiceOverridesList(d, resp.Data)
return nil
diff --git a/internal/service/platform/file_store/file_store_utils.go b/internal/service/platform/file_store/file_store_utils.go
index 5cde1da92..523cbe8b8 100644
--- a/internal/service/platform/file_store/file_store_utils.go
+++ b/internal/service/platform/file_store/file_store_utils.go
@@ -79,5 +79,16 @@ func getOptionalString(str interface{}) optional.String {
return optional.NewString(v)
}
+func getEmail(user *nextgen.EmbeddedUserDetailsDto) string {
+ if user != nil {
+ return user.Email
+ }
+ return ""
+}
-
+func getName(user *nextgen.EmbeddedUserDetailsDto) string {
+ if user != nil {
+ return user.Name
+ }
+ return ""
+}
diff --git a/internal/service/platform/file_store/resource_file.go b/internal/service/platform/file_store/resource_file.go
index 91221fc17..648305702 100644
--- a/internal/service/platform/file_store/resource_file.go
+++ b/internal/service/platform/file_store/resource_file.go
@@ -269,14 +269,14 @@ func readFileNode(d *schema.ResourceData, file *nextgen.File, fileContentOpt opt
d.Set(tags, FlattenTags(file.Tags))
d.Set(createdBy, []interface{}{
map[string]interface{}{
- "email": file.CreatedBy.Email,
- "name": file.CreatedBy.Name,
+ "email": getEmail(file.CreatedBy),
+ "name": getName(file.CreatedBy),
},
})
d.Set(lastModifiedBy, []interface{}{
map[string]interface{}{
- "email": file.LastModifiedBy.Email,
- "name": file.LastModifiedBy.Name,
+ "email": getEmail(file.LastModifiedBy),
+ "name": getName(file.LastModifiedBy),
},
})
d.Set(lastModifiedAt, file.LastModifiedAt)
diff --git a/internal/service/platform/file_store/resource_folder.go b/internal/service/platform/file_store/resource_folder.go
index fe2afd405..2113ca466 100644
--- a/internal/service/platform/file_store/resource_folder.go
+++ b/internal/service/platform/file_store/resource_folder.go
@@ -217,14 +217,14 @@ func readFolderNode(d *schema.ResourceData, file *nextgen.File, fileContentOpt o
d.Set(path, file.Path)
d.Set(createdBy, []interface{}{
map[string]interface{}{
- "email": file.CreatedBy.Email,
- "name": file.CreatedBy.Name,
+ "email": getEmail(file.CreatedBy),
+ "name": getName(file.CreatedBy),
},
})
d.Set(lastModifiedBy, []interface{}{
map[string]interface{}{
- "email": file.LastModifiedBy.Email,
- "name": file.LastModifiedBy.Name,
+ "email": getEmail(file.LastModifiedBy),
+ "name": getName(file.LastModifiedBy),
},
})
d.Set(lastModifiedAt, file.LastModifiedAt)
diff --git a/internal/service/platform/monitored_service/data_source_monitored_service_test.go b/internal/service/platform/monitored_service/data_source_monitored_service_test.go
index b6b8d0a34..5c90d5ac7 100644
--- a/internal/service/platform/monitored_service/data_source_monitored_service_test.go
+++ b/internal/service/platform/monitored_service/data_source_monitored_service_test.go
@@ -11,16 +11,135 @@ import (
func TestAccDataSourceMonitoredService(t *testing.T) {
- id := fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ id := fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6)) // Add with multiple logs and metrics
name := id
resourceName := "data.harness_platform_monitored_service.test"
-
resource.UnitTest(t, resource.TestCase{
PreCheck: func() { acctest.TestAccPreCheck(t) },
ProviderFactories: acctest.ProviderFactories,
Steps: []resource.TestStep{
{
- Config: testAccDataSourceMonitoredService(id, name),
+ Config: testAccELKDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccSumologicMetricDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccSumologicLogDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccSplunkSignalFXDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccGrafanaLokiLogsDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAzureMetricsDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAzureLogsDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccPrometheusMetricsDataSourceMonitoredService(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "identifier", id),
+ resource.TestCheckResourceAttr(resourceName, "org_id", id),
+ resource.TestCheckResourceAttr(resourceName, "project_id", id),
+ ),
+ },
+ },
+ })
+ id = fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDatadogMetricsDataSourceMonitoredService(id, name),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "identifier", id),
resource.TestCheckResourceAttr(resourceName, "org_id", id),
@@ -31,8 +150,9 @@ func TestAccDataSourceMonitoredService(t *testing.T) {
})
}
-func testAccDataSourceMonitoredService(id string, name string) string {
- return fmt.Sprintf(`
+func testAccELKDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
resource "harness_platform_organization" "test" {
identifier = "%[1]s"
name = "%[2]s"
@@ -60,27 +180,463 @@ func testAccDataSourceMonitoredService(id string, name string) string {
name = "name"
identifier = "identifier"
type = "ElasticSearch"
+ version = "v2"
spec = jsonencode({
connectorRef = "connectorRef"
- feature = "feature"
- queries = [
+ queryDefinitions = [
{
- name = "name"
+ name = "name"
+ identifier = "identifier"
query = "query"
- index = "index"
- serviceInstanceIdentifier = "serviceInstanceIdentifier"
- timeStampIdentifier = "timeStampIdentifier"
- timeStampFormat = "timeStampFormat"
- messageIdentifier = "messageIdentifier"
+ groupName = "Logs Group"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "Logs Group"
+ query = "query"
+ queryParams = {
+ index = "index"
+ serviceInstanceField = "serviceInstanceIdentifier"
+ timeStampIdentifier = "timeStampIdentifier"
+ timeStampFormat = "timeStampFormat"
+ messageIdentifier = "messageIdentifier"
+ }
+ }
+ ]})
+ }
+ change_sources {
+ name = "csName1"
+ identifier = "harness_cd_next_gen"
+ type = "HarnessCDNextGen"
+ enabled = true
+ spec = jsonencode({
+ })
+ category = "Deployment"
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccSumologicMetricDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologicmetrics"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_cpu"
+ identifier = "metric_cpu"
+ query = "metric=cpu"
+ groupName = "g1"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "metric=memory"
+ queryParams = {
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]})
+ }
+ change_sources {
+ name = "csName1"
+ identifier = "harness_cd_next_gen"
+ type = "HarnessCDNextGen"
+ enabled = true
+ spec = jsonencode({
+ })
+ category = "Deployment"
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccSumologicLogDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "sumologic"
+ identifier = "sumo_metric_identifier"
+ type = "SumologicLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "log1"
+ identifier = "log1"
+ query = "*"
+ groupName = "Logs Group"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ }
+ ]})
+ }
+ change_sources {
+ name = "csName1"
+ identifier = "harness_cd_next_gen"
+ type = "HarnessCDNextGen"
+ enabled = true
+ spec = jsonencode({
+ })
+ category = "Deployment"
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccSplunkSignalFXDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "signalfxmetrics"
+ identifier = "signalfxmetrics"
+ type = "SplunkSignalFXMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric_infra_cpu"
+ identifier = "metric_infra_cpu"
+ query = "***"
+ groupName = "g"
+ riskProfile = {
+ riskCategory = "Errors"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER",
+ "ACT_WHEN_LOWER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
+ },
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]})
+ }
+ change_sources {
+ name = "csName1"
+ identifier = "harness_cd_next_gen"
+ type = "HarnessCDNextGen"
+ enabled = true
+ spec = jsonencode({
+ })
+ category = "Deployment"
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccGrafanaLokiLogsDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "Test"
+ identifier = "Test"
+ type = "GrafanaLokiLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "Demo"
+ identifier = "Demo"
+ query = "{job=~\".+\"}"
+ groupName = "Log_Group"
+ queryParams = {
+ serviceInstanceField = "job"
+ }
+ },
+ {
+ name = "log2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "error"
+ queryParams = {
+ serviceInstanceField = "_sourcehost"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
+ }
+ ]})
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccAzureMetricsDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "azure metrics verify step"
+ identifier = "azure_metrics_verify_step"
+ type = "AzureMetrics"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "metric"
+ identifier = "metric"
+ query = "default"
+ groupName = "g1"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "true"
+ continuousVerificationEnabled = "true"
+ sliEnabled = "false"
},
{
- name = "name2"
- query = "query2"
- index = "index2"
- serviceInstanceIdentifier = "serviceInstanceIdentifier2"
- timeStampIdentifier = "timeStampIdentifier2"
- timeStampFormat = "timeStampFormat2"
- messageIdentifier = "messageIdentifier2"
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ queryParams = {
+ serviceInstanceField = "host"
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test",
+ healthSourceMetricName = "cpuUsagePercentage",
+ healthSourceMetricNamespace = "insights.container/nodes",
+ aggregationType = "average"
+ }
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ sliEnabled = "false"
}
]})
}
@@ -102,6 +658,257 @@ func testAccDataSourceMonitoredService(id string, name string) string {
identifier = harness_platform_monitored_service.test.identifier
org_id = harness_platform_organization.test.id
project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccAzureLogsDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
}
-`, id, name)
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+ resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = []
+ health_sources {
+ name = "Demo azure"
+ identifier = "Demo_azure"
+ type = "AzureLogs"
+ version = "v2"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ queryDefinitions = [
+ {
+ name = "name2"
+ identifier = "identifier2"
+ groupName = "g2"
+ query = "*"
+ queryParams = {
+ serviceInstanceField = "Name",
+ timeStampIdentifier = "StartedTime",
+ messageIdentifier = "Image",
+ index = "/subscriptions/12d2db62-5aa9-471d-84bb-faa489b3e319/resourceGroups/srm-test/providers/Microsoft.ContainerService/managedClusters/srm-test"
+ }
+ liveMonitoringEnabled = "false"
+ continuousVerificationEnabled = "false"
+ }
+ ]})
+ }
+ change_sources {
+ name = "csName1"
+ identifier = "harness_cd_next_gen"
+ type = "HarnessCDNextGen"
+ enabled = true
+ spec = jsonencode({
+ })
+ category = "Deployment"
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+ }
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccPrometheusMetricsDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "prometheus metrics verify step"
+ identifier = "prometheus_metrics"
+ type = "Prometheus"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ metricDefinitions = [
+ {
+ identifier = "Prometheus_Metric",
+ metricName = "Prometheus Metric",
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ query = "count(up{group=\"cv\",group=\"cv\"})"
+ groupName = "met"
+ serviceInstanceFieldName = "pod_name"
+ isManualQuery = true
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
+}
+
+func testAccDatadogMetricsDataSourceMonitoredService(id string, name string) string {
+ return fmt.Sprintf(
+ `
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+
+ resource "harness_platform_project" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ org_id = harness_platform_organization.test.id
+ color = "#472848"
+ }
+
+resource "harness_platform_monitored_service" "test" {
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ identifier = "%[1]s"
+ request {
+ name = "%[2]s"
+ type = "Application"
+ description = "description"
+ service_ref = "service_ref"
+ environment_ref = "environment_ref"
+ tags = ["foo:bar", "bar:foo"]
+ health_sources {
+ name = "ddm"
+ identifier = "ddm"
+ type = "DatadogMetrics"
+ spec = jsonencode({
+ connectorRef = "connectorRef"
+ feature = "Datadog Cloud Metrics"
+ metricDefinitions = [
+ {
+ metricName = "metric"
+ identifier = "metric"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = true
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "group"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ },
+ {
+ metricName = "dashboard_metric_cpu"
+ identifier = "metric_cpu"
+ query = "avg:kubernetes.cpu.limits{*}.rollup(avg, 60);\navg:kubernetes.cpu.limits{*}.rollup(avg, 30);\n(a+b)/10"
+ isManualQuery = false
+ dashboardName = "dashboard"
+ metricPath = "M1"
+ groupingQuery = "avg:kubernetes.cpu.limits{*} by {host}.rollup(avg, 60)"
+ metric = "kubernetes.cpu.limits"
+ aggregation = "avg"
+ isCustomCreatedMetric = true
+ riskProfile = {
+ riskCategory = "Performance_Other"
+ thresholdTypes = [
+ "ACT_WHEN_HIGHER"
+ ]
+ }
+ analysis = {
+ liveMonitoring = {
+ enabled = true
+ }
+ deploymentVerification = {
+ enabled = true
+ serviceInstanceFieldName = "group"
+ }
+ }
+ sli : {
+ enabled = true
+ }
+ }
+ ]
+ })
+ }
+ template_ref = "template_ref"
+ version_label = "version_label"
+ }
+}
+
+ data "harness_platform_monitored_service" "test" {
+ identifier = harness_platform_monitored_service.test.identifier
+ org_id = harness_platform_organization.test.id
+ project_id = harness_platform_project.test.id
+ }`,
+ id, name)
}
diff --git a/internal/service/platform/monitored_service/resource_monitored_service.go b/internal/service/platform/monitored_service/resource_monitored_service.go
index a3f595805..66576ef11 100644
--- a/internal/service/platform/monitored_service/resource_monitored_service.go
+++ b/internal/service/platform/monitored_service/resource_monitored_service.go
@@ -106,6 +106,11 @@ func ResourceMonitoredService() *schema.Resource {
Type: schema.TypeString,
Required: true,
},
+ "version": {
+ Description: "Version of the health source.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
"spec": {
Description: "Specification of the health source. Depends on the type of the health source.",
Type: schema.TypeString,
@@ -312,35 +317,35 @@ func resourceMonitoredServiceDelete(ctx context.Context, d *schema.ResourceData,
}
func buildMonitoredServiceRequest(d *schema.ResourceData) *nextgen.MonitoredServiceDto {
- monitoredServiceDto := &nextgen.MonitoredServiceDto{}
+ monitoredService := &nextgen.MonitoredServiceDto{}
if attr, ok := d.GetOk("org_id"); ok {
- monitoredServiceDto.OrgIdentifier = attr.(string)
+ monitoredService.OrgIdentifier = attr.(string)
}
if attr, ok := d.GetOk("project_id"); ok {
- monitoredServiceDto.ProjectIdentifier = attr.(string)
+ monitoredService.ProjectIdentifier = attr.(string)
}
if attr, ok := d.GetOk("identifier"); ok {
- monitoredServiceDto.Identifier = attr.(string)
+ monitoredService.Identifier = attr.(string)
}
if attr, ok := d.GetOk("request"); ok {
request := attr.([]interface{})[0].(map[string]interface{})
- monitoredServiceDto.Name = request["name"].(string)
- monitoredServiceDto.Type_ = request["type"].(string)
- monitoredServiceDto.Description = request["description"].(string)
- monitoredServiceDto.ServiceRef = request["service_ref"].(string)
- monitoredServiceDto.EnvironmentRef = request["environment_ref"].(string)
+ monitoredService.Name = request["name"].(string)
+ monitoredService.Type_ = request["type"].(string)
+ monitoredService.Description = request["description"].(string)
+ monitoredService.ServiceRef = request["service_ref"].(string)
+ monitoredService.EnvironmentRef = request["environment_ref"].(string)
environmentRefListReq := request["environment_ref_list"].([]interface{})
environmentRefList := make([]string, len(environmentRefListReq))
for i, environmentRef := range environmentRefListReq {
environmentRefList[i] = environmentRef.(string)
}
- monitoredServiceDto.EnvironmentRefList = environmentRefList
+ monitoredService.EnvironmentRefList = environmentRefList
tags := map[string]string{}
for _, t := range request["tags"].(*schema.Set).List() {
@@ -348,7 +353,7 @@ func buildMonitoredServiceRequest(d *schema.ResourceData) *nextgen.MonitoredServ
parts := strings.Split(tagStr, ":")
tags[parts[0]] = parts[1]
}
- monitoredServiceDto.Tags = tags
+ monitoredService.Tags = tags
healthSources := request["health_sources"].(*schema.Set).List()
hss := make([]nextgen.HealthSource, len(healthSources))
@@ -366,7 +371,7 @@ func buildMonitoredServiceRequest(d *schema.ResourceData) *nextgen.MonitoredServ
csDto[i] = changeSourceDto
}
- monitoredServiceDto.Sources = &nextgen.Sources{
+ monitoredService.Sources = &nextgen.Sources{
HealthSources: hss,
ChangeSources: csDto,
}
@@ -378,7 +383,7 @@ func buildMonitoredServiceRequest(d *schema.ResourceData) *nextgen.MonitoredServ
serviceDependency := getServiceDependencyByType(sd)
serviceDependencyDto[i] = serviceDependency
}
- monitoredServiceDto.Dependencies = serviceDependencyDto
+ monitoredService.Dependencies = serviceDependencyDto
notificationRuleRefsReq := request["notification_rule_refs"].([]interface{})
notificationRuleRefs := make([]nextgen.NotificationRuleRefDto, len(notificationRuleRefsReq))
@@ -386,29 +391,28 @@ func buildMonitoredServiceRequest(d *schema.ResourceData) *nextgen.MonitoredServ
test := notificationRuleRef.(map[string]interface{})
notificationRuleRefDto := &nextgen.NotificationRuleRefDto{
NotificationRuleRef: test["notification_rule_ref"].(string),
- Enabled: test["enabled"].(bool),
+ Enabled: test["enabled"].(bool),
}
notificationRuleRefs[i] = *notificationRuleRefDto
}
- monitoredServiceDto.NotificationRuleRefs = notificationRuleRefs
+ monitoredService.NotificationRuleRefs = notificationRuleRefs
- monitoredServiceDto.Template = &nextgen.TemplateDto{
- TemplateRef: request["template_ref"].(string),
+ monitoredService.Template = &nextgen.TemplateDto{
+ TemplateRef: request["template_ref"].(string),
VersionLabel: request["version_label"].(string),
}
}
- return monitoredServiceDto
+ return monitoredService
}
func readMonitoredService(d *schema.ResourceData, monitoredServiceResponse **nextgen.MonitoredServiceResponse) {
- monitoredServiceDto := &(*monitoredServiceResponse).MonitoredService
+ monitoredService := &(*monitoredServiceResponse).MonitoredService
- d.SetId((*monitoredServiceDto).Identifier)
+ d.SetId((*monitoredService).Identifier)
- d.Set("org_id", (*monitoredServiceDto).OrgIdentifier)
- d.Set("project_id", (*monitoredServiceDto).ProjectIdentifier)
- d.Set("identifier", (*monitoredServiceDto).Identifier)
+ d.Set("org_id", (*monitoredService).OrgIdentifier)
+ d.Set("project_id", (*monitoredService).ProjectIdentifier)
+ d.Set("identifier", (*monitoredService).Identifier)
}
-
diff --git a/internal/service/platform/monitored_service/resource_monitored_service_test.go b/internal/service/platform/monitored_service/resource_monitored_service_test.go
index e6524df13..094446405 100644
--- a/internal/service/platform/monitored_service/resource_monitored_service_test.go
+++ b/internal/service/platform/monitored_service/resource_monitored_service_test.go
@@ -76,7 +76,7 @@ func TestAccResourceMonitoredService(t *testing.T) {
})
}*/
-func testAccGetMonitoredService(resourceName string, state *terraform.State) (*nextgen.MonitoredServiceDto, error) {
+func testAccGetMonitoredService(resourceName string, state *terraform.State) (*nextgen.MonitoredService, error) {
r := acctest.TestAccGetResource(resourceName, state)
c, ctx := acctest.TestAccGetPlatformClientWithContext()
id := r.Primary.ID
@@ -320,4 +320,3 @@ func testMonitoredServiceWithoutEnabled(id string, name string) string {
}
`, id, name)
}
-
diff --git a/internal/service/platform/monitored_service/utils.go b/internal/service/platform/monitored_service/utils.go
index f84221394..507bcb1f6 100644
--- a/internal/service/platform/monitored_service/utils.go
+++ b/internal/service/platform/monitored_service/utils.go
@@ -8,174 +8,272 @@ import (
func getHealthSourceByType(hs map[string]interface{}) nextgen.HealthSource {
healthSourceType := hs["type"].(string)
- healthSourceSpec := hs["spec"].(string)
+ healthSource := hs["spec"].(string)
if healthSourceType == "AppDynamics" {
- data := nextgen.AppDynamicsHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.AppDynamicsHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
AppDynamics: &data,
}
}
if healthSourceType == "NewRelic" {
- data := nextgen.NewRelicHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.NewRelicHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
- NewRelic: &data,
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ NewRelic: &data,
}
}
if healthSourceType == "StackdriverLog" {
- data := nextgen.StackdriverLogHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.StackdriverLogHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
StackdriverLog: &data,
}
}
if healthSourceType == "Splunk" {
- data := nextgen.SplunkHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.SplunkHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
- Splunk: &data,
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ Splunk: &data,
}
}
if healthSourceType == "Prometheus" {
- data := nextgen.PrometheusHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.PrometheusHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
Prometheus: &data,
}
}
if healthSourceType == "Stackdriver" {
- data := nextgen.StackdriverMetricHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.StackdriverMetricHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
Stackdriver: &data,
}
}
if healthSourceType == "DatadogMetrics" {
- data := nextgen.DatadogMetricHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.DatadogMetricHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
DatadogMetrics: &data,
}
}
if healthSourceType == "DatadogLog" {
- data := nextgen.DatadogLogHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.DatadogLogHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
DatadogLog: &data,
}
}
if healthSourceType == "Dynatrace" {
- data := nextgen.DynatraceHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.DynatraceHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
- Dynatrace: &data,
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ Dynatrace: &data,
}
}
if healthSourceType == "ErrorTracking" {
- data := nextgen.ErrorTrackingHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.ErrorTrackingHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
Name: hs["name"].(string),
Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
ErrorTracking: &data,
}
}
if healthSourceType == "CustomHealthMetric" {
- data := nextgen.CustomHealthSourceMetricSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.CustomHealthSourceMetric{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
CustomHealthMetric: &data,
}
}
if healthSourceType == "CustomHealthLog" {
- data := nextgen.CustomHealthSourceLogSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.CustomHealthSourceLog{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
CustomHealthLog: &data,
}
}
if healthSourceType == "SplunkMetric" {
- data := nextgen.SplunkMetricHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.SplunkMetricHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
- Name: hs["name"].(string),
- Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
SplunkMetric: &data,
}
}
if healthSourceType == "ElasticSearch" {
- data := nextgen.ElkHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
Name: hs["name"].(string),
Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
ElasticSearch: &data,
}
}
if healthSourceType == "CloudWatchMetrics" {
- data := nextgen.CloudWatchMetricsHealthSourceSpec{}
- json.Unmarshal([]byte(healthSourceSpec), &data)
+ data := nextgen.CloudWatchMetricsHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ CloudWatchMetrics: &data,
+ }
+ }
+ if healthSourceType == "AwsPrometheus" {
+ data := nextgen.AwsPrometheusHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
return nextgen.HealthSource{
Name: hs["name"].(string),
Identifier: hs["identifier"].(string),
- Type_: nextgen.HealthSourceType(healthSourceType),
- CloudWatchMetrics: &data,
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ AwsPrometheus: &data,
+ }
+ }
+ if healthSourceType == "SumologicMetrics" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ SumologicMetrics: &data,
+ }
+ }
+ if healthSourceType == "SumologicLogs" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ SumologicLogs: &data,
}
}
+ if healthSourceType == "SplunkSignalFXMetrics" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ SplunkSignalFXMetrics: &data,
+ }
+ }
+ if healthSourceType == "GrafanaLokiLogs" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ GrafanaLokiLogs: &data,
+ }
+ }
+ if healthSourceType == "AzureLogs" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ AzureLogs: &data,
+ }
+ }
+ if healthSourceType == "AzureMetrics" {
+ data := nextgen.NextGenHealthSource{}
+ json.Unmarshal([]byte(healthSource), &data)
+
+ return nextgen.HealthSource{
+ Name: hs["name"].(string),
+ Identifier: hs["identifier"].(string),
+ Version: hs["version"].(string),
+ Type_: nextgen.HealthSourceType(healthSourceType),
+ AzureMetrics: &data,
+ }
+ }
panic(fmt.Sprintf("Invalid health source type for monitored service"))
}
@@ -188,12 +286,12 @@ func getChangeSourceByType(cs map[string]interface{}) nextgen.ChangeSourceDto {
json.Unmarshal([]byte(changeSourceSpec), &data)
return nextgen.ChangeSourceDto{
- Name: cs["name"].(string),
- Identifier: cs["identifier"].(string),
- Type_: nextgen.ChangeSourceType(changeSourceType),
+ Name: cs["name"].(string),
+ Identifier: cs["identifier"].(string),
+ Type_: nextgen.ChangeSourceType(changeSourceType),
HarnessCDNextGen: &data,
- Enabled: cs["enabled"].(bool),
- Category: cs["category"].(string),
+ Enabled: cs["enabled"].(bool),
+ Category: cs["category"].(string),
}
}
if changeSourceType == "PagerDuty" {
@@ -201,12 +299,12 @@ func getChangeSourceByType(cs map[string]interface{}) nextgen.ChangeSourceDto {
json.Unmarshal([]byte(changeSourceSpec), &data)
return nextgen.ChangeSourceDto{
- Name: cs["name"].(string),
- Identifier: cs["identifier"].(string),
- Type_: nextgen.ChangeSourceType(changeSourceType),
- PagerDuty: &data,
- Enabled: cs["enabled"].(bool),
- Category: cs["category"].(string),
+ Name: cs["name"].(string),
+ Identifier: cs["identifier"].(string),
+ Type_: nextgen.ChangeSourceType(changeSourceType),
+ PagerDuty: &data,
+ Enabled: cs["enabled"].(bool),
+ Category: cs["category"].(string),
}
}
if changeSourceType == "K8sCluster" {
@@ -214,12 +312,12 @@ func getChangeSourceByType(cs map[string]interface{}) nextgen.ChangeSourceDto {
json.Unmarshal([]byte(changeSourceSpec), &data)
return nextgen.ChangeSourceDto{
- Name: cs["name"].(string),
- Identifier: cs["identifier"].(string),
- Type_: nextgen.ChangeSourceType(changeSourceType),
+ Name: cs["name"].(string),
+ Identifier: cs["identifier"].(string),
+ Type_: nextgen.ChangeSourceType(changeSourceType),
K8sCluster: &data,
- Enabled: cs["enabled"].(bool),
- Category: cs["category"].(string),
+ Enabled: cs["enabled"].(bool),
+ Category: cs["category"].(string),
}
}
if changeSourceType == "HarnessCD" {
@@ -227,12 +325,12 @@ func getChangeSourceByType(cs map[string]interface{}) nextgen.ChangeSourceDto {
json.Unmarshal([]byte(changeSourceSpec), &data)
return nextgen.ChangeSourceDto{
- Name: cs["name"].(string),
- Identifier: cs["identifier"].(string),
- Type_: nextgen.ChangeSourceType(changeSourceType),
- HarnessCD: &data,
- Enabled: cs["enabled"].(bool),
- Category: cs["category"].(string),
+ Name: cs["name"].(string),
+ Identifier: cs["identifier"].(string),
+ Type_: nextgen.ChangeSourceType(changeSourceType),
+ HarnessCD: &data,
+ Enabled: cs["enabled"].(bool),
+ Category: cs["category"].(string),
}
}
diff --git a/internal/service/platform/pipeline/resource_pipeline.go b/internal/service/platform/pipeline/resource_pipeline.go
index 9d1e43ac4..06f61dd05 100644
--- a/internal/service/platform/pipeline/resource_pipeline.go
+++ b/internal/service/platform/pipeline/resource_pipeline.go
@@ -27,29 +27,34 @@ func ResourcePipeline() *schema.Resource {
"yaml": {
Description: "YAML of the pipeline." + helpers.Descriptions.YamlText.String(),
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ Computed: true,
},
"git_details": {
Description: "Contains parameters related to creating an Entity for Git Experience.",
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"branch_name": {
Description: "Name of the branch.",
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"file_path": {
Description: "File path of the Entity in the repository.",
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"commit_message": {
Description: "Commit message used for the merge commit.",
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"base_branch": {
Description: "Name of the default branch (this checks out a new branch titled by branch_name).",
@@ -61,17 +66,20 @@ func ResourcePipeline() *schema.Resource {
Description: "Identifier of the Harness Connector used for CRUD operations on the Entity." + helpers.Descriptions.ConnectorRefText.String(),
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"store_type": {
Description: "Specifies whether the Entity is to be stored in Git or not. Possible values: INLINE, REMOTE.",
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"INLINE", "REMOTE"}, false),
+ Computed: true,
},
"repo_name": {
Description: "Name of the repository.",
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"last_object_id": {
Description: "Last object identifier (for Github). To be provided only when updating Pipeline.",
@@ -98,6 +106,61 @@ func ResourcePipeline() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
+ "import_from_git": {
+				Description: "Flag to set if importing from Git.",
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "git_import_info": {
+ Description: "Contains Git Information for importing entities from Git",
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "branch_name": {
+ Description: "Name of the branch.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "file_path": {
+ Description: "File path of the Entity in the repository.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "connector_ref": {
+							Description: "Identifier of the Harness Connector used for importing entities from Git." + helpers.Descriptions.ConnectorRefText.String(),
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "repo_name": {
+ Description: "Name of the repository.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "pipeline_import_request": {
+ Description: "Contains parameters for importing a pipeline",
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "pipeline_name": {
+ Description: "Name of the pipeline.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "pipeline_description": {
+ Description: "Description of the pipeline.",
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
},
}
@@ -160,18 +223,29 @@ func resourcePipelineCreateOrUpdate(ctx context.Context, d *schema.ResourceData,
template_applied := d.Get("template_applied").(bool)
if id == "" {
- pipeline := buildCreatePipeline(d)
- if pipeline.GitDetails != nil {
- base_branch = optional.NewString(pipeline.GitDetails.BaseBranch)
- store_type = optional.NewString(pipeline.GitDetails.StoreType)
- commit_message = optional.NewString(pipeline.GitDetails.CommitMessage)
- connector_ref = optional.NewString(pipeline.GitDetails.ConnectorRef)
- branch_name = pipeline.GitDetails.BranchName
- }
+ if d.Get("import_from_git").(bool) {
+ pipeline_id = d.Get("pipeline_import_request.0.pipeline_name").(string)
+
+ pipeline_import_request_body := createImportFromGitRequest(d)
+
+ _, httpResp, err = c.PipelinesApi.ImportPipelineFromGit(ctx, org_id, project_id, pipeline_id,
+ &nextgen.PipelinesApiImportPipelineFromGitOpts{
+ Body: optional.NewInterface(pipeline_import_request_body),
+ HarnessAccount: optional.NewString(c.AccountId)})
+ } else {
+ pipeline := buildCreatePipeline(d)
+ if pipeline.GitDetails != nil {
+ base_branch = optional.NewString(pipeline.GitDetails.BaseBranch)
+ store_type = optional.NewString(pipeline.GitDetails.StoreType)
+ commit_message = optional.NewString(pipeline.GitDetails.CommitMessage)
+ connector_ref = optional.NewString(pipeline.GitDetails.ConnectorRef)
+ branch_name = pipeline.GitDetails.BranchName
+ }
- pipeline_id = pipeline.Identifier
- _, httpResp, err = c.PipelinesApi.CreatePipeline(ctx, pipeline, org_id, project_id,
- &nextgen.PipelinesApiCreatePipelineOpts{HarnessAccount: optional.NewString(c.AccountId)})
+ pipeline_id = pipeline.Identifier
+ _, httpResp, err = c.PipelinesApi.CreatePipeline(ctx, pipeline, org_id, project_id,
+ &nextgen.PipelinesApiCreatePipelineOpts{HarnessAccount: optional.NewString(c.AccountId)})
+ }
} else {
pipeline := buildUpdatePipeline(d)
store_type = helpers.BuildField(d, "git_details.0.store_type")
@@ -202,6 +276,43 @@ func resourcePipelineCreateOrUpdate(ctx context.Context, d *schema.ResourceData,
return nil
}
+func createImportFromGitRequest(d *schema.ResourceData) *nextgen.PipelineImportRequestBody {
+
+ pipeline_git_import_info := &nextgen.GitImportInfo{}
+ if attr, ok := d.GetOk("git_import_info"); ok {
+ config := attr.([]interface{})[0].(map[string]interface{})
+ if attr, ok := config["branch_name"]; ok {
+ pipeline_git_import_info.BranchName = attr.(string)
+ }
+ if attr, ok := config["file_path"]; ok {
+ pipeline_git_import_info.FilePath = attr.(string)
+ }
+ if attr, ok := config["connector_ref"]; ok {
+ pipeline_git_import_info.ConnectorRef = attr.(string)
+ }
+ if attr, ok := config["repo_name"]; ok {
+ pipeline_git_import_info.RepoName = attr.(string)
+ }
+ }
+
+ pipeline_import_request := &nextgen.PipelineImportRequestDto{}
+ if attr, ok := d.GetOk("pipeline_import_request"); ok {
+ config := attr.([]interface{})[0].(map[string]interface{})
+ if attr, ok := config["pipeline_name"]; ok {
+ pipeline_import_request.PipelineName = attr.(string)
+ }
+ if attr, ok := config["pipeline_description"]; ok {
+ pipeline_import_request.PipelineDescription = attr.(string)
+ }
+ }
+
+ pipeline_import_request_body := &nextgen.PipelineImportRequestBody{}
+ pipeline_import_request_body.GitImportInfo = pipeline_git_import_info
+ pipeline_import_request_body.PipelineImportRequest = pipeline_import_request
+
+ return pipeline_import_request_body
+}
+
func resourcePipelineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
c, ctx := meta.(*internal.Session).GetClientWithContext(ctx)
diff --git a/internal/service/platform/pipeline/resource_pipeline_test.go b/internal/service/platform/pipeline/resource_pipeline_test.go
index c278e60a3..03a59505a 100644
--- a/internal/service/platform/pipeline/resource_pipeline_test.go
+++ b/internal/service/platform/pipeline/resource_pipeline_test.go
@@ -87,6 +87,35 @@ func TestAccResourcePipelineInline(t *testing.T) {
})
}
+func TestAccResourcePipelineImportFromGit(t *testing.T) {
+ id := fmt.Sprintf("%s_%s", t.Name(), utils.RandStringBytes(6))
+ name := id
+
+ resourceName := "harness_platform_pipeline.test"
+
+ resource.UnitTest(t, resource.TestCase{
+ PreCheck: func() { acctest.TestAccPreCheck(t) },
+ ProviderFactories: acctest.ProviderFactories,
+ CheckDestroy: testAccPipelineDestroy(resourceName),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccResourcePipelineImportFromGit(id, name),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "id", "gitx"),
+ resource.TestCheckResourceAttr(resourceName, "name", "gitx"),
+ ),
+ },
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateIdFunc: acctest.ProjectResourceImportStateIdFunc(resourceName),
+ ImportStateVerifyIgnore: []string{"git_import_info.0.branch_name", "git_import_info.0.connector_ref", "git_import_info.0.file_path", "git_import_info.0.repo_name", "import_from_git", "pipeline_import_request.0.pipeline_description", "pipeline_import_request.0.pipeline_name", "git_import_info.#", "git_import_info.0.%", "pipeline_import_request.#", "pipeline_import_request.0.%"},
+ },
+ },
+ })
+}
+
func TestAccResourcePipeline_DeleteUnderlyingResource(t *testing.T) {
name := t.Name()
id := fmt.Sprintf("%s_%s", name, utils.RandStringBytes(5))
@@ -369,3 +398,29 @@ func testAccResourcePipelineInline(id string, name string) string {
}
`, id, name)
}
+
+func testAccResourcePipelineImportFromGit(id string, name string) string {
+ return fmt.Sprintf(`
+ resource "harness_platform_organization" "test" {
+ identifier = "%[1]s"
+ name = "%[2]s"
+ }
+ resource "harness_platform_pipeline" "test" {
+ identifier = "gitx"
+ org_id = "default"
+ project_id = "V"
+ name = "gitx"
+ import_from_git = true
+ git_import_info {
+ branch_name = "main"
+ file_path = ".harness/gitx.yaml"
+ connector_ref = "account.DoNotDeleteGithub"
+ repo_name = "open-repo"
+ }
+ pipeline_import_request {
+ pipeline_name = "gitx"
+ pipeline_description = "Pipeline Description"
+ }
+ }
+ `, id, name)
+}
diff --git a/internal/service/platform/user/resource_user.go b/internal/service/platform/user/resource_user.go
index a0e433495..106e602e8 100644
--- a/internal/service/platform/user/resource_user.go
+++ b/internal/service/platform/user/resource_user.go
@@ -141,7 +141,11 @@ func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interfac
return nil
}
+var creationSemaphore = make(chan struct{}, 1)
+
func resourceUserCreateOrUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ creationSemaphore <- struct{}{}
+ defer func() { <-creationSemaphore }()
c, ctx := meta.(*internal.Session).GetPlatformClientWithContext(ctx)
id := d.Id()
@@ -184,6 +188,8 @@ func resourceUserCreateOrUpdate(ctx context.Context, d *schema.ResourceData, met
}
func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ creationSemaphore <- struct{}{}
+ defer func() { <-creationSemaphore }()
c, ctx := meta.(*internal.Session).GetPlatformClientWithContext(ctx)
uuid := d.Get("identifier").(string)