diff --git a/.gitignore b/.gitignore
index cee0b74..ef432a7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@ stackql
stack/
oss-activity-monitor/
creds.json
+*.log
# Byte-compiled / optimized / DLL files
__pycache__/
diff --git a/aws-stack/README.md b/aws-stack/README.md
new file mode 100644
index 0000000..cce8c31
--- /dev/null
+++ b/aws-stack/README.md
@@ -0,0 +1,63 @@
+# `stackql-deploy` starter project for `aws`
+
+> for starter projects using other providers, try `stackql-deploy init my_stack --provider=azure` or `stackql-deploy init my_stack --provider=google`
+
+see the following links for more information on `stackql`, `stackql-deploy` and the `aws` provider:
+
+- [`aws` provider docs](https://stackql.io/registry/aws)
+- [`stackql`](https://github.com/stackql/stackql)
+- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
+- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
+
+## Overview
+
+__`stackql-deploy`__ is a stateless, declarative, SQL-driven Infrastructure-as-Code (IaC) framework. No state file is required, as the current state of each resource is assessed at runtime. __`stackql-deploy`__ can provision, deprovision and test a stack that includes resources across different providers, for example a stack spanning `aws` and `azure`.
+
+## Prerequisites
+
+This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider; for the `aws` provider, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` (and optionally `AWS_SESSION_TOKEN`) must be set. For more information on authentication to `aws`, see the [`aws` provider documentation](https://aws.stackql.io/providers/aws).
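+
+For example, on Linux or macOS the variables could be set in your shell before running `stackql-deploy` as follows (the values shown are placeholders, not real credentials):
+
+```bash
+# static credentials (placeholder values)
+export AWS_ACCESS_KEY_ID="<your-access-key-id>"
+export AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
+# only required when using temporary (STS) credentials
+export AWS_SESSION_TOKEN="<your-session-token>"
+```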
+
+## Usage
+
+Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. This file contains resource configuration variables to support multiple deployment environments; these variables are used by the `stackql` queries in the `stackql_queries` and `stackql_resources` folders.
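+
+For example, the `vpc_cidr_block` property for the `example_vpc` resource in this project resolves to a different value for each target environment (`prd`, `sit` or `dev`):
+
+```yaml
+- name: vpc_cidr_block
+  values:
+    prd:
+      value: "10.0.0.0/16"
+    sit:
+      value: "10.1.0.0/16"
+    dev:
+      value: "10.2.0.0/16"
+```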
+
+The syntax for the `stackql-deploy` command is as follows:
+
+```bash
+stackql-deploy { build | test | teardown } { stack-directory } { deployment environment } [ optional flags ]
+```
+
+### Deploying a stack
+
+For example, to deploy the stack to an environment labeled `sit`, run the following:
+
+```bash
+stackql-deploy build aws-stack sit \
+-e AWS_REGION=ap-southeast-2
+```
+
+Use the `--dry-run` flag to view the queries to be run without actually running them, for example:
+
+```bash
+stackql-deploy build aws-stack sit \
+-e AWS_REGION=ap-southeast-2 \
+--dry-run
+```
+
+### Testing a stack
+
+To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example):
+
+```bash
+stackql-deploy test aws-stack sit \
+-e AWS_REGION=ap-southeast-2
+```
+
+### Tearing down a stack
+
+To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following:
+
+```bash
+stackql-deploy teardown aws-stack sit \
+-e AWS_REGION=ap-southeast-2
+```
\ No newline at end of file
diff --git a/aws-stack/external_scripts/README.md b/aws-stack/external_scripts/README.md
new file mode 100644
index 0000000..79c0788
--- /dev/null
+++ b/aws-stack/external_scripts/README.md
@@ -0,0 +1 @@
+# external scripts for `aws` `stackql-deploy` starter project
\ No newline at end of file
diff --git a/aws-stack/stackql_docs/example_vpc.md b/aws-stack/stackql_docs/example_vpc.md
new file mode 100644
index 0000000..f419c60
--- /dev/null
+++ b/aws-stack/stackql_docs/example_vpc.md
@@ -0,0 +1,3 @@
+# `example_vpc`
+
+document your `example_vpc` AWS VPC resource here (this is optional)
\ No newline at end of file
diff --git a/aws-stack/stackql_manifest.yml b/aws-stack/stackql_manifest.yml
new file mode 100644
index 0000000..98a4bea
--- /dev/null
+++ b/aws-stack/stackql_manifest.yml
@@ -0,0 +1,145 @@
+#
+# aws starter project manifest file, add and update values as needed
+#
+version: 1
+name: "aws-stack"
+description: description for "aws-stack"
+providers:
+ - aws
+globals:
+ - name: region
+ description: aws region
+ value: "{{ AWS_REGION }}"
+ - name: global_tags
+ value:
+ - Key: Provisioner
+ Value: stackql
+ - Key: StackName
+ Value: "{{ stack_name }}"
+ - Key: StackEnv
+ Value: "{{ stack_env }}"
+resources:
+ - name: example_vpc
+ props:
+ - name: vpc_cidr_block
+ values:
+ prd:
+ value: "10.0.0.0/16"
+ sit:
+ value: "10.1.0.0/16"
+ dev:
+ value: "10.2.0.0/16"
+ - name: vpc_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-vpc"
+ exports:
+ - vpc_id
+ - vpc_cidr_block
+ - name: example_subnet
+ props:
+ - name: vpc_id
+ value: "{{ vpc_id }}"
+ - name: subnet_cidr_block
+ values:
+ prd:
+ value: "10.0.1.0/24"
+ sit:
+ value: "10.1.1.0/24"
+ dev:
+ value: "10.2.1.0/24"
+ - name: subnet_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-subnet"
+ exports:
+ - subnet_id
+ - availability_zone
+ - name: example_inet_gateway
+ props:
+ - name: inet_gateway_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway"
+ exports:
+ - internet_gateway_id
+ - name: example_inet_gw_attachment
+ props: []
+ - name: example_route_table
+ props:
+ - name: route_table_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-route-table"
+ exports:
+ - route_table_id
+ - name: example_subnet_rt_assn
+ props: []
+ exports:
+ - route_table_assn_id
+ - name: example_inet_route
+ props: []
+ exports:
+      - inet_route_identifier
+ - name: example_security_group
+ props:
+ - name: group_description
+ value: "web security group for {{ stack_name }} ({{ stack_env }} environment)"
+ - name: group_name
+ value: "{{ stack_name }}-{{ stack_env }}-web-sg"
+ - name: sg_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-web-sg"
+ - name: security_group_ingress
+ value:
+ - CidrIp: "0.0.0.0/0"
+ Description: Allow HTTP traffic
+ FromPort: 80
+ ToPort: 80
+ IpProtocol: "tcp"
+ - CidrIp: "{{ vpc_cidr_block }}"
+ Description: Allow SSH traffic from the internal network
+ FromPort: 22
+ ToPort: 22
+ IpProtocol: "tcp"
+ - name: security_group_egress
+ value:
+ - CidrIp: "0.0.0.0/0"
+ Description: Allow all outbound traffic
+ FromPort: 0
+ ToPort: 0
+ IpProtocol: "-1"
+ exports:
+ - security_group_id
+ - name: example_web_server
+ props:
+ - name: instance_name
+ value: "{{ stack_name }}-{{ stack_env }}-instance"
+ - name: ami_id
+ value: ami-030a5acd7c996ef60
+ - name: instance_type
+ value: t2.micro
+ - name: network_interfaces
+ value:
+ - AssociatePublicIpAddress: True
+ DeviceIndex: "0"
+ SubnetId: "{{ subnet_id }}"
+ GroupSet:
+ - "{{ security_group_id }}"
+ - name: user_data
+ value: |
+ #!/bin/bash
+ yum update -y
+ yum install -y httpd
+ systemctl start httpd
+ systemctl enable httpd
+ echo "
Hello, StackQL!
" > /var/www/html/index.html
+ - name: instance_tags
+ value:
+ - Key: Name
+ Value: "{{ stack_name }}-{{ stack_env }}-instance"
+ exports:
+ - instance_id
+ - public_dns_name
+
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_inet_gateway.iql b/aws-stack/stackql_queries/example_inet_gateway.iql
new file mode 100644
index 0000000..9708ecf
--- /dev/null
+++ b/aws-stack/stackql_queries/example_inet_gateway.iql
@@ -0,0 +1,38 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT internet_gateway_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.internet_gateway_tags
+WHERE region = '{{ region }}'
+GROUP BY internet_gateway_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT internet_gateway_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.internet_gateway_tags
+WHERE region = '{{ region }}'
+GROUP BY internet_gateway_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT internet_gateway_id FROM
+(
+SELECT internet_gateway_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.internet_gateway_tags
+WHERE region = '{{ region }}'
+GROUP BY internet_gateway_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_inet_gw_attachment.iql b/aws-stack/stackql_queries/example_inet_gw_attachment.iql
new file mode 100644
index 0000000..92f9abb
--- /dev/null
+++ b/aws-stack/stackql_queries/example_inet_gw_attachment.iql
@@ -0,0 +1,23 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT
+attachment_type,
+vpc_id
+FROM aws.ec2.vpc_gateway_attachments
+WHERE region = '{{ region }}'
+AND internet_gateway_id = '{{ internet_gateway_id }}'
+AND vpc_id = '{{ vpc_id }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT
+attachment_type,
+vpc_id
+FROM aws.ec2.vpc_gateway_attachments
+WHERE region = '{{ region }}'
+AND internet_gateway_id = '{{ internet_gateway_id }}'
+AND vpc_id = '{{ vpc_id }}'
+) t;
diff --git a/aws-stack/stackql_queries/example_inet_route.iql b/aws-stack/stackql_queries/example_inet_route.iql
new file mode 100644
index 0000000..5a6466a
--- /dev/null
+++ b/aws-stack/stackql_queries/example_inet_route.iql
@@ -0,0 +1,23 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT data__Identifier
+FROM aws.ec2.routes
+WHERE region = '{{ region }}'
+AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT data__Identifier
+FROM aws.ec2.routes
+WHERE region = '{{ region }}'
+AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT data__Identifier as inet_route_identifier
+FROM aws.ec2.routes
+WHERE region = '{{ region }}'
+AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0';
diff --git a/aws-stack/stackql_queries/example_route_table.iql b/aws-stack/stackql_queries/example_route_table.iql
new file mode 100644
index 0000000..610ca27
--- /dev/null
+++ b/aws-stack/stackql_queries/example_route_table.iql
@@ -0,0 +1,41 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT count(*) as count FROM
+(
+SELECT route_table_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.route_table_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY route_table_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT count(*) as count FROM
+(
+SELECT route_table_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.route_table_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY route_table_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT route_table_id FROM
+(
+SELECT route_table_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.route_table_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY route_table_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_security_group.iql b/aws-stack/stackql_queries/example_security_group.iql
new file mode 100644
index 0000000..31da342
--- /dev/null
+++ b/aws-stack/stackql_queries/example_security_group.iql
@@ -0,0 +1,48 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT group_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.security_group_tags
+WHERE region = '{{ region }}'
+AND group_name = '{{ group_name }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY group_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT group_id,
+security_group_ingress,
+security_group_egress,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.security_group_tags
+WHERE region = '{{ region }}'
+AND group_name = '{{ group_name }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY group_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT group_id as 'security_group_id' FROM
+(
+SELECT group_id,
+security_group_ingress,
+security_group_egress,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.security_group_tags
+WHERE region = '{{ region }}'
+AND group_name = '{{ group_name }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY group_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_subnet.iql b/aws-stack/stackql_queries/example_subnet.iql
new file mode 100644
index 0000000..d9e0633
--- /dev/null
+++ b/aws-stack/stackql_queries/example_subnet.iql
@@ -0,0 +1,46 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT subnet_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.subnet_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY subnet_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT subnet_id,
+cidr_block,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.subnet_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY subnet_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t
+WHERE cidr_block = '{{ subnet_cidr_block }}';
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT subnet_id, availability_zone FROM
+(
+SELECT subnet_id,
+availability_zone,
+cidr_block,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.subnet_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+GROUP BY subnet_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t
+WHERE cidr_block = '{{ subnet_cidr_block }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_subnet_rt_assn.iql b/aws-stack/stackql_queries/example_subnet_rt_assn.iql
new file mode 100644
index 0000000..0062aab
--- /dev/null
+++ b/aws-stack/stackql_queries/example_subnet_rt_assn.iql
@@ -0,0 +1,26 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT id
+FROM aws.ec2.subnet_route_table_associations
+WHERE region = '{{ region }}'
+AND route_table_id = '{{ route_table_id }}'
+AND subnet_id = '{{ subnet_id }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT id
+FROM aws.ec2.subnet_route_table_associations
+WHERE region = '{{ region }}'
+AND route_table_id = '{{ route_table_id }}'
+AND subnet_id = '{{ subnet_id }}'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT id as route_table_assn_id
+FROM aws.ec2.subnet_route_table_associations
+WHERE region = '{{ region }}'
+AND route_table_id = '{{ route_table_id }}'
+AND subnet_id = '{{ subnet_id }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_queries/example_vpc.iql b/aws-stack/stackql_queries/example_vpc.iql
new file mode 100644
index 0000000..96b2372
--- /dev/null
+++ b/aws-stack/stackql_queries/example_vpc.iql
@@ -0,0 +1,43 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT vpc_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.vpc_tags
+WHERE region = '{{ region }}'
+AND cidr_block = '{{ vpc_cidr_block }}'
+GROUP BY vpc_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT vpc_id,
+cidr_block,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.vpc_tags
+WHERE region = '{{ region }}'
+AND cidr_block = '{{ vpc_cidr_block }}'
+GROUP BY vpc_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t
+WHERE cidr_block = '{{ vpc_cidr_block }}';
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT vpc_id, vpc_cidr_block FROM
+(
+SELECT vpc_id, cidr_block as "vpc_cidr_block",
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.vpc_tags
+WHERE region = '{{ region }}'
+AND cidr_block = '{{ vpc_cidr_block }}'
+GROUP BY vpc_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+) t;
diff --git a/aws-stack/stackql_queries/example_web_server.iql b/aws-stack/stackql_queries/example_web_server.iql
new file mode 100644
index 0000000..9e1f9de
--- /dev/null
+++ b/aws-stack/stackql_queries/example_web_server.iql
@@ -0,0 +1,47 @@
+/*+ preflight, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT instance_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.instance_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+AND subnet_id = '{{ subnet_id }}'
+GROUP BY instance_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+AND json_extract(tags, '$.Name') = '{{ instance_name }}'
+) t;
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM
+(
+SELECT instance_id,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.instance_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+AND subnet_id = '{{ subnet_id }}'
+GROUP BY instance_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+AND json_extract(tags, '$.Name') = '{{ instance_name }}'
+) t;
+
+/*+ exports, retries=5, retry_delay=5 */
+SELECT instance_id, public_ip FROM
+(
+SELECT instance_id, public_ip,
+json_group_object(tag_key, tag_value) as tags
+FROM aws.ec2.instance_tags
+WHERE region = '{{ region }}'
+AND vpc_id = '{{ vpc_id }}'
+AND subnet_id = '{{ subnet_id }}'
+GROUP BY instance_id
+HAVING json_extract(tags, '$.Provisioner') = 'stackql'
+AND json_extract(tags, '$.StackName') = '{{ stack_name }}'
+AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'
+AND json_extract(tags, '$.Name') = '{{ instance_name }}'
+) t;
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_inet_gateway.iql b/aws-stack/stackql_resources/example_inet_gateway.iql
new file mode 100644
index 0000000..01ecfd4
--- /dev/null
+++ b/aws-stack/stackql_resources/example_inet_gateway.iql
@@ -0,0 +1,13 @@
+/*+ create */
+INSERT INTO aws.ec2.internet_gateways (
+ Tags,
+ region
+)
+SELECT
+'{{ inet_gateway_tags | merge_lists(global_tags) }}',
+'{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.internet_gateways
+WHERE data__Identifier = '{{ internet_gateway_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_inet_gw_attachment.iql b/aws-stack/stackql_resources/example_inet_gw_attachment.iql
new file mode 100644
index 0000000..7dbe954
--- /dev/null
+++ b/aws-stack/stackql_resources/example_inet_gw_attachment.iql
@@ -0,0 +1,15 @@
+/*+ create */
+INSERT INTO aws.ec2.vpc_gateway_attachments (
+ InternetGatewayId,
+ VpcId,
+ region
+)
+SELECT
+ '{{ internet_gateway_id }}',
+ '{{ vpc_id }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.vpc_gateway_attachments
+WHERE data__Identifier = 'IGW|{{ vpc_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_inet_route.iql b/aws-stack/stackql_resources/example_inet_route.iql
new file mode 100644
index 0000000..cc227d2
--- /dev/null
+++ b/aws-stack/stackql_resources/example_inet_route.iql
@@ -0,0 +1,17 @@
+/*+ create */
+INSERT INTO aws.ec2.routes (
+ DestinationCidrBlock,
+ GatewayId,
+ RouteTableId,
+ region
+)
+SELECT
+ '0.0.0.0/0',
+ '{{ internet_gateway_id }}',
+ '{{ route_table_id }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.routes
+WHERE data__Identifier = '{{ inet_route_identifier }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_route_table.iql b/aws-stack/stackql_resources/example_route_table.iql
new file mode 100644
index 0000000..bd83e74
--- /dev/null
+++ b/aws-stack/stackql_resources/example_route_table.iql
@@ -0,0 +1,15 @@
+/*+ create */
+INSERT INTO aws.ec2.route_tables (
+ Tags,
+ VpcId,
+ region
+)
+SELECT
+ '{{ route_table_tags | merge_lists(global_tags) }}',
+ '{{ vpc_id }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.route_tables
+WHERE data__Identifier = '{{ route_table_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_security_group.iql b/aws-stack/stackql_resources/example_security_group.iql
new file mode 100644
index 0000000..90300d8
--- /dev/null
+++ b/aws-stack/stackql_resources/example_security_group.iql
@@ -0,0 +1,23 @@
+/*+ create */
+INSERT INTO aws.ec2.security_groups (
+ GroupName,
+ GroupDescription,
+ VpcId,
+ SecurityGroupIngress,
+ SecurityGroupEgress,
+ Tags,
+ region
+)
+SELECT
+ '{{ group_name }}',
+ '{{ group_description }}',
+ '{{ vpc_id }}',
+ '{{ security_group_ingress | tojson }}',
+ '{{ security_group_egress | tojson }}',
+ '{{ sg_tags | merge_lists(global_tags) }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.security_groups
+WHERE data__Identifier = '{{ security_group_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_subnet.iql b/aws-stack/stackql_resources/example_subnet.iql
new file mode 100644
index 0000000..bc1a7f3
--- /dev/null
+++ b/aws-stack/stackql_resources/example_subnet.iql
@@ -0,0 +1,17 @@
+/*+ create */
+INSERT INTO aws.ec2.subnets (
+ VpcId,
+ CidrBlock,
+ Tags,
+ region
+)
+SELECT
+ '{{ vpc_id }}',
+ '{{ subnet_cidr_block }}',
+ '{{ subnet_tags | merge_lists(global_tags) }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.subnets
+WHERE data__Identifier = '{{ subnet_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_subnet_rt_assn.iql b/aws-stack/stackql_resources/example_subnet_rt_assn.iql
new file mode 100644
index 0000000..f1ea1b7
--- /dev/null
+++ b/aws-stack/stackql_resources/example_subnet_rt_assn.iql
@@ -0,0 +1,15 @@
+/*+ create */
+INSERT INTO aws.ec2.subnet_route_table_associations (
+ RouteTableId,
+ SubnetId,
+ region
+)
+SELECT
+ '{{ route_table_id }}',
+ '{{ subnet_id }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.subnet_route_table_associations
+WHERE data__Identifier = '{{ route_table_assn_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_vpc.iql b/aws-stack/stackql_resources/example_vpc.iql
new file mode 100644
index 0000000..ddcfb67
--- /dev/null
+++ b/aws-stack/stackql_resources/example_vpc.iql
@@ -0,0 +1,15 @@
+/*+ create */
+INSERT INTO aws.ec2.vpcs (
+ CidrBlock,
+ Tags,
+ region
+)
+SELECT
+ '{{ vpc_cidr_block }}',
+ '{{ vpc_tags | merge_lists(global_tags) }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.vpcs
+WHERE data__Identifier = '{{ vpc_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/aws-stack/stackql_resources/example_web_server.iql b/aws-stack/stackql_resources/example_web_server.iql
new file mode 100644
index 0000000..d9972ae
--- /dev/null
+++ b/aws-stack/stackql_resources/example_web_server.iql
@@ -0,0 +1,21 @@
+/*+ create */
+INSERT INTO aws.ec2.instances (
+ ImageId,
+ InstanceType,
+ NetworkInterfaces,
+ UserData,
+ Tags,
+ region
+)
+SELECT
+ '{{ ami_id }}',
+ '{{ instance_type }}',
+ '{{ network_interfaces | tojson }}',
+ '{{ user_data | base64_encode }}',
+ '{{ instance_tags | merge_lists(global_tags) }}',
+ '{{ region }}';
+
+/*+ delete */
+DELETE FROM aws.ec2.instances
+WHERE data__Identifier = '{{ instance_id }}'
+AND region = '{{ region }}';
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 379202b..3ab0966 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
setup(
name='stackql-deploy',
- version='1.5.3',
+ version='1.5.5',
description='Model driven resource provisioning and deployment framework using StackQL.',
long_description=readme,
long_description_content_type='text/x-rst',
diff --git a/stackql_deploy/__init__.py b/stackql_deploy/__init__.py
index 0972d83..f51f62d 100644
--- a/stackql_deploy/__init__.py
+++ b/stackql_deploy/__init__.py
@@ -1 +1 @@
-__version__ = '1.5.3'
+__version__ = '1.5.6'
diff --git a/stackql_deploy/cli.py b/stackql_deploy/cli.py
index 5b21bdd..1247d07 100644
--- a/stackql_deploy/cli.py
+++ b/stackql_deploy/cli.py
@@ -8,6 +8,20 @@
from dotenv import load_dotenv, dotenv_values
from pystackql import StackQL
+def print_unicode_box(message):
+ border_color = '\033[93m' # Yellow color
+ reset_color = '\033[0m'
+
+ lines = message.split('\n')
+ max_length = max(len(line) for line in lines)
+ top_border = border_color + '┌' + '─' * (max_length + 2) + '┐' + reset_color
+ bottom_border = border_color + '└' + '─' * (max_length + 2) + '┘' + reset_color
+
+ click.echo(top_border)
+ for line in lines:
+ click.echo(border_color + '│ ' + line.ljust(max_length) + ' │' + reset_color)
+ click.echo(bottom_border)
+
def get_stackql_instance(custom_registry=None, download_dir=None):
"""Initializes StackQL with the given options."""
stackql_kwargs = {}
@@ -41,7 +55,7 @@ def parse_env_var(ctx, param, value):
key, val = item.split('=', 1)
env_vars[key] = val
except ValueError:
- raise click.BadParameter('Environment variables must be formatted as KEY=VALUE')
+ raise click.BadParameter('environment variables must be formatted as KEY=VALUE')
return env_vars
def setup_logger(command, args_dict):
@@ -54,8 +68,8 @@ def setup_logger(command, args_dict):
#
@click.group()
-@click.option('--custom-registry', default=None, help='Custom registry URL for StackQL.')
-@click.option('--download-dir', default=None, help='Download directory for StackQL.')
+@click.option('--custom-registry', default=None, help='custom registry URL for StackQL.')
+@click.option('--download-dir', default=None, help='download directory for StackQL.')
@click.pass_context
def cli(ctx, custom_registry, download_dir):
"""This is the main CLI entry point."""
@@ -71,18 +85,24 @@ def cli(ctx, custom_registry, download_dir):
@cli.command()
@click.argument('stack_dir')
@click.argument('stack_env')
-@click.option('--log-level', default='INFO', help='Set the logging level.')
-@click.option('--env-file', default='.env', help='Environment variables file.')
-@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='Set additional environment variables.')
-@click.option('--dry-run', is_flag=True, help='Perform a dry run of the operation.')
-@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='Action on failure.')
+@click.option('--log-level', default='INFO', help='set the logging level.')
+@click.option('--env-file', default='.env', help='environment variables file.')
+@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='set additional environment variables.')
+@click.option('--dry-run', is_flag=True, help='perform a dry run of the operation.')
+@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='action on failure.')
@click.pass_context
def build(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_failure):
"""Create or update resources."""
- setup_logger(log_level, locals())
- env_vars = load_env_vars(env_file, env)
+ setup_logger('build', locals())
+ vars = load_env_vars(env_file, env)
stackql = get_stackql_instance(ctx.obj['custom_registry'], ctx.obj['download_dir'])
- provisioner = StackQLProvisioner(stackql, env_vars, logger, stack_dir, stack_env)
+ provisioner = StackQLProvisioner(stackql, vars, logger, stack_dir, stack_env)
+
+ # Print the bordered message
+ stack_name_display = provisioner.stack_name if provisioner.stack_name else stack_dir
+ message = f"Deploying stack: [{stack_name_display}] to environment: [{stack_env}]"
+ print_unicode_box(message)
+
provisioner.run(dry_run, on_failure)
click.echo(f"🚀 build complete (dry run: {dry_run})")
@@ -93,11 +113,11 @@ def build(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_failu
@cli.command()
@click.argument('stack_dir')
@click.argument('stack_env')
-@click.option('--log-level', default='INFO', help='Set the logging level.')
-@click.option('--env-file', default='.env', help='Environment variables file.')
-@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='Set additional environment variables.')
-@click.option('--dry-run', is_flag=True, help='Perform a dry run of the operation.')
-@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='Action on failure.')
+@click.option('--log-level', default='INFO', help='set the logging level.')
+@click.option('--env-file', default='.env', help='environment variables file.')
+@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='set additional environment variables.')
+@click.option('--dry-run', is_flag=True, help='perform a dry run of the operation.')
+@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='action on failure.')
@click.pass_context
def teardown(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_failure):
"""Teardown a provisioned stack defined in the `{STACK_DIR}/stackql_manifest.yml` file."""
@@ -108,6 +128,12 @@ def teardown(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_fa
)
vars = load_env_vars(env_file, env)
deprovisioner = StackQLDeProvisioner(stackql, vars, logger, stack_dir, stack_env)
+
+ # Print the bordered message
+ stack_name_display = deprovisioner.stack_name if deprovisioner.stack_name else stack_dir
+ message = f"Tearing down stack: [{stack_name_display}] in environment: [{stack_env}]"
+ print_unicode_box(message)
+
deprovisioner.run(dry_run, on_failure)
click.echo(f"🚧 teardown complete (dry run: {dry_run})")
@@ -118,11 +144,11 @@ def teardown(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_fa
@cli.command()
@click.argument('stack_dir')
@click.argument('stack_env')
-@click.option('--log-level', default='INFO', help='Set the logging level.')
-@click.option('--env-file', default='.env', help='Environment variables file.')
-@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='Set additional environment variables.')
-@click.option('--dry-run', is_flag=True, help='Perform a dry run of the operation.')
-@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='Action on failure.')
+@click.option('--log-level', default='INFO', help='set the logging level.')
+@click.option('--env-file', default='.env', help='environment variables file.')
+@click.option('-e', '--env', multiple=True, callback=parse_env_var, help='set additional environment variables.')
+@click.option('--dry-run', is_flag=True, help='perform a dry run of the operation.')
+@click.option('--on-failure', type=click.Choice(['rollback', 'ignore', 'error']), default='error', help='action on failure.')
@click.pass_context
def test(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_failure):
"""Run test queries to ensure desired state resources and configuration for the stack defined in the `{STACK_DIR}/stackql_manifest.yml` file."""
@@ -133,6 +159,12 @@ def test(ctx, stack_dir, stack_env, log_level, env_file, env, dry_run, on_failur
)
vars = load_env_vars(env_file, env)
test_runner = StackQLTestRunner(stackql, vars, logger, stack_dir, stack_env)
+
+ # Print the bordered message
+ stack_name_display = test_runner.stack_name if test_runner.stack_name else stack_dir
+ message = f"Testing stack: [{stack_name_display}] in environment: [{stack_env}]"
+ print_unicode_box(message)
+
test_runner.run(dry_run, on_failure)
click.echo(f"🔍 tests complete (dry run: {dry_run})")
@@ -163,7 +195,7 @@ def info(ctx):
# Optionally add custom registry if it's provided
if ctx.obj.get('custom_registry'):
info_items.append(("custom registry", ctx.obj.get('custom_registry')))
-
+
# Calculate the maximum label length for alignment
max_label_length = max(len(label) for label, _ in info_items)
@@ -171,29 +203,62 @@ def info(ctx):
for label, value in info_items:
click.echo(f"{label.ljust(max_label_length)}: {value}")
+ click.echo("")
+
+ providers_info = []
+ providers_info.append(("installed providers:", ""))
+ providers = stackql.execute("SHOW PROVIDERS")
+ for provider in providers:
+ providers_info.append((provider['name'], provider['version']))
+
+ # Print out all information items
+ for label, value in providers_info:
+ click.echo(f"{label.ljust(max_label_length)}: {value}")
+
#
# init command
#
+SUPPORTED_PROVIDERS = {'aws', 'google', 'azure'}
-def create_project_structure(stack_name):
+def create_project_structure(stack_name, provider=None):
+ stack_name = stack_name.replace('_', '-').lower()
base_path = os.path.join(os.getcwd(), stack_name)
if os.path.exists(base_path):
- raise click.ClickException(f"Directory '{stack_name}' already exists.")
+ raise click.ClickException(f"directory '{stack_name}' already exists.")
- directories = ['stackql_docs', 'stackql_resources', 'stackql_queries']
+ directories = ['stackql_docs', 'stackql_resources', 'stackql_queries', 'external_scripts']
for directory in directories:
os.makedirs(os.path.join(base_path, directory), exist_ok=True)
- template_base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
+ # Check if provider is supported
+ if provider is None:
+ logger.debug(f"provider not supplied, defaulting to `aws`")
+ provider = 'aws'
+    elif provider not in SUPPORTED_PROVIDERS:
+        message = f"provider '{provider}' is not supported for `init`, supported providers are: {', '.join(SUPPORTED_PROVIDERS)}, defaulting to `aws`"
+        click.secho(message, fg='yellow', err=False)
+        provider = 'aws'
+
+ # set template files
+ if provider == 'google':
+ sample_res_name = 'example_project'
+ elif provider == 'azure':
+ sample_res_name = 'example_res_grp'
+ elif provider == 'aws':
+ sample_res_name = 'example_vpc'
+
+ template_base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', provider)
env = Environment(loader=FileSystemLoader(template_base_path))
logger.debug(f"template base path: {template_base_path}")
template_files = {
'stackql_manifest.yml.template': os.path.join(base_path, 'stackql_manifest.yml'),
- 'stackql_docs/stackql_example_rg.md.template': os.path.join(base_path,'stackql_docs', 'stackql_example_rg.md'),
- 'stackql_resources/stackql_example_rg.iql.template': os.path.join(base_path,'stackql_resources', 'stackql_example_rg.iql'),
- 'stackql_queries/stackql_example_rg.iql.template': os.path.join(base_path,'stackql_queries', 'stackql_example_rg.iql')
+ 'README.md.template': os.path.join(base_path, 'README.md'),
+ 'external_scripts/README.md.template': os.path.join(base_path,'external_scripts', 'README.md'),
+ f'stackql_docs/{sample_res_name}.md.template': os.path.join(base_path,'stackql_docs', f'{sample_res_name}.md'),
+ f'stackql_resources/{sample_res_name}.iql.template': os.path.join(base_path,'stackql_resources', f'{sample_res_name}.iql'),
+ f'stackql_queries/{sample_res_name}.iql.template': os.path.join(base_path,'stackql_queries', f'{sample_res_name}.iql')
}
for template_name, output_name in template_files.items():
@@ -206,10 +271,11 @@ def create_project_structure(stack_name):
@cli.command()
@click.argument('stack_name')
-def init(stack_name):
+@click.option('--provider', default=None, help='[OPTIONAL] specify a provider to start your project, supported values: aws, azure, google')
+def init(stack_name, provider):
"""Initialize a new stackql-deploy project structure."""
setup_logger("init", locals())
- create_project_structure(stack_name)
+ create_project_structure(stack_name, provider=provider)
click.echo(f"project {stack_name} initialized successfully.")
diff --git a/stackql_deploy/cmd/build.py b/stackql_deploy/cmd/build.py
index d81c387..4c9ef1a 100644
--- a/stackql_deploy/cmd/build.py
+++ b/stackql_deploy/cmd/build.py
@@ -51,7 +51,7 @@ def _export_vars(self, resource, export, expected_exports, protected_exports):
for key, value in export.items():
if key in protected_exports:
mask = '*' * len(str(value))
- self.logger.info(f"🔒 set protected variable [{key}] to [{mask}] in exports")
+ self.logger.info(f"🔒 set protected variable [{key}] to [{mask}] in exports")
else:
self.logger.info(f"➡️ set [{key}] to [{value}] in exports")
@@ -60,7 +60,7 @@ def _export_vars(self, resource, export, expected_exports, protected_exports):
def run(self, dry_run, on_failure):
- self.logger.info(f"Deploying [{self.stack_name}] in [{self.stack_env}] environment {'(dry run)' if dry_run else ''}")
+ self.logger.info(f"deploying [{self.stack_name}] in [{self.stack_env}] environment {'(dry run)' if dry_run else ''}")
# get global context and pull providers
self.global_context, self.providers = get_global_context_and_providers(self.env, self.manifest, self.vars, self.stack_env, self.stack_name, self.stackql, self.logger)
@@ -137,7 +137,9 @@ def run(self, dry_run, on_failure):
else:
if 'preflight' in test_queries:
preflight_query = test_queries['preflight']
-
+ preflight_retries = test_query_options.get('preflight', {}).get('retries', 10)
+ preflight_retry_delay = test_query_options.get('preflight', {}).get('retry_delay', 10)
+
if 'postdeploy' in test_queries:
postdeploy_query = test_queries['postdeploy']
postdeploy_retries = test_query_options.get('postdeploy', {}).get('retries', 10)
@@ -164,10 +166,10 @@ def run(self, dry_run, on_failure):
if not preflight_query:
self.logger.info(f"pre-flight check not configured for [{resource['name']}]")
elif dry_run:
- self.logger.info(f"dry run pre-flight check for [{resource['name']}]:\n\n{preflight_query}\n")
+ self.logger.info(f"🔎 dry run pre-flight check for [{resource['name']}]:\n\n/* pre-flight query */\n{preflight_query}\n")
else:
- self.logger.info(f"running pre-flight check for [{resource['name']}]...")
- resource_exists = run_test(resource, preflight_query, self.stackql, self.logger)
+ self.logger.info(f"🔎 running pre-flight check for [{resource['name']}]...")
+ resource_exists = perform_retries(resource, preflight_query, preflight_retries, preflight_retry_delay, self.stackql, self.logger)
#
# deploy
@@ -175,17 +177,17 @@ def run(self, dry_run, on_failure):
if createorupdate_query:
# disregard preflight check result if createorupdate is present
if dry_run:
- self.logger.info(f"dry run create_or_update for [{resource['name']}]:\n\n{createorupdate_query}\n")
+ self.logger.info(f"🚧 dry run create_or_update for [{resource['name']}]:\n\n/* insert (create or replace) query*/\n{createorupdate_query}\n")
else:
- self.logger.info(f"creating/updating [{resource['name']}]...")
+ self.logger.info(f"🚧 creating/updating [{resource['name']}]...")
msg = run_stackql_command(createorupdate_query, self.stackql, self.logger)
self.logger.debug(f"create or update response: {msg}")
else:
if not resource_exists:
if dry_run:
- self.logger.info(f"dry run create for [{resource['name']}]:\n\n{create_query}\n")
+ self.logger.info(f"🚧 dry run create for [{resource['name']}]:\n\n/* insert (create) query */\n{create_query}\n")
else:
- self.logger.info(f"[{resource['name']}] does not exist, creating...")
+ self.logger.info(f"[{resource['name']}] does not exist, creating 🚧...")
msg = run_stackql_command(create_query, self.stackql, self.logger)
self.logger.debug(f"create response: {msg}")
else:
@@ -194,23 +196,25 @@ def run(self, dry_run, on_failure):
self.logger.info(f"state check not configured for [{resource['name']}], state check bypassed...")
is_correct_state = True
elif dry_run:
- self.logger.info(f"dry run state check for [{resource['name']}]:\n\n{postdeploy_query}\n")
+ self.logger.info(f"🔎 dry run state check for [{resource['name']}]:\n\n/* state check query */\n{postdeploy_query}\n")
else:
- self.logger.info(f"running state check for [{resource['name']}]...")
+ self.logger.info(f"🔎 [{resource['name']}] exists, running state check...")
is_correct_state = perform_retries(resource, postdeploy_query, postdeploy_retries, postdeploy_retry_delay, self.stackql, self.logger)
- self.logger.info(f"state check result for [{resource['name']}] : {is_correct_state}")
+ if is_correct_state:
+ self.logger.info(f"[{resource['name']}] is in the desired state 👍")
+ else:
+ self.logger.info(f"[{resource['name']}] exists but is not in the desired state 👎")
if update_query:
if dry_run:
- self.logger.info(f"dry run update for [{resource['name']}]:\n\n{update_query}\n")
+ self.logger.info(f"🔧 dry run update for [{resource['name']}]:\n\n/* update query */\n{update_query}\n")
if not is_correct_state:
- self.logger.info(f"[{resource['name']}] exists and is not in the desired state, updating...")
+ self.logger.info(f"🔧 updating [{resource['name']}]...")
msg = run_stackql_command(update_query, self.stackql, self.logger)
self.logger.debug(f"update response: {msg}")
- else:
- self.logger.info(f"[{resource['name']}] is in the desired state 👍")
else:
- self.logger.info(f"[{resource['name']}] exists, no update query defined however, skipping update...")
+ if not is_correct_state:
+ self.logger.info(f"[{resource['name']}] exists, no update query defined however, skipping update...")
#
# postdeploy check
@@ -219,17 +223,18 @@ def run(self, dry_run, on_failure):
self.logger.info(f"post-deploy check not configured for [{resource['name']}], not waiting...")
else:
if dry_run:
- self.logger.info(f"dry run post-deploy check for [{resource['name']}]:\n\n{postdeploy_query}\n")
+ self.logger.info(f"🔎 dry run post-deploy check for [{resource['name']}]:\n\n/* post-deploy state check */\n{postdeploy_query}\n")
else:
if not is_correct_state:
- self.logger.info(f"running post deploy check for [{resource['name']}], waiting...")
+ self.logger.info(f"🔎 running post deploy check for [{resource['name']}], waiting...")
is_correct_state = perform_retries(resource, postdeploy_query, postdeploy_retries, postdeploy_retry_delay, self.stackql, self.logger)
#
# postdeploy check complete
#
if postdeploy_query and not is_correct_state:
- catch_error_and_exit(f"❌ deployment failed for {resource['name']} after post-deploy checks.", self.logger)
+ if not dry_run:
+ catch_error_and_exit(f"❌ deployment failed for {resource['name']} after post-deploy checks.", self.logger)
#
# exports
@@ -241,9 +246,12 @@ def run(self, dry_run, on_failure):
protected_exports = resource.get('protected', [])
if not dry_run:
- self.logger.info(f"exporting variables for [{resource['name']}]...")
+ self.logger.info(f"📦 exporting variables for [{resource['name']}]...")
exports = run_stackql_query(exports_query, self.stackql, True, self.logger, exports_retries, exports_retry_delay)
self.logger.debug(f"exports: {exports}")
+
+ if exports is None:
+ catch_error_and_exit(f"exports query failed for {resource['name']}", self.logger)
if len(exports) > 1:
catch_error_and_exit(f"exports should include one row only, received {str(len(exports))} rows", self.logger)
@@ -267,7 +275,7 @@ def run(self, dry_run, on_failure):
self._export_vars(resource, export_data, expected_exports, protected_exports)
else:
- self.logger.info(f"dry run exports query for [{resource['name']}]:\n\n{exports_query}\n")
+ self.logger.info(f"📦 dry run exports query for [{resource['name']}]:\n\n/* exports query */\n{exports_query}\n")
if not dry_run:
if type == 'resource':
diff --git a/stackql_deploy/cmd/teardown.py b/stackql_deploy/cmd/teardown.py
index 8184b08..e69be50 100644
--- a/stackql_deploy/cmd/teardown.py
+++ b/stackql_deploy/cmd/teardown.py
@@ -1,5 +1,5 @@
import sys
-from ..lib.utils import perform_retries, run_stackql_command, catch_error_and_exit
+from ..lib.utils import perform_retries, run_stackql_command, catch_error_and_exit, run_stackql_query
from ..lib.config import setup_environment, load_manifest, get_global_context_and_providers, get_full_context
from ..lib.templating import get_queries
@@ -15,6 +15,79 @@ def __init__(self, stackql, vars, logger, stack_dir, stack_env):
self.manifest = load_manifest(self.stack_dir, self.logger)
self.stack_name = self.manifest.get('name', stack_dir)
+ def _export_vars(self, resource, export, expected_exports, protected_exports):
+ for key in expected_exports:
+ if key not in export:
+ catch_error_and_exit(f"exported key '{key}' not found in exports for {resource['name']}.", self.logger)
+
+ for key, value in export.items():
+ if key in protected_exports:
+ mask = '*' * len(str(value))
+ self.logger.info(f"🔒 set protected variable [{key}] to [{mask}] in exports")
+ else:
+ self.logger.info(f"➡️ set [{key}] to [{value}] in exports")
+
+ self.global_context[key] = value # Update global context with exported values
+
+ def collect_exports(self):
+ self.logger.info(f"Collecting exports for [{self.stack_name}] in [{self.stack_env}] environment")
+
+ # # get global context and pull providers
+ # self.global_context, self.providers = get_global_context_and_providers(self.env, self.manifest, self.vars, self.stack_env, self.stack_name, self.stackql, self.logger)
+
+ for resource in self.manifest.get('resources', []):
+ self.logger.info(f"processing resource: {resource['name']}")
+
+ # get full context
+ full_context = get_full_context(self.env, self.global_context, resource, self.logger)
+
+ # get resource queries
+ test_queries, test_query_options = get_queries(self.env, self.stack_dir, 'stackql_queries', resource, full_context, False, self.logger)
+
+ exports_query = None
+
+ if 'exports' in test_queries:
+ exports_query = test_queries['exports']
+ exports_retries = test_query_options.get('exports', {}).get('retries', 10)
+ exports_retry_delay = test_query_options.get('exports', {}).get('retry_delay', 10)
+
+ if exports_query:
+ expected_exports = resource.get('exports', [])
+
+ if len(expected_exports) > 0:
+ protected_exports = resource.get('protected', [])
+
+ self.logger.info(f"📦 exporting variables for [{resource['name']}]...")
+ exports = run_stackql_query(exports_query, self.stackql, True, self.logger, exports_retries, exports_retry_delay)
+ self.logger.debug(f"exports: {exports}")
+
+ if exports is None:
+ catch_error_and_exit(f"exports query failed for {resource['name']}", self.logger)
+
+ if len(exports) > 1:
+ catch_error_and_exit(f"exports should include one row only, received {str(len(exports))} rows", self.logger)
+
+ if len(exports) == 1 and not isinstance(exports[0], dict):
+ catch_error_and_exit(f"exports must be a dictionary, received {str(exports[0])}", self.logger)
+
+ if len(exports) == 0:
+ export_data = {key: '' for key in expected_exports}
+
+ if len(exports) == 1 and isinstance(exports[0], dict):
+ # exports is a list with one dictionary
+ export = exports[0]
+ export_data = {}
+ for key in expected_exports:
+ # Check if the key's value is a simple string or needs special handling
+ if isinstance(export.get(key), dict) and 'String' in export[key]:
+ # Assume complex object that needs extraction from 'String'
+ export_data[key] = export[key]['String']
+ else:
+ # Treat as a simple key-value pair
+ export_data[key] = export.get(key, '') # Default to empty string if key is missing
+
+ self._export_vars(resource, export_data, expected_exports, protected_exports)
+
def run(self, dry_run, on_failure):
self.logger.info(f"Tearing down [{self.stack_name}] in [{self.stack_env}] environment {'(dry run)' if dry_run else ''}")
@@ -22,6 +95,9 @@ def run(self, dry_run, on_failure):
# get global context and pull providers
self.global_context, self.providers = get_global_context_and_providers(self.env, self.manifest, self.vars, self.stack_env, self.stack_name, self.stackql, self.logger)
+ # Collect all exports
+ self.collect_exports()
+
for resource in reversed(self.manifest['resources']):
# process resources in reverse order
diff --git a/stackql_deploy/cmd/test.py b/stackql_deploy/cmd/test.py
index 9205308..c4c80ca 100644
--- a/stackql_deploy/cmd/test.py
+++ b/stackql_deploy/cmd/test.py
@@ -1,5 +1,5 @@
import sys
-from ..lib.utils import run_test, catch_error_and_exit, run_stackql_query
+from ..lib.utils import run_test, perform_retries, catch_error_and_exit, run_stackql_query
from ..lib.config import setup_environment, load_manifest, get_global_context_and_providers, get_full_context
from ..lib.templating import get_queries
@@ -49,6 +49,8 @@ def run(self, dry_run, on_failure):
if 'postdeploy' in test_queries:
postdeploy_query = test_queries['postdeploy']
+ postdeploy_retries = test_query_options.get('postdeploy', {}).get('retries', 10)
+ postdeploy_retry_delay = test_query_options.get('postdeploy', {}).get('retry_delay', 10)
if 'exports' in test_queries:
# export variables from resource
@@ -59,24 +61,25 @@ def run(self, dry_run, on_failure):
#
# postdeploy check
#
- post_deploy_check_passed = False
+ is_correct_state = False
if not postdeploy_query:
- post_deploy_check_passed = True
+ is_correct_state = True
if resource.get('type') and resource['type'] == 'query':
self.logger.debug(f"❓ test not configured for [{resource['name']}]")
else:
self.logger.info(f"❓ test not configured for [{resource['name']}]")
elif dry_run:
- post_deploy_check_passed = True
+ is_correct_state = True
self.logger.info(f"test query for [{resource['name']}]:\n\n{postdeploy_query}\n")
else:
- post_deploy_check_passed = run_test(resource, postdeploy_query, self.stackql, self.logger)
+ self.logger.info(f"🔎 checking state for [{resource['name']}]...")
+ is_correct_state = perform_retries(resource, postdeploy_query, postdeploy_retries, postdeploy_retry_delay, self.stackql, self.logger)
#
# postdeploy check complete
#
- if not post_deploy_check_passed:
+ if not is_correct_state:
catch_error_and_exit(f"❌ test failed for {resource['name']}.", self.logger)
#
@@ -89,10 +92,13 @@ def run(self, dry_run, on_failure):
protected_exports = resource.get('protected', [])
if not dry_run:
- self.logger.info(f"exporting variables for [{resource['name']}]...")
+ self.logger.info(f"📦 exporting variables for [{resource['name']}]...")
exports = run_stackql_query(exports_query, self.stackql, True, self.logger, exports_retries, exports_retry_delay)
self.logger.debug(f"exports: {exports}")
+ if exports is None:
+ catch_error_and_exit(f"exports query failed for {resource['name']}", self.logger)
+
if len(exports) > 1:
catch_error_and_exit(f"exports should include one row only, received {str(len(exports))} rows", self.logger)
diff --git a/stackql_deploy/lib/config.py b/stackql_deploy/lib/config.py
index 8fb76ec..733f525 100644
--- a/stackql_deploy/lib/config.py
+++ b/stackql_deploy/lib/config.py
@@ -1,38 +1,49 @@
-import os, yaml, json
+import os, yaml, json, base64
from .utils import pull_providers, catch_error_and_exit
from jinja2 import Environment, FileSystemLoader, select_autoescape
from jinja2.utils import markupsafe
from jinja2 import TemplateError
+# jinja filters
+
def from_json(value):
return json.loads(value)
+# def to_json_string(value):
+# return json.dumps(json.loads(value))
+
+# def remove_single_quotes(value):
+# return str(value).replace("'", "")
+
+def base64_encode(value):
+ return base64.b64encode(value.encode()).decode()
+
+def merge_lists(tags1, tags2):
+ combined_tags = tags1 + tags2
+ combined_tags_json = json.dumps(combined_tags)
+ return combined_tags_json
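+
+# example usage of the filters above in the starter-project resource templates
+# (see aws-stack/stackql_resources in this repo):
+#   '{{ inet_gateway_tags | merge_lists(global_tags) }}'  -> resource and global tags combined into a JSON array
+#   '{{ user_data | base64_encode }}'                     -> user data rendered as a base64 encoded string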
+
+# END jinja filters
+
def render_globals(env, vars, global_vars, stack_env, stack_name):
- # Establish the context with stack environment and stack name, and other vars if needed
global_context = {'stack_env': stack_env, 'stack_name': stack_name}
global_context.update(vars)
def render_value(value, context):
- """Handles recursive rendering of values that might be strings, lists, or dictionaries."""
if isinstance(value, str):
try:
- # Render the string using Jinja2 with the current context to resolve any templates
template = env.from_string(value)
- return template.render(**context) # Use **context to spread the context dictionary
+ return template.render(**context)
except TemplateError as e:
print(f"Error rendering template: {e}")
return value
elif isinstance(value, dict):
- # Recursively process and serialize each dictionary after processing
- processed_dict = {k: render_value(v, context) for k, v in value.items()}
- return json.dumps(processed_dict, ensure_ascii=False).replace('True', 'true').replace('False', 'false')
+ return {k: render_value(v, context) for k, v in value.items()}
elif isinstance(value, list):
- # First resolve templates in list items, then serialize the list as a whole
return [render_value(item, context) for item in value]
else:
return value
- # Update the global context with the rendered results
for global_var in global_vars:
global_context[global_var['name']] = render_value(global_var['value'], global_context)
@@ -41,21 +52,22 @@ def render_value(value, context):
def render_properties(env, resource_props, global_context, logger):
def render_value(value, context):
- """Handles recursive rendering of values that might be strings, lists, or dictionaries."""
if isinstance(value, str):
try:
template = env.from_string(value)
- rendered = template.render(context)
- return rendered.replace('True', 'true').replace('False', 'false')
+ # rendered = template.render(context)
+ rendered = template.render(**context)
+ # deal with boolean values
+ if rendered in ['True', 'False']:
+ return rendered.replace('True', 'true').replace('False', 'false')
+ return rendered
except TemplateError as e:
print(f"Error rendering template: {e}")
return value
elif isinstance(value, dict):
- rendered_dict = {k: render_value(v, context) for k, v in value.items()}
- return rendered_dict
+ return {k: render_value(v, context) for k, v in value.items()}
elif isinstance(value, list):
- processed_list = [render_value(item, context) for item in value]
- return processed_list
+ return [render_value(item, context) for item in value]
else:
return value
@@ -73,11 +85,6 @@ def render_value(value, context):
except Exception as e:
catch_error_and_exit(f"Failed to render property '{prop['name']}']: {e}", logger)
- # Serialize lists and dictionaries to JSON strings
- for key, value in prop_context.items():
- if isinstance(value, (list, dict)):
- prop_context[key] = json.dumps(value).replace('True', 'true').replace('False', 'false')
-
return prop_context
#
@@ -100,6 +107,10 @@ def setup_environment(stack_dir, logger):
autoescape=False
)
env.filters['from_json'] = from_json
+ # env.filters['to_json_string'] = to_json_string
+ # env.filters['remove_single_quotes'] = remove_single_quotes
+ env.filters['merge_lists'] = merge_lists
+ env.filters['base64_encode'] = base64_encode
return env
def get_global_context_and_providers(env, manifest, vars, stack_env, stack_name, stackql, logger):
diff --git a/stackql_deploy/lib/utils.py b/stackql_deploy/lib/utils.py
index 38efa28..25365be 100644
--- a/stackql_deploy/lib/utils.py
+++ b/stackql_deploy/lib/utils.py
@@ -9,72 +9,83 @@ def run_stackql_query(query, stackql, suppress_errors, logger, retries=0, delay=
attempt = 0
while attempt <= retries:
try:
- logger.debug(f"Executing stackql query on attempt {attempt + 1}: {query}")
+ logger.debug(f"executing stackql query on attempt {attempt + 1}:\n\n{query}\n")
result = stackql.execute(query, suppress_errors)
- logger.debug(f"StackQL query result: {result}, type: {type(result)}")
+ logger.debug(f"stackql query result (type:{type(result)}): {result}")
# Check if result is a list (expected outcome)
if isinstance(result, list):
- if not suppress_errors and result and 'error' in result[0]:
+ if len(result) == 0:
+ logger.debug("stackql query executed successfully, retrieved 0 items.")
+ # no rows returned; fall through to the retry logic below
+ elif not suppress_errors and result and 'error' in result[0]:
error_message = result[0]['error']
if attempt == retries:
# If retries are exhausted, log the error and exit
- catch_error_and_exit(f"Error occurred during stackql query execution: {error_message}", logger)
+ catch_error_and_exit(f"error occurred during stackql query execution:\n\n{error_message}\n", logger)
else:
# Log the error and prepare for another attempt
- logger.error(f"Attempt {attempt + 1} failed: {error_message}")
+ logger.error(f"attempt {attempt + 1} failed:\n\n{error_message}\n")
+ elif 'count' in result[0]:
+ # count query: verify that at most one resource matches, then return the result
+ logger.debug(f"stackql query executed successfully, retrieved count: {result[0]['count']}.")
+ if int(result[0]['count']) > 1:
+ catch_error_and_exit(f"detected more than one resource matching the query criteria, expected 0 or 1, got {result[0]['count']}\n", logger)
+ return result
else:
# If no errors or errors are suppressed, return the result
- logger.debug(f"StackQL query executed successfully, retrieved {len(result)} items.")
+ logger.debug(f"stackql query executed successfully, retrieved {len(result)} items.")
return result
-
else:
# Handle unexpected result format
if attempt == retries:
- catch_error_and_exit("Unexpected result format received from stackql query execution.", logger)
+ catch_error_and_exit("unexpected result format received from stackql query execution.", logger)
else:
- logger.error("Unexpected result format, retrying...")
+ logger.error("unexpected result format, retrying...")
except Exception as e:
# Log the exception and check if retry attempts are exhausted
if attempt == retries:
- catch_error_and_exit(f"An exception occurred during stackql query execution: {str(e)}", logger)
+ catch_error_and_exit(f"an exception occurred during stackql query execution:\n\n{str(e)}\n", logger)
else:
- logger.error(f"Exception on attempt {attempt + 1}: {str(e)}")
+ logger.error(f"exception on attempt {attempt + 1}:\n\n{str(e)}\n")
# Delay before next attempt
time.sleep(delay)
attempt += 1
# If all attempts fail and no result is returned, log the final failure
- logger.error(f"All attempts ({retries + 1}) to execute the query failed.")
+ logger.error(f"all attempts ({retries + 1}) to execute the query failed.")
return None
def run_stackql_command(command, stackql, logger):
try:
- logger.debug(f"executing stackql command: {command}")
+ logger.debug(f"executing stackql command:\n\n{command}\n")
result = stackql.executeStmt(command)
- logger.debug(f"stackql command result: {result}, type: {type(result)}")
+ logger.debug(f"stackql command result:\n\n{result}, type: {type(result)}\n")
if isinstance(result, dict):
# If the result contains a message, it means the execution was successful
if 'message' in result:
- logger.debug(f"stackql command executed successfully: {result['message']}")
+ # the aws cloud control provider can return a 4xx/5xx http status inside a 'message' response; treat these as errors
+ if result['message'].startswith('http response status code: 4') or result['message'].startswith('http response status code: 5'):
+ catch_error_and_exit(f"error occurred during stackql command execution:\n\n{result['message']}\n", logger)
+ logger.debug(f"stackql command executed successfully:\n\n{result['message']}\n")
return result['message'].rstrip()
elif 'error' in result:
# Check if the result contains an error message
error_message = result['error'].rstrip()
- catch_error_and_exit(f"error occurred during stackql command execution: {error_message}", logger)
+ catch_error_and_exit(f"error occurred during stackql command execution:\n\n{error_message}\n", logger)
# If there's no 'error' or 'message', it's an unexpected result format
catch_error_and_exit("unexpected result format received from stackql execution.", logger)
except Exception as e:
# Log the exception exit
- catch_error_and_exit(f"an exception occurred during stackql command execution: {str(e)}", logger)
+ catch_error_and_exit(f"an exception occurred during stackql command execution:\n\n{str(e)}\n", logger)
def pull_providers(providers, stackql, logger):
- logger.debug(f"stackql run time info: {json.dumps(stackql.properties(), indent=2)}")
+ logger.debug(f"stackql run time info:\n\n{json.dumps(stackql.properties(), indent=2)}\n")
installed_providers = run_stackql_query("SHOW PROVIDERS", stackql, False, logger) # not expecting an error here
if len(installed_providers) == 0:
installed_names = set()
@@ -92,7 +103,7 @@ def pull_providers(providers, stackql, logger):
def run_test(resource, rendered_test_iql, stackql, logger, delete_test=False):
try:
test_result = run_stackql_query(rendered_test_iql, stackql, True, logger)
- logger.debug(f"test query result for [{resource['name']}]: {test_result}")
+ logger.debug(f"test query result for [{resource['name']}]:\n\n{test_result}\n")
if test_result == []:
if delete_test:
@@ -103,7 +114,7 @@ def run_test(resource, rendered_test_iql, stackql, logger, delete_test=False):
return False
if not test_result or 'count' not in test_result[0]:
- catch_error_and_exit(f"test data structure unexpected for [{resource['name']}]: {test_result}", logger)
+ catch_error_and_exit(f"data structure unexpected for [{resource['name']}] test:\n\n{test_result}\n", logger)
count = int(test_result[0]['count'])
if delete_test:
@@ -123,7 +134,7 @@ def run_test(resource, rendered_test_iql, stackql, logger, delete_test=False):
return False
except Exception as e:
- catch_error_and_exit(f"an exception occurred during testing for [{resource['name']}]: {str(e)}", logger)
+ catch_error_and_exit(f"an exception occurred during testing for [{resource['name']}]:\n\n{str(e)}\n", logger)
def perform_retries(resource, query, retries, delay, stackql, logger, delete_test=False):
attempt = 0
@@ -133,9 +144,8 @@ def perform_retries(resource, query, retries, delay, stackql, logger, delete_tes
if result:
return True
elapsed = time.time() - start_time # Calculate elapsed time
- logger.info(f"attempt {attempt + 1}/{retries}: retrying in {delay} seconds ({int(elapsed)} seconds elapsed).")
+ logger.info(f"🕒 attempt {attempt + 1}/{retries}: retrying in {delay} seconds ({int(elapsed)} seconds elapsed).")
time.sleep(delay)
attempt += 1
elapsed = time.time() - start_time # Calculate total elapsed time
- logger.error(f"failed after {retries} retries in {int(elapsed)} seconds.")
return False
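The changes above add a guard for `count` queries (more than one matching resource is fatal) while keeping the existing retry semantics. A simplified sketch, not the library code, of how a pre-flight or post-deploy count result is interpreted, assuming `delete_test` inverts the check for teardown verification:

```python
from typing import Optional

def interpret_count_result(result: Optional[list], delete_test: bool = False) -> bool:
    # sketch of the pass/fail decision applied to a COUNT(*) test query result
    if not result:
        # empty result set: the resource was not found
        return delete_test
    count = int(result[0].get('count', 0))
    if count > 1:
        # ambiguous match: mirrors the new catch_error_and_exit guard above
        raise RuntimeError(f"expected 0 or 1 matching resources, got {count}")
    # build/test checks pass when exactly one resource exists,
    # teardown checks pass when none remain
    return count == 0 if delete_test else count == 1
```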
diff --git a/stackql_deploy/templates/aws/README.md.template b/stackql_deploy/templates/aws/README.md.template
new file mode 100644
index 0000000..296834c
--- /dev/null
+++ b/stackql_deploy/templates/aws/README.md.template
@@ -0,0 +1,11 @@
+# `stackql-deploy` starter project for `aws`
+
+> for starter projects using other providers, try `stackql-deploy my_stack --provider=azure` or `stackql-deploy my_stack --provider=google`
+
+[`aws` provider docs](https://stackql.io/registry/aws)
+
+[`stackql`](https://github.com/stackql/stackql)
+
+[`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
+
+[`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
\ No newline at end of file
diff --git a/stackql_deploy/templates/aws/external_scripts/README.md.template b/stackql_deploy/templates/aws/external_scripts/README.md.template
new file mode 100644
index 0000000..79c0788
--- /dev/null
+++ b/stackql_deploy/templates/aws/external_scripts/README.md.template
@@ -0,0 +1 @@
+# external scripts for `aws` `stackql-deploy` starter project
\ No newline at end of file
diff --git a/stackql_deploy/templates/aws/stackql_docs/example_vpc.md.template b/stackql_deploy/templates/aws/stackql_docs/example_vpc.md.template
new file mode 100644
index 0000000..28509ef
--- /dev/null
+++ b/stackql_deploy/templates/aws/stackql_docs/example_vpc.md.template
@@ -0,0 +1,3 @@
+# `example_vpc`
+
+document your `example_vpc` AWS VPC resource here (optional)
diff --git a/stackql_deploy/templates/aws/stackql_manifest.yml.template b/stackql_deploy/templates/aws/stackql_manifest.yml.template
new file mode 100644
index 0000000..45adc19
--- /dev/null
+++ b/stackql_deploy/templates/aws/stackql_manifest.yml.template
@@ -0,0 +1,23 @@
+#
+# sample manifest file, add and update values as needed
+#
+version: 1
+name: "{{ stack_name }}"
+description: description for "{{ stack_name }}"
+providers:
+ - azure
+globals:
+ - name: subscription_id
+ description: azure subscription id
+ value: "{% raw %}{{ AZURE_SUBSCRIPTION_ID }}{% endraw %}"
+ - name: location
+ value: eastus
+ - name: resource_group_name_base
+ value: "stackql-example-rg"
+resources:
+ - name: stackql_example_rg
+ description: example azure resource group
+ props:
+ - name: resource_group_name
+ description: azure resource group name
+ value: "{% raw %}{{ globals.resource_group_name_base }}-{{ globals.stack_env }}{% endraw %}"
diff --git a/stackql_deploy/templates/stackql_queries/stackql_example_rg.iql.template b/stackql_deploy/templates/aws/stackql_queries/example_vpc.iql.template
similarity index 100%
rename from stackql_deploy/templates/stackql_queries/stackql_example_rg.iql.template
rename to stackql_deploy/templates/aws/stackql_queries/example_vpc.iql.template
diff --git a/stackql_deploy/templates/stackql_resources/stackql_example_rg.iql.template b/stackql_deploy/templates/aws/stackql_resources/example_vpc.iql.template
similarity index 100%
rename from stackql_deploy/templates/stackql_resources/stackql_example_rg.iql.template
rename to stackql_deploy/templates/aws/stackql_resources/example_vpc.iql.template
diff --git a/stackql_deploy/templates/azure/README.md.template b/stackql_deploy/templates/azure/README.md.template
new file mode 100644
index 0000000..b0cadda
--- /dev/null
+++ b/stackql_deploy/templates/azure/README.md.template
@@ -0,0 +1,9 @@
+# `stackql-deploy` starter project for `azure`
+
+[`azure` provider docs](https://stackql.io/registry/azure)
+
+[`stackql`](https://github.com/stackql/stackql)
+
+[`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
+
+[`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
\ No newline at end of file
diff --git a/stackql_deploy/templates/azure/external_scripts/README.md.template b/stackql_deploy/templates/azure/external_scripts/README.md.template
new file mode 100644
index 0000000..a6c4e09
--- /dev/null
+++ b/stackql_deploy/templates/azure/external_scripts/README.md.template
@@ -0,0 +1 @@
+# external scripts for `azure` `stackql-deploy` starter project
\ No newline at end of file
diff --git a/stackql_deploy/templates/azure/stackql_docs/example_res_grp.md.template b/stackql_deploy/templates/azure/stackql_docs/example_res_grp.md.template
new file mode 100644
index 0000000..ff77835
--- /dev/null
+++ b/stackql_deploy/templates/azure/stackql_docs/example_res_grp.md.template
@@ -0,0 +1,3 @@
+# `example_res_grp`
+
+document your `example_res_grp` Azure Resource Group here (optional)
\ No newline at end of file
diff --git a/stackql_deploy/templates/azure/stackql_manifest.yml.template b/stackql_deploy/templates/azure/stackql_manifest.yml.template
new file mode 100644
index 0000000..45adc19
--- /dev/null
+++ b/stackql_deploy/templates/azure/stackql_manifest.yml.template
@@ -0,0 +1,23 @@
+#
+# sample manifest file, add and update values as needed
+#
+version: 1
+name: "{{ stack_name }}"
+description: description for "{{ stack_name }}"
+providers:
+ - azure
+globals:
+ - name: subscription_id
+ description: azure subscription id
+ value: "{% raw %}{{ AZURE_SUBSCRIPTION_ID }}{% endraw %}"
+ - name: location
+ value: eastus
+ - name: resource_group_name_base
+ value: "stackql-example-rg"
+resources:
+ - name: stackql_example_rg
+ description: example azure resource group
+ props:
+ - name: resource_group_name
+ description: azure resource group name
+ value: "{% raw %}{{ globals.resource_group_name_base }}-{{ globals.stack_env }}{% endraw %}"
diff --git a/stackql_deploy/templates/azure/stackql_queries/example_res_grp.iql.template b/stackql_deploy/templates/azure/stackql_queries/example_res_grp.iql.template
new file mode 100644
index 0000000..037a948
--- /dev/null
+++ b/stackql_deploy/templates/azure/stackql_queries/example_res_grp.iql.template
@@ -0,0 +1,15 @@
+/* defines the pre-flight and post-deploy
+tests used to check the state of a resource;
+replace these queries with your own */
+
+/*+ preflight */
+SELECT COUNT(*) as count FROM azure.resources.resource_groups
+WHERE subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+AND resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM azure.resources.resource_groups
+WHERE subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+AND resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+AND location = '{% raw %}{{ location }}{% endraw %}'
+AND JSON_EXTRACT(properties, '$.provisioningState') = 'Succeeded'
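The `{% raw %}` guards keep the runtime placeholders intact when the starter template itself is rendered at project-init time; at deploy time the generated `.iql` file is rendered again against the resolved globals and resource props. A minimal illustration of that second rendering step using plain `jinja2`, with hypothetical values for `subscription_id` and `resource_group_name`:

```python
from jinja2 import Environment

# the preflight query as it appears in the generated project (raw guards removed)
preflight = """SELECT COUNT(*) as count FROM azure.resources.resource_groups
WHERE subscriptionId = '{{ subscription_id }}'
AND resourceGroupName = '{{ resource_group_name }}'"""

context = {
    "subscription_id": "00000000-0000-0000-0000-000000000000",  # hypothetical value
    "resource_group_name": "stackql-example-rg-sit",             # base name + stack_env
}

env = Environment(autoescape=False)
print(env.from_string(preflight).render(**context))
```

The rendered statement is what `run_stackql_query` executes for the pre-flight check, returning a single `count` row.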
diff --git a/stackql_deploy/templates/azure/stackql_resources/example_res_grp.iql.template b/stackql_deploy/templates/azure/stackql_resources/example_res_grp.iql.template
new file mode 100644
index 0000000..60d4d5d
--- /dev/null
+++ b/stackql_deploy/templates/azure/stackql_resources/example_res_grp.iql.template
@@ -0,0 +1,24 @@
+/* defines the provisioning and deprovisioning commands
+used to create, update or delete the resource;
+replace these queries with your own */
+
+/*+ create */
+INSERT INTO azure.resources.resource_groups(
+ resourceGroupName,
+ subscriptionId,
+ data__location
+)
+SELECT
+ '{% raw %}{{ resource_group_name }}{% endraw %}',
+ '{% raw %}{{ subscription_id }}{% endraw %}',
+ '{% raw %}{{ location }}{% endraw %}'
+
+/*+ update */
+UPDATE azure.resources.resource_groups
+SET data__location = '{% raw %}{{ location }}{% endraw %}'
+WHERE resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+ AND subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+
+/*+ delete */
+DELETE FROM azure.resources.resource_groups
+WHERE resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}' AND subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
diff --git a/stackql_deploy/templates/google/README.md.template b/stackql_deploy/templates/google/README.md.template
new file mode 100644
index 0000000..4127303
--- /dev/null
+++ b/stackql_deploy/templates/google/README.md.template
@@ -0,0 +1,9 @@
+# `stackql-deploy` starter project for `google`
+
+[`google` provider docs](https://stackql.io/registry/google)
+
+[`stackql`](https://github.com/stackql/stackql)
+
+[`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
+
+[`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
\ No newline at end of file
diff --git a/stackql_deploy/templates/google/external_scripts/README.md.template b/stackql_deploy/templates/google/external_scripts/README.md.template
new file mode 100644
index 0000000..e93a52d
--- /dev/null
+++ b/stackql_deploy/templates/google/external_scripts/README.md.template
@@ -0,0 +1 @@
+# external scripts for `google` `stackql-deploy` starter project
\ No newline at end of file
diff --git a/stackql_deploy/templates/google/stackql_docs/example_project.md.template b/stackql_deploy/templates/google/stackql_docs/example_project.md.template
new file mode 100644
index 0000000..7244eb9
--- /dev/null
+++ b/stackql_deploy/templates/google/stackql_docs/example_project.md.template
@@ -0,0 +1,3 @@
+# `example_project`
+
+document your `example_project` GCP Project resource here (optional)
diff --git a/stackql_deploy/templates/stackql_manifest.yml.template b/stackql_deploy/templates/google/stackql_manifest.yml.template
similarity index 100%
rename from stackql_deploy/templates/stackql_manifest.yml.template
rename to stackql_deploy/templates/google/stackql_manifest.yml.template
diff --git a/stackql_deploy/templates/google/stackql_queries/example_project.iql.template b/stackql_deploy/templates/google/stackql_queries/example_project.iql.template
new file mode 100644
index 0000000..037a948
--- /dev/null
+++ b/stackql_deploy/templates/google/stackql_queries/example_project.iql.template
@@ -0,0 +1,15 @@
+/* defines the pre-flight and post-deploy
+tests used to check the state of a resource;
+replace these queries with your own */
+
+/*+ preflight */
+SELECT COUNT(*) as count FROM azure.resources.resource_groups
+WHERE subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+AND resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+
+/*+ postdeploy, retries=5, retry_delay=5 */
+SELECT COUNT(*) as count FROM azure.resources.resource_groups
+WHERE subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+AND resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+AND location = '{% raw %}{{ location }}{% endraw %}'
+AND JSON_EXTRACT(properties, '$.provisioningState') = 'Succeeded'
diff --git a/stackql_deploy/templates/google/stackql_resources/example_project.iql.template b/stackql_deploy/templates/google/stackql_resources/example_project.iql.template
new file mode 100644
index 0000000..60d4d5d
--- /dev/null
+++ b/stackql_deploy/templates/google/stackql_resources/example_project.iql.template
@@ -0,0 +1,24 @@
+/* defines the provisioning and deprovisioning commands
+used to create, update or delete the resource;
+replace these queries with your own */
+
+/*+ create */
+INSERT INTO azure.resources.resource_groups(
+ resourceGroupName,
+ subscriptionId,
+ data__location
+)
+SELECT
+ '{% raw %}{{ resource_group_name }}{% endraw %}',
+ '{% raw %}{{ subscription_id }}{% endraw %}',
+ '{% raw %}{{ location }}{% endraw %}'
+
+/*+ update */
+UPDATE azure.resources.resource_groups
+SET data__location = '{% raw %}{{ location }}{% endraw %}'
+WHERE resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}'
+ AND subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
+
+/*+ delete */
+DELETE FROM azure.resources.resource_groups
+WHERE resourceGroupName = '{% raw %}{{ resource_group_name }}{% endraw %}' AND subscriptionId = '{% raw %}{{ subscription_id }}{% endraw %}'
diff --git a/stackql_deploy/templates/stackql_docs/stackql_example_rg.md.template b/stackql_deploy/templates/stackql_docs/stackql_example_rg.md.template
deleted file mode 100644
index ce35fdd..0000000
--- a/stackql_deploy/templates/stackql_docs/stackql_example_rg.md.template
+++ /dev/null
@@ -1,2 +0,0 @@
-# stackql_example_rg
-documentation for `stackql_example_rg`