From ec347029ef356890223880287e04cdd39c3da3d9 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Tue, 17 Mar 2026 04:37:19 +1100 Subject: [PATCH 1/8] stop tracking .claude dir --- .claude/settings.local.json | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 .claude/settings.local.json diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index 6fadd2b..0000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(cargo build:*)", - "Bash(cargo check:*)", - "Bash(where clang:*)", - "Read(//c/Program Files/LLVM/bin/**)", - "Bash(echo \"LIBCLANG_PATH=$LIBCLANG_PATH\")", - "Read(//c/Program Files/**)", - "Bash(winget list:*)", - "Bash(cargo tree:*)", - "Bash(where llvm-config:*)", - "Read(//c/msys64/mingw64/bin/**)", - "Read(//c/msys64/clang64/bin/**)", - "Bash(cargo doc:*)", - "Bash(grep -r impl.*Read.*for.*Response c:/LocalGitRepos/stackql/stackql-deploy-rs/target/doc/reqwest/blocking/struct.Response.html)", - "Bash(grep -r \"multiarch\\\\|darwin\\\\|amd64\\\\|architecture\" --include=*.toml --include=*.rs --include=*.md)", - "WebFetch(domain:releases.stackql.io)", - "Bash(curl -sI -L \"https://storage.googleapis.com/stackql-public-releases/latest/stackql_darwin_multiarch.pkg\")", - "Bash(curl -sI -L \"https://releases.stackql.io/stackql/latest/stackql_linux_arm64.zip\")", - "Bash(curl -sI -L \"https://releases.stackql.io/stackql/latest/stackql_darwin_multiarch.pkg\")", - "Bash(python -c \"import yaml; yaml.safe_load\\(open\\(''.github/workflows/ci.yml''\\)\\); print\\(''ci.yml OK''\\)\")", - "Bash(python -c \"import yaml; yaml.safe_load\\(open\\(''.github/workflows/release.yml''\\)\\); print\\(''release.yml OK''\\)\")", - "Bash(cargo fmt:*)", - "Bash(rustfmt --check src/utils/download.rs src/utils/server.rs)" - ] - } -} From ffbf5c5c5519977063d3548e6d2dc91a9b9f0475 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Tue, 17 Mar 2026 
08:11:51 +1100 Subject: [PATCH 2/8] identifier capture pattern --- .gitignore | 1 + examples/aws/aws-stack/README.md | 75 ----- .../resources/example_inet_gateway.iql | 52 ---- .../resources/example_inet_gw_attachment.iql | 39 --- .../resources/example_inet_route.iql | 41 --- .../resources/example_route_table.iql | 57 ---- .../resources/example_security_group.iql | 72 ----- .../aws-stack/resources/example_subnet.iql | 66 ---- .../resources/example_subnet_rt_assn.iql | 42 --- .../aws/aws-stack/resources/example_vpc.iql | 63 ---- .../resources/example_web_server.iql | 71 ----- .../resources/get_web_server_url.iql | 2 - .../resources/example_inet_gateway.iql | 36 +++ .../resources/example_inet_gw_attachment.iql | 21 ++ .../resources/example_inet_route.iql | 23 ++ .../resources/example_route_table.iql | 39 +++ .../resources/example_security_group.iql | 48 +++ .../resources/example_subnet.iql | 43 +++ .../resources/example_subnet_rt_assn.iql | 16 + .../resources/example_vpc.iql | 44 +++ .../resources/example_web_server.iql | 48 +++ .../resources/get_web_server_url.iql | 6 + .../stackql_manifest.yml | 291 +++++++++--------- src/commands/base.rs | 29 +- src/commands/build.rs | 103 ++++++- src/commands/teardown.rs | 44 ++- src/core/utils.rs | 73 ++++- website/docs/resource-query-files.md | 107 ++++++- 28 files changed, 770 insertions(+), 782 deletions(-) delete mode 100644 examples/aws/aws-stack/README.md delete mode 100644 examples/aws/aws-stack/resources/example_inet_gateway.iql delete mode 100644 examples/aws/aws-stack/resources/example_inet_gw_attachment.iql delete mode 100644 examples/aws/aws-stack/resources/example_inet_route.iql delete mode 100644 examples/aws/aws-stack/resources/example_route_table.iql delete mode 100644 examples/aws/aws-stack/resources/example_security_group.iql delete mode 100644 examples/aws/aws-stack/resources/example_subnet.iql delete mode 100644 examples/aws/aws-stack/resources/example_subnet_rt_assn.iql delete mode 100644 
examples/aws/aws-stack/resources/example_vpc.iql delete mode 100644 examples/aws/aws-stack/resources/example_web_server.iql delete mode 100644 examples/aws/aws-stack/resources/get_web_server_url.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_inet_route.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_route_table.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_security_group.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_subnet.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_vpc.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/example_web_server.iql create mode 100644 examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql rename examples/aws/{aws-stack => aws-vpc-webserver}/stackql_manifest.yml (88%) diff --git a/.gitignore b/.gitignore index d5e5313..745a451 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ stackql.log .env nohup.out contributors.csv +.claude/ \ No newline at end of file diff --git a/examples/aws/aws-stack/README.md b/examples/aws/aws-stack/README.md deleted file mode 100644 index f05f129..0000000 --- a/examples/aws/aws-stack/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# `stackql-deploy` starter project for `aws` - -> for starter projects using other providers, try `stackql-deploy my_stack --provider=azure` or `stackql-deploy my_stack --provider=google` - -see the following links for more information on `stackql`, `stackql-deploy` and the `aws` provider: - -- [`aws` provider docs](https://stackql.io/registry/aws) -- [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) -- 
[`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) - -## Overview - -__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `aws` and `azure` for example. - -## Prerequisites - -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `aws` provider, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and optionally `AWS_SESSION_TOKEN` must be set, for more information on authentication to `aws` see the [`aws` provider documentation](https://aws.stackql.io/providers/aws). - -> __Note for macOS users__ -> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: -> ```bash -> python3 -m venv myenv -> source myenv/bin/activate -> pip install stackql-deploy -> ``` - -## Usage - -Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` and `resources` folders. 
- -The syntax for the `stackql-deploy` command is as follows: - -```bash -stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] -``` - -### Deploying a stack - -For example, to deploy the stack to an environment labeled `sit`, run the following: - -```bash -stackql-deploy build \ -examples/aws/aws-stack sit \ --e AWS_REGION=ap-southeast-2 -``` - -Use the `--dry-run` flag to view the queries to be run without actually running them, for example: - -```bash -stackql-deploy build \ -examples/aws/aws-stack sit \ --e AWS_REGION=ap-southeast-2 \ ---dry-run -``` - -### Testing a stack - -To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): - -```bash -stackql-deploy test \ -examples/aws/aws-stack sit \ --e AWS_REGION=ap-southeast-2 -``` - -### Tearing down a stack - -To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: - -```bash -stackql-deploy teardown \ -examples/aws/aws-stack sit \ --e AWS_REGION=ap-southeast-2 -``` \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_inet_gateway.iql b/examples/aws/aws-stack/resources/example_inet_gateway.iql deleted file mode 100644 index 5bd42b2..0000000 --- a/examples/aws/aws-stack/resources/example_inet_gateway.iql +++ /dev/null @@ -1,52 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.internet_gateways ( - Tags, - region -) -SELECT -'{{ inet_gateway_tags }}', -'{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ 
-SELECT COUNT(*) as count FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ exports */ -SELECT internet_gateway_id FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.internet_gateways -WHERE Identifier = '{{ internet_gateway_id }}' -AND region = '{{ region }}'; diff --git a/examples/aws/aws-stack/resources/example_inet_gw_attachment.iql b/examples/aws/aws-stack/resources/example_inet_gw_attachment.iql deleted file mode 100644 index b6e1def..0000000 --- a/examples/aws/aws-stack/resources/example_inet_gw_attachment.iql +++ /dev/null @@ -1,39 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT -attachment_type, -vpc_id -FROM aws.ec2.vpc_gateway_attachments -WHERE region = '{{ region }}' -AND internet_gateway_id = '{{ internet_gateway_id }}' -AND vpc_id = '{{ vpc_id }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.vpc_gateway_attachments ( - InternetGatewayId, - VpcId, - region -) -SELECT - '{{ internet_gateway_id }}', - '{{ vpc_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT -attachment_type, -vpc_id -FROM aws.ec2.vpc_gateway_attachments -WHERE region = '{{ region }}' -AND internet_gateway_id = '{{ internet_gateway_id }}' -AND vpc_id = '{{ vpc_id }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.vpc_gateway_attachments -WHERE Identifier = 
'IGW|{{ vpc_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_inet_route.iql b/examples/aws/aws-stack/resources/example_inet_route.iql deleted file mode 100644 index d9454fb..0000000 --- a/examples/aws/aws-stack/resources/example_inet_route.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t; - -/*+ create */ -INSERT INTO aws.ec2.routes ( - DestinationCidrBlock, - GatewayId, - RouteTableId, - region -) -SELECT - '0.0.0.0/0', - '{{ internet_gateway_id }}', - '{{ route_table_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t; - -/*+ exports */ -SELECT Identifier as inet_route_indentifer -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND Identifier = '{{ route_table_id }}|0.0.0.0/0'; - -/*+ delete */ -DELETE FROM aws.ec2.routes -WHERE Identifier = '{{ inet_route_indentifer }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_route_table.iql b/examples/aws/aws-stack/resources/example_route_table.iql deleted file mode 100644 index 4333d18..0000000 --- a/examples/aws/aws-stack/resources/example_route_table.iql +++ /dev/null @@ -1,57 +0,0 @@ -/*+ exists */ -SELECT count(*) as count FROM -( -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.route_tables ( 
- Tags, - VpcId, - region -) -SELECT - '{{ route_table_tags }}', - '{{ vpc_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT count(*) as count FROM -( -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ exports */ -SELECT route_table_id FROM -( -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.route_tables -WHERE Identifier = '{{ route_table_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_security_group.iql b/examples/aws/aws-stack/resources/example_security_group.iql deleted file mode 100644 index 0f57ce9..0000000 --- a/examples/aws/aws-stack/resources/example_security_group.iql +++ /dev/null @@ -1,72 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT group_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.security_group_tags -WHERE region = '{{ region }}' -AND group_name = '{{ group_name }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY group_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.security_groups ( - GroupName, - GroupDescription, - VpcId, - SecurityGroupIngress, - SecurityGroupEgress, - Tags, - 
region -) -SELECT - '{{ group_name }}', - '{{ group_description }}', - '{{ vpc_id }}', - '{{ security_group_ingress }}', - '{{ security_group_egress }}', - '{{ sg_tags }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT group_id, -security_group_ingress, -security_group_egress, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.security_group_tags -WHERE region = '{{ region }}' -AND group_name = '{{ group_name }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY group_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ exports */ -SELECT group_id as 'security_group_id' FROM -( -SELECT group_id, -security_group_ingress, -security_group_egress, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.security_group_tags -WHERE region = '{{ region }}' -AND group_name = '{{ group_name }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY group_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.security_groups -WHERE Identifier = '{{ security_group_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_subnet.iql b/examples/aws/aws-stack/resources/example_subnet.iql deleted file mode 100644 index d03699e..0000000 --- a/examples/aws/aws-stack/resources/example_subnet.iql +++ /dev/null @@ -1,66 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT subnet_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND 
json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.subnets ( - VpcId, - CidrBlock, - MapPublicIpOnLaunch, - Tags, - region -) -SELECT - '{{ vpc_id }}', - '{{ subnet_cidr_block }}', - true, - '{{ subnet_tags }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT subnet_id, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ subnet_cidr_block }}'; - -/*+ exports */ -SELECT subnet_id, availability_zone FROM -( -SELECT subnet_id, -availability_zone, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ subnet_cidr_block }}'; - -/*+ delete */ -DELETE FROM aws.ec2.subnets -WHERE Identifier = '{{ subnet_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_subnet_rt_assn.iql b/examples/aws/aws-stack/resources/example_subnet_rt_assn.iql deleted file mode 100644 index d0c81dc..0000000 --- a/examples/aws/aws-stack/resources/example_subnet_rt_assn.iql +++ /dev/null @@ -1,42 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT id -FROM aws.ec2.subnet_route_table_associations -WHERE region = '{{ region }}' -AND route_table_id = '{{ route_table_id }}' -AND subnet_id = '{{ subnet_id }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.subnet_route_table_associations ( - RouteTableId, - 
SubnetId, - region -) -SELECT - '{{ route_table_id }}', - '{{ subnet_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT id -FROM aws.ec2.subnet_route_table_associations -WHERE region = '{{ region }}' -AND route_table_id = '{{ route_table_id }}' -AND subnet_id = '{{ subnet_id }}' -) t; - -/*+ exports */ -SELECT id as route_table_assn_id -FROM aws.ec2.subnet_route_table_associations -WHERE region = '{{ region }}' -AND route_table_id = '{{ route_table_id }}' -AND subnet_id = '{{ subnet_id }}'; - -/*+ delete */ -DELETE FROM aws.ec2.subnet_route_table_associations -WHERE Identifier = '{{ route_table_assn_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_vpc.iql b/examples/aws/aws-stack/resources/example_vpc.iql deleted file mode 100644 index 63d52ce..0000000 --- a/examples/aws/aws-stack/resources/example_vpc.iql +++ /dev/null @@ -1,63 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.vpcs ( - CidrBlock, - Tags, - EnableDnsSupport, - EnableDnsHostnames, - region -) -SELECT - '{{ vpc_cidr_block }}', - '{{ vpc_tags }}', - true, - true, - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND 
json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ vpc_cidr_block }}'; - -/*+ exports */ -SELECT vpc_id, vpc_cidr_block FROM -( -SELECT vpc_id, cidr_block as "vpc_cidr_block", -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.vpcs -WHERE Identifier = '{{ vpc_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/example_web_server.iql b/examples/aws/aws-stack/resources/example_web_server.iql deleted file mode 100644 index 83de535..0000000 --- a/examples/aws/aws-stack/resources/example_web_server.iql +++ /dev/null @@ -1,71 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT instance_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.instance_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND subnet_id = '{{ subnet_id }}' -GROUP BY instance_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ instance_name }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.instances ( - ImageId, - InstanceType, - SubnetId, - SecurityGroupIds, - UserData, - Tags, - region -) -SELECT - '{{ ami_id }}', - '{{ instance_type }}', - '{{ instance_subnet_id }}', - '{{ sg_ids }}', - '{{ user_data | base64_encode }}', - '{{ instance_tags }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT instance_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.instance_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ 
vpc_id }}' -AND subnet_id = '{{ subnet_id }}' -GROUP BY instance_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ instance_name }}' -) t; - -/*+ exports */ -SELECT instance_id, public_dns_name FROM -( -SELECT instance_id, public_dns_name, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.instance_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND subnet_id = '{{ subnet_id }}' -GROUP BY instance_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ instance_name }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.instances -WHERE Identifier = '{{ instance_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-stack/resources/get_web_server_url.iql b/examples/aws/aws-stack/resources/get_web_server_url.iql deleted file mode 100644 index 047bcd5..0000000 --- a/examples/aws/aws-stack/resources/get_web_server_url.iql +++ /dev/null @@ -1,2 +0,0 @@ -/*+ exports */ -SELECT 'http://' || '{{ public_dns_name }}' as web_server_url \ No newline at end of file diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql new file mode 100644 index 0000000..dbdc6da --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql @@ -0,0 +1,36 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_inet_gateway"]}]' +AND 
ResourceTypeFilters = '["ec2:internet-gateway"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT internet_gateway_id +FROM awscc.ec2.internet_gateways +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t; + +/*+ create */ +INSERT INTO awscc.ec2.internet_gateways ( + Tags, + region +) +SELECT + '{{ inet_gateway_tags }}', + '{{ region }}'; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT split_part(ResourceARN, '/', 2) as internet_gateway_id +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_inet_gateway"]}]' +AND ResourceTypeFilters = '["ec2:internet-gateway"]'; + +/*+ delete */ +DELETE FROM awscc.ec2.internet_gateways +WHERE data__Identifier = '{{ internet_gateway_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql new file mode 100644 index 0000000..23d30a8 --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql @@ -0,0 +1,21 @@ +/*+ createorupdate */ +INSERT INTO awscc.ec2.vpc_gateway_attachments ( + InternetGatewayId, + VpcId, + region +) +SELECT + '{{ internet_gateway_id }}', + '{{ vpc_id }}', + '{{ region }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT attachment_type +FROM awscc.ec2.vpc_gateway_attachments +WHERE Identifier = 'IGW|{{ vpc_id }}' +AND region = '{{ region }}'; + +/*+ delete */ +DELETE FROM awscc.ec2.vpc_gateway_attachments +WHERE data__Identifier = 'IGW|{{ vpc_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql new file mode 100644 index 0000000..c0c307d --- 
/dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql @@ -0,0 +1,23 @@ +/*+ createorupdate */ +INSERT INTO awscc.ec2.routes ( + DestinationCidrBlock, + GatewayId, + RouteTableId, + region +) +SELECT + '0.0.0.0/0', + '{{ internet_gateway_id }}', + '{{ route_table_id }}', + '{{ region }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT route_table_id, cidr_block +FROM awscc.ec2.routes +WHERE Identifier = '{{ route_table_id }}|0.0.0.0/0' +AND region = '{{ region }}'; + +/*+ delete */ +DELETE FROM awscc.ec2.routes +WHERE data__Identifier = '{{ route_table_id }}|0.0.0.0/0' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_route_table.iql b/examples/aws/aws-vpc-webserver/resources/example_route_table.iql new file mode 100644 index 0000000..4c3390d --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_route_table.iql @@ -0,0 +1,39 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_route_table"]}]' +AND ResourceTypeFilters = '["ec2:route-table"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT route_table_id, vpc_id +FROM awscc.ec2.route_tables +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE vpc_id = '{{ vpc_id }}'; + +/*+ create */ +INSERT INTO awscc.ec2.route_tables ( + Tags, + VpcId, + region +) +SELECT + '{{ route_table_tags }}', + '{{ vpc_id }}', + '{{ region }}'; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT split_part(ResourceARN, '/', 2) as route_table_id +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name 
}}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_route_table"]}]' +AND ResourceTypeFilters = '["ec2:route-table"]'; + +/*+ delete */ +DELETE FROM awscc.ec2.route_tables +WHERE data__Identifier = '{{ route_table_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_security_group.iql b/examples/aws/aws-vpc-webserver/resources/example_security_group.iql new file mode 100644 index 0000000..0a631b7 --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_security_group.iql @@ -0,0 +1,48 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_security_group"]}]' +AND ResourceTypeFilters = '["ec2:security-group"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT id, group_name, group_description, vpc_id +FROM awscc.ec2.security_groups +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE group_name = '{{ group_name }}' +AND vpc_id = '{{ vpc_id }}'; + +/*+ create */ +INSERT INTO awscc.ec2.security_groups ( + GroupName, + GroupDescription, + VpcId, + SecurityGroupIngress, + SecurityGroupEgress, + Tags, + region +) +SELECT + '{{ group_name }}', + '{{ group_description }}', + '{{ vpc_id }}', + '{{ security_group_ingress }}', + '{{ security_group_egress }}', + '{{ sg_tags }}', + '{{ region }}'; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT split_part(ResourceARN, '/', 2) as security_group_id +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env 
}}"]},{"Key":"stackql:resource-name","Values":["example_security_group"]}]' +AND ResourceTypeFilters = '["ec2:security-group"]'; + +/*+ delete */ +DELETE FROM awscc.ec2.security_groups +WHERE data__Identifier = '{{ security_group_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_subnet.iql b/examples/aws/aws-vpc-webserver/resources/example_subnet.iql new file mode 100644 index 0000000..c336812 --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_subnet.iql @@ -0,0 +1,43 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_subnet"]}]' +AND ResourceTypeFilters = '["ec2:subnet"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT subnet_id, vpc_id, cidr_block, map_public_ip_on_launch +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE cidr_block = '{{ subnet_cidr_block }}' +AND vpc_id = '{{ vpc_id }}'; + +/*+ create */ +INSERT INTO awscc.ec2.subnets ( + VpcId, + CidrBlock, + MapPublicIpOnLaunch, + Tags, + region +) +SELECT + '{{ vpc_id }}', + '{{ subnet_cidr_block }}', + true, + '{{ subnet_tags }}', + '{{ region }}'; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT subnet_id, availability_zone +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}'; + +/*+ delete */ +DELETE FROM awscc.ec2.subnets +WHERE data__Identifier = '{{ subnet_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql new file mode 100644 index 0000000..aa51c5f --- /dev/null +++ 
b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql @@ -0,0 +1,16 @@ +/*+ createorupdate */ +INSERT INTO awscc.ec2.subnet_route_table_associations ( + RouteTableId, + SubnetId, + region +) +SELECT + '{{ route_table_id }}', + '{{ subnet_id }}', + '{{ region }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT route_table_id +FROM awscc.ec2.route_tables +WHERE Identifier = '{{ route_table_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_vpc.iql b/examples/aws/aws-vpc-webserver/resources/example_vpc.iql new file mode 100644 index 0000000..e842e57 --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_vpc.iql @@ -0,0 +1,44 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' +AND ResourceTypeFilters = '["ec2:vpc"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT vpc_id, cidr_block +FROM awscc.ec2.vpcs +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE cidr_block = '{{ vpc_cidr_block }}'; + +/*+ create */ +INSERT INTO awscc.ec2.vpcs ( + CidrBlock, + Tags, + EnableDnsSupport, + EnableDnsHostnames, + region +) +SELECT + '{{ vpc_cidr_block }}', + '{{ vpc_tags }}', + true, + true, + '{{ region }}'; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT split_part(ResourceARN, '/', 2) as vpc_id, +'{{ vpc_cidr_block }}' as vpc_cidr_block +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' +AND ResourceTypeFilters = '["ec2:vpc"]'; + +/*+ 
delete */ +DELETE FROM awscc.ec2.vpcs +WHERE data__Identifier = '{{ vpc_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_web_server.iql b/examples/aws/aws-vpc-webserver/resources/example_web_server.iql new file mode 100644 index 0000000..f5165ac --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/example_web_server.iql @@ -0,0 +1,48 @@ +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_web_server"]}]' +AND ResourceTypeFilters = '["ec2:instance"]'; + +/*+ statecheck, retries=10, retry_delay=10 */ +SELECT COUNT(*) as count FROM +( +SELECT instance_id, instance_type, subnet_id, image_id +FROM awscc.ec2.instances +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE instance_type = '{{ instance_type }}' +AND subnet_id = '{{ instance_subnet_id }}'; + +/*+ create */ +INSERT INTO awscc.ec2.instances ( + ImageId, + InstanceType, + SubnetId, + SecurityGroupIds, + UserData, + Tags, + region +) +SELECT + '{{ ami_id }}', + '{{ instance_type }}', + '{{ instance_subnet_id }}', + '{{ sg_ids }}', + '{{ user_data | base64_encode }}', + '{{ instance_tags }}', + '{{ region }}'; + +/*+ exports, retries=10, retry_delay=10 */ +SELECT split_part(ResourceARN, '/', 2) as instance_id +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_web_server"]}]' +AND ResourceTypeFilters = '["ec2:instance"]'; + +/*+ delete */ +DELETE FROM awscc.ec2.instances +WHERE data__Identifier = '{{ instance_id }}' +AND region = '{{ region }}'; diff --git 
a/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql b/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql new file mode 100644 index 0000000..8db07c8 --- /dev/null +++ b/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql @@ -0,0 +1,6 @@ +/*+ exports, retries=5, retry_delay=10 */ +SELECT public_dns_name, +public_dns_name as web_server_url +FROM awscc.ec2.instances +WHERE Identifier = '{{ instance_id }}' +AND region = '{{ region }}'; diff --git a/examples/aws/aws-stack/stackql_manifest.yml b/examples/aws/aws-vpc-webserver/stackql_manifest.yml similarity index 88% rename from examples/aws/aws-stack/stackql_manifest.yml rename to examples/aws/aws-vpc-webserver/stackql_manifest.yml index 7b0673a..a1b1c4c 100644 --- a/examples/aws/aws-stack/stackql_manifest.yml +++ b/examples/aws/aws-vpc-webserver/stackql_manifest.yml @@ -1,150 +1,141 @@ -version: 1 -name: "aws-vpc-webserver" -description: Provisions a complete AWS networking stack (VPC, subnet, internet gateway, route table, security group) with an Apache web server EC2 instance. 
-providers: - - awscc -globals: - - name: region - description: aws region - value: "{{ AWS_REGION }}" - - name: global_tags - value: - - Key: 'stackql:stack-name' - Value: "{{ stack_name }}" - - Key: 'stackql:stack-env' - Value: "{{ stack_env }}" - - Key: 'stackql:resource-name' - Value: "{{ resource_name }}" -resources: - - name: example_vpc - props: - - name: vpc_cidr_block - values: - prd: - value: "10.0.0.0/16" - sit: - value: "10.1.0.0/16" - dev: - value: "10.2.0.0/16" - - name: vpc_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-vpc" - merge: - - global_tags - exports: - - vpc_id - - vpc_cidr_block - - name: example_subnet - props: - - name: subnet_cidr_block - values: - prd: - value: "10.0.1.0/24" - sit: - value: "10.1.1.0/24" - dev: - value: "10.2.1.0/24" - - name: subnet_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-subnet" - merge: ['global_tags'] - exports: - - subnet_id - - availability_zone - - name: example_inet_gateway - props: - - name: inet_gateway_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway" - merge: ['global_tags'] - exports: - - internet_gateway_id - - name: example_inet_gw_attachment - props: [] - - name: example_route_table - props: - - name: route_table_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-route-table" - merge: ['global_tags'] - exports: - - route_table_id - - name: example_subnet_rt_assn - props: [] - exports: - - route_table_assn_id - - name: example_inet_route - props: [] - exports: - - inet_route_indentifer - - name: example_security_group - props: - - name: group_description - value: "web security group for {{ stack_name }} ({{ stack_env }} environment)" - - name: group_name - value: "{{ stack_name }}-{{ stack_env }}-web-sg" - - name: sg_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-web-sg" - merge: ['global_tags'] - - name: security_group_ingress - value: - - CidrIp: "0.0.0.0/0" - 
Description: Allow HTTP traffic - FromPort: 80 - ToPort: 80 - IpProtocol: "tcp" - - CidrIp: "{{ vpc_cidr_block }}" - Description: Allow SSH traffic from the internal network - FromPort: 22 - ToPort: 22 - IpProtocol: "tcp" - - name: security_group_egress - value: - - CidrIp: "0.0.0.0/0" - Description: Allow all outbound traffic - FromPort: 0 - ToPort: 0 - IpProtocol: "-1" - exports: - - security_group_id - - name: example_web_server - props: - - name: instance_name - value: "{{ stack_name }}-{{ stack_env }}-instance" - - name: ami_id - value: ami-030a5acd7c996ef60 - - name: instance_type - value: t2.micro - - name: instance_subnet_id - value: "{{ subnet_id }}" - - name: sg_ids - value: - - "{{ security_group_id }}" - - name: user_data - value: | - #!/bin/bash - yum update -y - yum install -y httpd - systemctl start httpd - systemctl enable httpd - echo 'StackQL on AWS' > /var/www/html/index.html - echo '
StackQL Logo

Hello, stackql-deploy on AWS!

' >> /var/www/html/index.html - - name: instance_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-instance" - merge: ['global_tags'] - exports: - - instance_id - - public_dns_name - - name: get_web_server_url - type: query - props: [] - exports: - - web_server_url \ No newline at end of file +version: 1 +name: "aws-vpc-webserver" +description: Provisions a complete AWS networking stack (VPC, subnet, internet gateway, route table, security group) with an Apache web server EC2 instance. +providers: + - awscc +globals: + - name: region + description: aws region + value: "{{ AWS_REGION }}" + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{{ stack_name }}" + - Key: 'stackql:stack-env' + Value: "{{ stack_env }}" + - Key: 'stackql:resource-name' + Value: "{{ resource_name }}" +resources: + - name: example_vpc + props: + - name: vpc_cidr_block + values: + prd: + value: "10.0.0.0/16" + sit: + value: "10.1.0.0/16" + dev: + value: "10.2.0.0/16" + - name: vpc_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-vpc" + merge: + - global_tags + exports: + - vpc_id + - vpc_cidr_block + - name: example_subnet + props: + - name: subnet_cidr_block + values: + prd: + value: "10.0.1.0/24" + sit: + value: "10.1.1.0/24" + dev: + value: "10.2.1.0/24" + - name: subnet_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-subnet" + merge: ['global_tags'] + exports: + - subnet_id + - availability_zone + - name: example_inet_gateway + props: + - name: inet_gateway_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway" + merge: ['global_tags'] + exports: + - internet_gateway_id + - name: example_inet_gw_attachment + props: [] + - name: example_route_table + props: + - name: route_table_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-route-table" + merge: ['global_tags'] + exports: + - route_table_id + - name: example_subnet_rt_assn + props: [] + - name: 
example_inet_route + props: [] + - name: example_security_group + props: + - name: group_description + value: "web security group for {{ stack_name }} ({{ stack_env }} environment)" + - name: group_name + value: "{{ stack_name }}-{{ stack_env }}-web-sg" + - name: sg_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-web-sg" + merge: ['global_tags'] + - name: security_group_ingress + value: + - IpProtocol: "tcp" + CidrIp: "0.0.0.0/0" + Description: Allow HTTP traffic + FromPort: 80 + ToPort: 80 + - IpProtocol: "tcp" + CidrIp: "{{ vpc_cidr_block }}" + Description: Allow SSH traffic from the internal network + FromPort: 22 + ToPort: 22 + - name: security_group_egress + value: '[{"IpProtocol":"-1","CidrIp":"0.0.0.0/0","Description":"Allow all outbound traffic"}]' + exports: + - security_group_id + - name: example_web_server + props: + - name: instance_name + value: "{{ stack_name }}-{{ stack_env }}-instance" + - name: ami_id + value: ami-05024c2628f651b80 + - name: instance_type + value: t2.micro + - name: instance_subnet_id + value: "{{ subnet_id }}" + - name: sg_ids + value: + - "{{ security_group_id }}" + - name: user_data + value: | + #!/bin/bash + yum update -y + yum install -y httpd + systemctl start httpd + systemctl enable httpd + echo 'StackQL on AWS' > /var/www/html/index.html + echo '
StackQL Logo

Hello, stackql-deploy on AWS!

' >> /var/www/html/index.html + - name: instance_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-instance" + merge: ['global_tags'] + exports: + - instance_id + - name: get_web_server_url + type: query + props: [] + exports: + - public_dns_name + - web_server_url diff --git a/src/commands/base.rs b/src/commands/base.rs index f568088..8caf6e1 100644 --- a/src/commands/base.rs +++ b/src/commands/base.rs @@ -17,7 +17,8 @@ use crate::core::env::load_env_vars; use crate::core::templating::{self, ParsedQuery}; use crate::core::utils::{ catch_error_and_exit, check_exports_as_statecheck_proxy, check_short_circuit, export_vars, - flatten_returning_row, has_returning_clause, perform_retries, pull_providers, + flatten_returning_row, has_returning_clause, perform_retries, perform_retries_with_fields, + pull_providers, run_callback_poll, run_ext_script, run_stackql_command, run_stackql_dml_returning, run_stackql_query, show_query, }; @@ -182,6 +183,15 @@ impl CommandRunner { /// Check if a resource exists using the exists query. #[allow(clippy::too_many_arguments)] + /// Check if a resource exists by running the exists query. + /// + /// Returns `(bool, Option>)`: + /// - The bool indicates whether the resource exists. + /// - If the exists query returned fields OTHER than `count`, those fields + /// are captured and returned. The caller should inject them into the + /// template context scoped to the resource (e.g. `this.identifier`) so + /// that subsequent queries (statecheck, exports, delete) can reference + /// the discovered identifier without a separate lookup. 
pub fn check_if_resource_exists( &mut self, resource: &Resource, @@ -191,7 +201,7 @@ impl CommandRunner { dry_run: bool, show_queries: bool, delete_test: bool, - ) -> bool { + ) -> (bool, Option>) { let check_type = if delete_test { "post-delete" } else { "exists" }; if dry_run { @@ -199,13 +209,13 @@ impl CommandRunner { "dry run {} check for [{}]:\n\n/* exists query */\n{}\n", check_type, resource.name, exists_query ); - return false; + return (false, None); } info!("running {} check for [{}]...", check_type, resource.name); show_query(show_queries, exists_query); - let exists = perform_retries( + let (exists, fields) = perform_retries_with_fields( &resource.name, exists_query, retries, @@ -222,11 +232,20 @@ impl CommandRunner { } } else if exists { info!("[{}] exists", resource.name); + // Log any captured fields from the exists query + if let Some(ref f) = fields { + for (k, v) in f { + info!( + "exists query for [{}] captured field [this.{}] ({{ {}.{} }}) = [{}]", + resource.name, k, resource.name, k, v + ); + } + } } else { info!("[{}] does not exist", resource.name); } - exists + (exists, fields) } /// Check if a resource is in the correct state. diff --git a/src/commands/build.rs b/src/commands/build.rs index 996fd19..7ce07d8 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -96,6 +96,26 @@ pub fn execute(matches: &ArgMatches) { } } +/// Render the statecheck query template with the given context. +macro_rules! render_statecheck { + ($runner:expr, $resource_queries:expr, $resource:expr, $ctx:expr) => { + $resource_queries.get("statecheck").map(|q| { + let rendered = + $runner.render_query(&$resource.name, "statecheck", &q.template, $ctx); + (rendered, q.options.clone()) + }) + }; +} + +/// Render the exports query template with the given context. +macro_rules! 
render_exports { + ($runner:expr, $resource_queries:expr, $resource:expr, $ctx:expr) => { + $resource_queries + .get("exports") + .map(|q| $runner.render_query(&$resource.name, "exports", &q.template, $ctx)) + }; +} + /// Main build workflow matching Python's StackQLProvisioner.run(). fn run_build( runner: &mut CommandRunner, @@ -206,24 +226,32 @@ fn run_build( } } - // Test queries - render only the ones we need + // Render the exists query eagerly (it never depends on this.* fields) let exists_query = resource_queries.get("exists").map(|q| { let rendered = runner.render_query(&resource.name, "exists", &q.template, &full_context); (rendered, q.options.clone()) }); - let statecheck_query = resource_queries.get("statecheck").map(|q| { - let rendered = - runner.render_query(&resource.name, "statecheck", &q.template, &full_context); - (rendered, q.options.clone()) - }); - let mut exports_query_str: Option = resource_queries - .get("exports") - .map(|q| runner.render_query(&resource.name, "exports", &q.template, &full_context)); + + // Statecheck and exports rendering is deferred until after the exists + // check runs, because the exists query may capture fields (e.g. + // `identifier`) that should be available as {{ this. }} in + // subsequent queries. + let mut full_context = full_context; let exports_opts = resource_queries.get("exports"); let exports_retries = exports_opts.map_or(1, |q| q.options.retries); let exports_retry_delay = exports_opts.map_or(0, |q| q.options.retry_delay); + // Only eagerly render exports if there's no exists query; otherwise + // defer until after exists has captured this.* fields. 
+ let mut exports_query_str: Option = if resource_queries.contains_key("exists") { + None // will be rendered after exists check injects this.* fields + } else { + resource_queries + .get("exports") + .map(|q| runner.render_query(&resource.name, "exports", &q.template, &full_context)) + }; + // Handle query type with no exports if res_type == "query" && exports_query_str.is_none() { if let Some(ref iq) = inline_query { @@ -242,14 +270,30 @@ fn run_build( let mut resource_exists = false; let mut is_correct_state = false; + /// Inject fields captured by the exists query into the context as + /// `this.` variables (scoped to the resource name), so that + /// statecheck / exports / delete templates can reference the + /// discovered identifiers. + fn apply_exists_fields( + fields: Option>, + resource_name: &str, + full_context: &mut HashMap, + ) { + if let Some(ref f) = fields { + for (k, v) in f { + full_context.insert(format!("{}.{}", resource_name, k), v.clone()); + } + } + } + // State checking logic if has_createorupdate { // Skip all existence and state checks for createorupdate - } else if statecheck_query.is_some() { + } else if resource_queries.contains_key("statecheck") { // Flow 1: Traditional flow when statecheck exists if let Some(ref eq) = exists_query { let eq_opts = resource_queries.get("exists").unwrap(); - resource_exists = runner.check_if_resource_exists( + let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, eq_opts.options.retries, @@ -258,8 +302,16 @@ fn run_build( show_queries, false, ); + resource_exists = exists; + + // If the exists query captured fields, inject them and + // re-render downstream queries. 
+ if fields.is_some() { + apply_exists_fields(fields, &resource.name, &mut full_context); + } } else { - // Use statecheck as exists check + // Use statecheck as exists check (render with current ctx) + let statecheck_query = render_statecheck!(runner, resource_queries, resource, &full_context); let sq = statecheck_query.as_ref().unwrap(); let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( @@ -282,6 +334,8 @@ fn run_build( ); is_correct_state = true; } else { + // Re-render statecheck with (possibly enriched) context + let statecheck_query = render_statecheck!(runner, resource_queries, resource, &full_context); let sq = statecheck_query.as_ref().unwrap(); let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( @@ -294,7 +348,10 @@ fn run_build( ); } } - } else if let Some(ref eq_str) = exports_query_str { + + // Re-render exports with enriched context + exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + } else if exports_query_str.is_some() { // Flow 2: Optimized flow using exports as proxy info!( "trying exports query first (fast-fail) for optimal validation for [{}]", @@ -302,7 +359,7 @@ fn run_build( ); let (state, proxy_result) = runner.check_state_using_exports_proxy( resource, - eq_str, + exports_query_str.as_ref().unwrap(), 1, 0, dry_run, @@ -326,7 +383,7 @@ fn run_build( if let Some(ref eq) = exists_query { let eq_opts = resource_queries.get("exists").unwrap(); - resource_exists = runner.check_if_resource_exists( + let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, eq_opts.options.retries, @@ -335,6 +392,12 @@ fn run_build( show_queries, false, ); + resource_exists = exists; + + if fields.is_some() { + apply_exists_fields(fields, &resource.name, &mut full_context); + exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + } } else 
{ resource_exists = false; } @@ -342,7 +405,7 @@ fn run_build( } else if let Some(ref eq) = exists_query { // Flow 3: Basic flow with only exists query let eq_opts = resource_queries.get("exists").unwrap(); - resource_exists = runner.check_if_resource_exists( + let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, eq_opts.options.retries, @@ -351,6 +414,12 @@ fn run_build( show_queries, false, ); + resource_exists = exists; + + if fields.is_some() { + apply_exists_fields(fields, &resource.name, &mut full_context); + exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + } } else { catch_error_and_exit( "iql file must include either 'exists', 'statecheck', or 'exports' anchor.", @@ -467,7 +536,7 @@ fn run_build( // Post-deploy state check if is_created_or_updated { - if let Some(ref sq) = statecheck_query { + if let Some(sq) = render_statecheck!(runner, resource_queries, resource, &full_context) { let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( resource, diff --git a/src/commands/teardown.rs b/src/commands/teardown.rs index 0e2d536..1e8ed71 100644 --- a/src/commands/teardown.rs +++ b/src/commands/teardown.rs @@ -235,19 +235,16 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ continue; }; - // Get delete query - render JIT - let (delete_query, delete_retries, delete_retry_delay) = - if let Some(dq) = resource_queries.get("delete") { - let rendered = - runner.render_query(&resource.name, "delete", &dq.template, &full_context); - (rendered, dq.options.retries, dq.options.retry_delay) - } else { - info!( - "delete query not defined for [{}], skipping...", - resource.name - ); - continue; - }; + // Check if delete query template exists (don't render yet — may need + // this.* fields from the exists check). 
+ let has_delete_query = resource_queries.contains_key("delete"); + if !has_delete_query { + info!( + "delete query not defined for [{}], skipping...", + resource.name + ); + continue; + } // Pre-delete check let ignore_errors = res_type == "multi"; @@ -255,7 +252,7 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ info!("pre-delete check not supported for multi resources, skipping..."); true } else { - runner.check_if_resource_exists( + let (exists, fields) = runner.check_if_resource_exists( resource, &exists_query_str, exists_retries, @@ -263,9 +260,24 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ dry_run, show_queries, false, - ) + ); + // If the exists query captured fields, inject them as this.* so + // the delete query can reference them. + if let Some(ref f) = fields { + for (k, v) in f { + full_context.insert(format!("{}.{}", &resource.name, k), v.clone()); + } + } + exists }; + // Render the delete query now (after exists fields are available). 
+ let dq = resource_queries.get("delete").unwrap(); + let delete_query = + runner.render_query(&resource.name, "delete", &dq.template, &full_context); + let delete_retries = dq.options.retries; + let delete_retry_delay = dq.options.retry_delay; + // Delete if resource_exists { let returning_row = runner.delete_resource( @@ -323,7 +335,7 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ } // Confirm deletion - let resource_deleted = runner.check_if_resource_exists( + let (resource_deleted, _) = runner.check_if_resource_exists( resource, &exists_query_str, postdelete_retries, diff --git a/src/core/utils.rs b/src/core/utils.rs index 80bd5aa..8ed9620 100644 --- a/src/core/utils.rs +++ b/src/core/utils.rs @@ -299,24 +299,40 @@ pub fn run_test( client: &mut PgwireLite, delete_test: bool, ) -> bool { + run_test_with_fields(resource_name, query, client, delete_test).0 +} + +/// Run a test query and capture any non-count fields from the result. +/// +/// Returns `(bool, Option>)`: +/// - The bool indicates whether the test passed (resource exists / is deleted). +/// - If the exists query returns fields OTHER than `count`, those fields are +/// captured and returned so the caller can inject them into the template +/// context (e.g. as `{{ this.identifier }}`). 
+pub fn run_test_with_fields( + resource_name: &str, + query: &str, + client: &mut PgwireLite, + delete_test: bool, +) -> (bool, Option>) { let result = run_stackql_query(query, client, true, 0, 5); if result.is_empty() { if delete_test { debug!("Delete test result true for [{}]", resource_name); - return true; + return (true, None); } else { debug!("Test result false for [{}]", resource_name); - return false; + return (false, None); } } // Check for error markers if result[0].contains_key("_stackql_deploy_error") || result[0].contains_key("error") { if delete_test { - return true; + return (true, None); } - return false; + return (false, None); } if let Some(count_str) = result[0].get("count") { @@ -324,33 +340,52 @@ pub fn run_test( if delete_test { if count == 0 { debug!("Delete test result true for [{}]", resource_name); - return true; + return (true, None); } else { debug!( "Delete test result false for [{}], expected 0 got {}", resource_name, count ); - return false; + return (false, None); } } else if count == 1 { debug!("Test result true for [{}]", resource_name); - return true; + // Capture any extra fields beyond "count" + let extra = extract_non_count_fields(&result[0]); + return (true, extra); } else { debug!( "Test result false for [{}], expected 1 got {}", resource_name, count ); - return false; + return (false, None); } } } // If no count field, for non-delete test consider any result as exists + // and capture all returned fields if !delete_test && !result.is_empty() { - return true; + let fields = Some(result[0].clone()); + return (true, fields); } - false + (false, None) +} + +/// Extract fields from an exists query result row, excluding the `count` field. +/// Returns `Some(map)` if there are non-count fields, `None` otherwise. 
+fn extract_non_count_fields(row: &HashMap) -> Option> { + let extra: HashMap = row + .iter() + .filter(|(k, _)| k.as_str() != "count") + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + if extra.is_empty() { + None + } else { + Some(extra) + } } /// Perform retries on a test query. @@ -363,13 +398,25 @@ pub fn perform_retries( client: &mut PgwireLite, delete_test: bool, ) -> bool { + perform_retries_with_fields(resource_name, query, retries, delay, client, delete_test).0 +} + +/// Perform retries on a test query, capturing any non-count fields from the result. +pub fn perform_retries_with_fields( + resource_name: &str, + query: &str, + retries: u32, + delay: u32, + client: &mut PgwireLite, + delete_test: bool, +) -> (bool, Option>) { let start = Instant::now(); let mut attempt = 0; while attempt < retries { - let result = run_test(resource_name, query, client, delete_test); + let (result, fields) = run_test_with_fields(resource_name, query, client, delete_test); if result { - return true; + return (true, fields); } let elapsed = start.elapsed().as_secs(); info!( @@ -383,7 +430,7 @@ pub fn perform_retries( attempt += 1; } - false + (false, None) } /// Show a query in logs if show_queries is enabled. diff --git a/website/docs/resource-query-files.md b/website/docs/resource-query-files.md index 89799dc..8d6cf95 100644 --- a/website/docs/resource-query-files.md +++ b/website/docs/resource-query-files.md @@ -33,7 +33,13 @@ The types of queries defined in resource files are detailed in the following sec ### `exists` -`exists` queries are StackQL `SELECT` statements designed to test the existence of a resource by its designated identifier (does not test the desired state). This is used to determine whether a `create` (`INSERT`) or `update` (`UPDATE`) is required. A `exists` query needs to return a single row with a single field named `count`. A `count` value of `1` indicates that the resource exists, a value of `0` would indicate that the resource does not exist. 
+`exists` queries are StackQL `SELECT` statements designed to test the existence of a resource by its designated identifier (does not test the desired state). This is used to determine whether a `create` (`INSERT`) or `update` (`UPDATE`) is required. + +An `exists` query can return results in one of two forms: + +#### Count-based existence check + +The query returns a single row with a single field named `count`. A `count` value of `1` indicates that the resource exists, a value of `0` would indicate that the resource does not exist. ```sql /*+ exists */ @@ -42,6 +48,49 @@ WHERE name = '{{ vpc_name }}' AND project = '{{ project }}' ``` +#### Identifier-based existence check + +Alternatively, the query can return a field **other than** `count` (for example `identifier`). If the query returns a row, the resource is considered to exist. If no rows are returned, the resource does not exist. + +Any non-`count` fields returned are automatically captured and made available as **resource-scoped variables** for all subsequent queries within the same resource (`statecheck`, `exports`, `delete`). These captured fields are accessible using the `{{ this. }}` syntax, which expands to `{{ . }}`. + +This pattern is particularly useful when you need to **discover a resource identifier** (for example, from a tag-based lookup) and then use that identifier to query the resource's actual properties in a `statecheck` or `exports` query. + +```sql +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' +AND ResourceTypeFilters = '["ec2:vpc"]' +``` + +In the example above, when the resource exists the `identifier` field (e.g. 
`vpc-0abc123def456`) is captured and available as `{{ this.identifier }}` in subsequent queries: + +```sql +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT vpc_id, cidr_block +FROM awscc.ec2.vpcs +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE cidr_block = '{{ vpc_cidr_block }}' +``` + +:::tip + +The identifier capture pattern enables a powerful two-step workflow for providers like `awscc` (AWS Cloud Control) where resources are identified by tags rather than names: + +1. **`exists`** — find the resource via a tag-based lookup (e.g. `awscc.tagging.tagged_resources`), capturing the cloud-assigned identifier +2. **`statecheck`** — use `{{ this.identifier }}` to query the resource directly and verify its properties match the desired state +3. **`exports`** — use `{{ this.identifier }}` to query the resource and extract values for downstream resources + +This avoids the need for complex JOINs or subqueries between the tagging service and the resource provider. + +::: + `preflight` is an alias for `exists` for backwards compatability, this will be deprecated in a future release. 
### `create` @@ -291,6 +340,7 @@ In addition to the properties defined in the manifest, StackQL Deploy injects a | `idempotency_token` | Per-resource | Stable UUID v4 for this resource for the lifetime of the session | | `this.idempotency_token` | Per-resource (inside `.iql`) | Preferred alias — expands to `{{ .idempotency_token }}` | | `.idempotency_token` | Global | Scoped form, usable in any downstream resource | +| `this.` | Per-resource (inside `.iql`) | Fields captured from `exists` queries (see [identifier-based existence check](#identifier-based-existence-check)) | ### `idempotency_token` @@ -442,6 +492,61 @@ The corresponding manifest entry requires **no** `callback` section — callback - bucket_name ``` +### Tag-based identifier discovery example (`awscc`) + +This example demonstrates the **identifier capture** pattern for AWS Cloud Control (`awscc`) resources, where resources are discovered via the `awscc.tagging.tagged_resources` service. The `exists` query returns the resource identifier (extracted from the ARN), which is then used in `statecheck` and `exports` queries via `{{ this.identifier }}`. 
+ + + +```sql +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_subnet"]}]' +AND ResourceTypeFilters = '["ec2:subnet"]' + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT subnet_id, vpc_id, cidr_block +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' +) t +WHERE cidr_block = '{{ subnet_cidr_block }}' +AND vpc_id = '{{ vpc_id }}' + +/*+ create */ +INSERT INTO awscc.ec2.subnets ( + VpcId, CidrBlock, MapPublicIpOnLaunch, Tags, region +) +SELECT + '{{ vpc_id }}', '{{ subnet_cidr_block }}', true, + '{{ subnet_tags }}', '{{ region }}' + +/*+ exports, retries=5, retry_delay=5 */ +SELECT subnet_id, availability_zone +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.identifier }}' +AND region = '{{ region }}' + +/*+ delete */ +DELETE FROM awscc.ec2.subnets +WHERE data__Identifier = '{{ subnet_id }}' +AND region = '{{ region }}' +``` + + + +In this example: + +1. **`exists`** — queries `awscc.tagging.tagged_resources` filtered by stack-level tags and resource type. If a matching resource is found, `identifier` is captured (e.g. `subnet-0abc123...`). +2. **`statecheck`** — uses `{{ this.identifier }}` to query `awscc.ec2.subnets` directly and verify the CIDR block and VPC ID match the desired state. +3. **`create`** — standard `INSERT` with tags that include `stackql:stack-name`, `stackql:stack-env`, and `stackql:resource-name` for future discovery. +4. **`exports`** — uses `{{ this.identifier }}` to query the resource and extract `subnet_id` and `availability_zone` for downstream resources. +5. **`delete`** — uses the exported `subnet_id` (from the `exports` query, not `this.identifier`) with `data__Identifier`. 
+ ### `query` type example This `query` example demonstrates retrieving the KMS key id for a given key alias in AWS. From 764c8a7d94afc4196c24584cb80f42893bd0976d Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Tue, 17 Mar 2026 08:30:54 +1100 Subject: [PATCH 3/8] updated starter template --- src/commands/build.rs | 19 +++++ src/commands/init.rs | 8 +++ template-hub/aws/starter/README.md.template | 27 ++++--- .../resources/example_vpc.iql.template | 71 +++++++------------ .../aws/starter/stackql_manifest.yml.template | 10 +-- 5 files changed, 69 insertions(+), 66 deletions(-) diff --git a/src/commands/build.rs b/src/commands/build.rs index 7ce07d8..39c98ba 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -536,6 +536,25 @@ fn run_build( // Post-deploy state check if is_created_or_updated { + // After create/update, re-run the exists query to capture + // this.* fields (e.g. identifier) that are needed by the + // statecheck and exports queries. + if let Some(ref eq) = exists_query { + let eq_opts = resource_queries.get("exists").unwrap(); + let (_exists, fields) = runner.check_if_resource_exists( + resource, + &eq.0, + eq_opts.options.retries.max(3), + eq_opts.options.retry_delay.max(5), + dry_run, + show_queries, + false, + ); + apply_exists_fields(fields, &resource.name, &mut full_context); + // Re-render exports with the newly captured fields + exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + } + if let Some(sq) = render_statecheck!(runner, resource_queries, resource, &full_context) { let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( diff --git a/src/commands/init.rs b/src/commands/init.rs index 5df150e..833aca0 100644 --- a/src/commands/init.rs +++ b/src/commands/init.rs @@ -90,6 +90,14 @@ pub fn command() -> Command { .action(ArgAction::Set) .conflicts_with("provider"), ) + .arg( + Arg::new("env") + .short('e') + .long("env") + 
.help("Default environment for examples in generated README (default: dev)") + .default_value("dev") + .action(ArgAction::Set), + ) } /// Executes the `init` command to initialize a new project structure. diff --git a/template-hub/aws/starter/README.md.template b/template-hub/aws/starter/README.md.template index 74e30f2..98f85bf 100644 --- a/template-hub/aws/starter/README.md.template +++ b/template-hub/aws/starter/README.md.template @@ -1,31 +1,30 @@ # `stackql-deploy` starter project for `aws` -> for starter projects using other providers, try `stackql-deploy {{ stack_name }} --provider=azure` or `stackql-deploy {{ stack_name }} --provider=google` +> for starter projects using other providers, try `stackql-deploy init {{ stack_name }} --provider=azure` or `stackql-deploy init {{ stack_name }} --provider=google` -see the following links for more information on `stackql`, `stackql-deploy` and the `aws` provider: +see the following links for more information on `stackql`, `stackql-deploy` and the `awscc` provider: -- [`aws` provider docs](https://stackql.io/registry/aws) +- [`awscc` provider docs](https://awscc.stackql.io/providers/awscc/) - [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) -- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) +- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy-rs) ## Overview -__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `aws` and `azure` for example. +__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. 
There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `aws` and `azure` for example. ## Prerequisites -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `aws` provider, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and optionally `AWS_SESSION_TOKEN` must be set, for more information on authentication to `aws` see the [`aws` provider documentation](https://aws.stackql.io/providers/aws). +This example requires `stackql-deploy` to be installed. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of `aws`, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and optionally `AWS_SESSION_TOKEN` must be set, for more information on authentication to `aws` see the [`awscc` provider documentation](https://awscc.stackql.io/providers/awscc/). ## Usage -Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. +Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. 
The syntax for the `stackql-deploy` command is as follows: ```bash stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] -``` +``` ### Deploying a stack @@ -33,14 +32,14 @@ For example, to deploy the stack named {{ stack_name }} to an environment labele ```bash stackql-deploy build {{ stack_name }} sit \ --e AWS_REGION=ap-southeast-2 +-e AWS_REGION=us-east-1 ``` Use the `--dry-run` flag to view the queries to be run without actually running them, for example: ```bash stackql-deploy build {{ stack_name }} sit \ --e AWS_REGION=ap-southeast-2 \ +-e AWS_REGION=us-east-1 \ --dry-run ``` @@ -50,7 +49,7 @@ To test a stack to ensure that all resources are present and in the desired stat ```bash stackql-deploy test {{ stack_name }} sit \ --e AWS_REGION=ap-southeast-2 +-e AWS_REGION=us-east-1 ``` ### Tearing down a stack @@ -59,5 +58,5 @@ To destroy or deprovision all resources in a stack for our `sit` deployment exam ```bash stackql-deploy teardown {{ stack_name }} sit \ --e AWS_REGION=ap-southeast-2 -``` \ No newline at end of file +-e AWS_REGION=us-east-1 +``` diff --git a/template-hub/aws/starter/resources/example_vpc.iql.template b/template-hub/aws/starter/resources/example_vpc.iql.template index ee7bf72..ef223c2 100644 --- a/template-hub/aws/starter/resources/example_vpc.iql.template +++ b/template-hub/aws/starter/resources/example_vpc.iql.template @@ -1,67 +1,44 @@ -/* defines the provisioning and deprovisioning commands -used to create, update or delete the resource -replace queries with your queries */ - /*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as identifier +FROM awscc.tagging.tagged_resources +WHERE region = '{% raw %}{{ region }}{% endraw %}' +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{% raw %}{{ stack_name }}{% endraw %}"]},{"Key":"stackql:stack-env","Values":["{% raw %}{{ stack_env }}{% endraw %}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' +AND 
ResourceTypeFilters = '["ec2:vpc"]'; + +/*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT vpc_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{% raw %}{{ region }}{% endraw %}' -AND cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{% raw %}{{ stack_name }}{% endraw %}' -AND json_extract(tags, '$.StackEnv') = '{% raw %}{{ stack_env }}{% endraw %}' -) t; +SELECT vpc_id, cidr_block +FROM awscc.ec2.vpcs +WHERE Identifier = '{% raw %}{{ this.identifier }}{% endraw %}' +AND region = '{% raw %}{{ region }}{% endraw %}' +) t +WHERE cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}'; /*+ create */ -INSERT INTO aws.ec2.vpcs ( +INSERT INTO awscc.ec2.vpcs ( CidrBlock, Tags, EnableDnsSupport, - EnableDnsHostnames, + EnableDnsHostnames, region ) -SELECT +SELECT '{% raw %}{{ vpc_cidr_block }}{% endraw %}', '{% raw %}{{ vpc_tags }}{% endraw %}', true, true, '{% raw %}{{ region }}{% endraw %}'; -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{% raw %}{{ region }}{% endraw %}' -AND cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{% raw %}{{ stack_name }}{% endraw %}' -AND json_extract(tags, '$.StackEnv') = '{% raw %}{{ stack_env }}{% endraw %}' -) t -WHERE cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}'; - /*+ exports, retries=5, retry_delay=5 */ -SELECT vpc_id, vpc_cidr_block FROM -( -SELECT vpc_id, cidr_block as "vpc_cidr_block", -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags +SELECT split_part(ResourceARN, '/', 2) as vpc_id, +'{% raw %}{{ vpc_cidr_block }}{% endraw %}' as 
vpc_cidr_block +FROM awscc.tagging.tagged_resources WHERE region = '{% raw %}{{ region }}{% endraw %}' -AND cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{% raw %}{{ stack_name }}{% endraw %}' -AND json_extract(tags, '$.StackEnv') = '{% raw %}{{ stack_env }}{% endraw %}' -) t; +AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{% raw %}{{ stack_name }}{% endraw %}"]},{"Key":"stackql:stack-env","Values":["{% raw %}{{ stack_env }}{% endraw %}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' +AND ResourceTypeFilters = '["ec2:vpc"]'; /*+ delete */ -DELETE FROM aws.ec2.vpcs -WHERE Identifier = '{% raw %}{{ vpc_id }}{% endraw %}' -AND region = '{% raw %}{{ region }}{% endraw %}'; \ No newline at end of file +DELETE FROM awscc.ec2.vpcs +WHERE data__Identifier = '{% raw %}{{ vpc_id }}{% endraw %}' +AND region = '{% raw %}{{ region }}{% endraw %}'; diff --git a/template-hub/aws/starter/stackql_manifest.yml.template b/template-hub/aws/starter/stackql_manifest.yml.template index cc40520..d2a8810 100644 --- a/template-hub/aws/starter/stackql_manifest.yml.template +++ b/template-hub/aws/starter/stackql_manifest.yml.template @@ -5,19 +5,19 @@ version: 1 name: "{{ stack_name }}" description: description for "{{ stack_name }}" providers: - - aws + - awscc globals: - name: region description: aws region value: "{% raw %}{{ AWS_REGION }}{% endraw %}" - name: global_tags value: - - Key: Provisioner - Value: stackql - - Key: StackName + - Key: 'stackql:stack-name' Value: "{% raw %}{{ stack_name }}{% endraw %}" - - Key: StackEnv + - Key: 'stackql:stack-env' Value: "{% raw %}{{ stack_env }}{% endraw %}" + - Key: 'stackql:resource-name' + Value: "{% raw %}{{ resource_name }}{% endraw %}" resources: - name: example_vpc description: example vpc resource From 188f50c1215017b9bf05514ced8ecf2c3f40aac0 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven 
Date: Wed, 18 Mar 2026 18:58:52 +1100 Subject: [PATCH 4/8] 2.0.4 wip --- .github/workflows/release.yml | 2 +- .gitignore | 4 +- CHANGELOG.md | 69 ++ Cargo.lock | 2 +- Cargo.toml | 2 +- docs/flows.md | 225 ++++++ examples/aws/aws-vpc-webserver/README.md | 178 +++++ .../resources/example_inet_gateway.iql | 42 +- .../resources/example_inet_gw_attachment.iql | 17 +- .../resources/example_inet_route.iql | 9 +- .../resources/example_route_table.iql | 43 +- .../resources/example_security_group.iql | 49 +- .../resources/example_subnet.iql | 40 +- .../resources/example_subnet_rt_assn.iql | 27 +- .../resources/example_vpc.iql | 46 +- .../resources/example_web_server.iql | 50 +- .../resources/get_web_server_url.iql | 5 +- .../aws-vpc-webserver/stackql_manifest.yml | 14 +- examples/aws/patch-doc-test/README.md | 90 +++ .../aws/patch-doc-test/resources/bucket1.iql | 50 ++ .../aws/patch-doc-test/stackql_manifest.yml | 34 + examples/azure/azure-stack/README.md | 2 +- .../azure-stack/resources/hello-stackql.html | 82 +- .../serverless/resources/aws/iam/roles.iql | 3 +- .../resources/aws/s3/bucket_policies.iql | 3 +- .../serverless/resources/aws/s3/buckets.iql | 3 +- .../databricks_account/account_groups.iql | 22 +- .../databricks_account/credentials.iql | 11 +- .../resources/databricks_account/storage.iql | 6 +- .../databricks_account/workspaces.iql | 15 +- .../serverless/stackql_manifest.yml | 16 +- examples/google/k8s-the-hard-way/README.md | 2 +- examples/google/load-balanced-vms/README.md | 142 ++-- examples/snowflake/entitlements/README.md | 2 +- src/commands/base.rs | 38 +- src/commands/build.rs | 228 +++++- src/commands/teardown.rs | 54 +- src/commands/test.rs | 50 +- src/core/config.rs | 58 +- src/core/templating.rs | 33 + src/core/utils.rs | 48 +- src/resource/manifest.rs | 41 + src/resource/validation.rs | 1 + src/template/engine.rs | 27 + src/utils/server.rs | 53 +- .../resources/example_vpc.iql.template | 100 +-- .../aws/starter/stackql_manifest.yml.template | 80 +- 
template-hub/azure/starter/README.md.template | 124 ++-- .../google/starter/README.md.template | 124 ++-- website/docs/getting-started.md | 477 ++++++------ website/docs/manifest-file.md | 36 + website/docs/resource-query-files.md | 95 ++- website/docs/template-filters.md | 32 + .../aws/vpc-and-ec2-instance.md | 701 +++++++++--------- 54 files changed, 2510 insertions(+), 1197 deletions(-) create mode 100644 docs/flows.md create mode 100644 examples/aws/aws-vpc-webserver/README.md create mode 100644 examples/aws/patch-doc-test/README.md create mode 100644 examples/aws/patch-doc-test/resources/bucket1.iql create mode 100644 examples/aws/patch-doc-test/stackql_manifest.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5f94b71..60e8ea4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -257,7 +257,7 @@ jobs: If you are currently using the Python package on PyPI, please migrate to this release. The Python package is now deprecated and will no longer receive updates: - https://pypi.org/project/stackql-deploy/ + https://crates.io/crates/stackql-deploy The CLI interface is fully compatible — existing `stackql_manifest.yml` files and project layouts work without modification. diff --git a/.gitignore b/.gitignore index 745a451..75cebe2 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,9 @@ stackql*.zip stackql*.pkg stackql_history.txt stackql.log +stackql-zip .env nohup.out contributors.csv -.claude/ \ No newline at end of file +.claude/ +nohup.out \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 49f7517..75a38af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,74 @@ # Changelog +## 2.0.4 (2026-03-18) + +### Identifier capture from `exists` queries + +The `exists` query can now return a named field (e.g. `vpc_id`) instead of `count`. The returned value is automatically captured as a resource-scoped variable (`{{ this. 
 }}`) and made available to all subsequent queries (`statecheck`, `exports`, `delete`) for that resource. This enables a two-step workflow where `exists` discovers the resource identifier and `statecheck` verifies its properties. + +- When `exists` returns `null` or empty for the captured field, the resource is treated as non-existent +- Multiple rows from an `exists` (identifier pattern) or `exports` query are now a fatal error +- After a `create`, the `exists` query is automatically re-run to capture the identifier for use in post-deploy `statecheck` and `exports` queries + +### `RETURNING *` identifier capture + +When a `create` statement includes `RETURNING *` and the response contains an `Identifier` field, it is automatically injected as `this.identifier` — skipping the post-create `exists` re-run and saving an API call per resource. + +### `return_vals` manifest field + +New optional `return_vals` field on resources to explicitly map fields from `RETURNING *` responses to resource-scoped variables: + +```yaml +return_vals: + create: + - Identifier: identifier # rename pattern + - ErrorCode # direct capture +``` + +If `return_vals` is specified but the field is missing from the response, the build fails. + +### `to_aws_tag_filters` template filter + +New AWS-specific Tera filter that converts `global_tags` (list of `Key`/`Value` pairs) to the AWS Resource Groups Tagging API `TagFilters` format: + +```sql +AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' +``` + +### YAML type preservation fix + +Fixed an issue where YAML string values that look like numbers (e.g. `IpProtocol: "-1"`) were being coerced to integers during JSON serialization. String types declared in YAML are now preserved through to the rendered query. 
+ +### Teardown improvements + +- Teardown no longer retries exports queries that return empty results — missing exports are set to `` and teardown continues best-effort +- Post-delete existence checks accept the first empty response instead of retrying, reducing teardown time significantly + +### AWS starter template updated + +The `stackql-deploy init --provider aws` starter template now uses: +- `awscc` (Cloud Control) provider instead of `aws` +- CTE + INNER JOIN exists pattern with `to_aws_tag_filters` +- `AWS_POLICY_EQUAL` for statecheck tag comparison +- `this.` identifier capture pattern +- `RETURNING *` on create statements +- `stackql:stack-name` / `stackql:stack-env` / `stackql:resource-name` tag taxonomy + +### AWS VPC Web Server example + +Complete rewrite of the `examples/aws/aws-vpc-webserver` stack (renamed from `aws-stack`) using the `awscc` provider exclusively. Includes 10 resources demonstrating all query patterns: tag-based discovery, identifier capture, property-level statechecks, PatchDocument updates, and the `to_aws_tag_filters` filter. + +### Patch Document Test example + +New `examples/aws/patch-doc-test` example demonstrating the Cloud Control API `UPDATE` workflow with `PatchDocument` — deploy an S3 bucket, modify its versioning config in the manifest, and re-deploy to apply the update. 
+ +### Other changes + +- Fixed `init` command missing `--env` argument (defaulting to `dev`) +- Added `debug` log import to build command +- Debug logging now shows full `RETURNING *` payloads +- Documentation updates: `resource-query-files.md`, `template-filters.md`, `manifest-file.md`, and AWS template library + ## 2.0.0 (2026-03-14) ### Initial Rust Release diff --git a/Cargo.lock b/Cargo.lock index 843f930..a12cdc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1809,7 +1809,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "stackql-deploy" -version = "2.0.3" +version = "2.0.4" dependencies = [ "base64", "chrono", diff --git a/Cargo.toml b/Cargo.toml index ab257de..708251b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stackql-deploy" -version = "2.0.3" +version = "2.0.4" edition = "2021" rust-version = "1.75" description = "Infrastructure-as-code framework for declarative cloud resource management using StackQL" diff --git a/docs/flows.md b/docs/flows.md new file mode 100644 index 0000000..d742a69 --- /dev/null +++ b/docs/flows.md @@ -0,0 +1,225 @@ +# Resource Processing Flows + +This document describes every code path in the `build`, `test`, and `teardown` commands based on which anchors are present in a resource's `.iql` file. 
+ +## Anchor Reference + +| Anchor | Purpose | +|--------|---------| +| `exists` | Check if resource exists (returns `count` or a named field) | +| `statecheck` | Verify resource properties match desired state | +| `create` | Create the resource | +| `update` | Update the resource (patch document) | +| `createorupdate` | Always execute (skip exists/statecheck) | +| `exports` | Extract values for downstream resources | +| `delete` | Remove the resource | + +## Exists Query Variants + +The `exists` query has two modes based on the returned column name: + +**Count mode** — returns `count`, existence is `count > 0`: +```sql +SELECT count(*) as count FROM awscc.s3.buckets WHERE Identifier = '{{ bucket_name }}' AND region = '{{ region }}'; +``` + +**Field capture mode** — returns a named field (e.g. `vpc_id`), captured as `this.` for downstream queries: +```sql +SELECT split_part(ResourceARN, '/', 2) as vpc_id FROM awscc.tagging.tagged_resources WHERE ...; +``` + +When `null` or empty is returned in field capture mode, the resource is treated as non-existent. + +--- + +## Build Flows + +### Flow A: `createorupdate` + `exports` + +Used for resources that should always be applied (e.g. routes, gateway attachments). + +```mermaid +graph LR + A[createorupdate] --> B[exports proxy?] + B -->|rows returned| C[done ✓] + B -->|no rows| D[FAIL ✗] +``` + +**Anchors:** `createorupdate`, optionally `exports` +**Examples:** `example_inet_route`, `example_inet_gw_attachment` (old pattern) + +--- + +### Flow B: `exists`(count) + `statecheck` + `create`/`update` + `exports` + +Classic pattern for resources identified by a known name/identifier. 
+ +```mermaid +graph LR + A[exists count] -->|0| B[create] + A -->|>0| C[statecheck] + C -->|pass| D[exports] + C -->|fail| E[update] + B --> F[post-create exists] + F --> G[statecheck] + G --> D + E --> H[post-update exists] + H --> I[statecheck] + I --> D + D --> J[done ✓] +``` + +**Anchors:** `exists`(count), `statecheck`, `create`, optionally `update`, optionally `exports` +**Examples:** `databricks_account_credentials`, `aws_s3_workspace_bucket_policy` + +--- + +### Flow C: `exists`(field) + `statecheck` + `create` + `exports` + +Pattern for awscc resources using tagging for identification. The exists query returns a resource-specific field (e.g. `vpc_id`) which is captured as `this.` and used in statecheck and exports. + +```mermaid +graph LR + A[exists field] -->|null| B[create] + A -->|value| C["this.field = value"] + C --> D["statecheck(this.field)"] + D -->|pass| E["exports(this.field)"] + D -->|fail| F["update(this.field)"] + B --> G[post-create exists] + G --> H["this.field = value"] + H --> I["statecheck(this.field)"] + I --> E + F --> J[post-update exists] + J --> K["statecheck(this.field)"] + K --> E + E --> L[done ✓] +``` + +**Anchors:** `exists`(field), `statecheck`, `create`, optionally `update`, `exports` +**Examples:** `example_vpc`, `example_subnet`, `example_inet_gateway`, `example_route_table`, `example_security_group`, `example_web_server` + +--- + +### Flow D: `exists`(count) + `exports`-as-proxy (no statecheck) + +The exports query doubles as a statecheck — if it returns rows, the resource is in the desired state. 
+ +```mermaid +graph LR + A[exists count] -->|0| B[create] + A -->|>0| C[exports proxy] + C -->|rows returned| D[done ✓] + C -->|no rows| E[update] + B --> F[post-create exists] + F --> G[exports proxy] + G --> D + E --> H[post-update exists] + H --> I[exports proxy] + I --> D +``` + +**Anchors:** `exists`(count), `create`, optionally `update`, `exports` +**Examples:** `aws_s3_workspace_bucket`, `databricks_storage_configuration` + +--- + +### Flow E: `exists`(field) + `exports` (no statecheck) + +Field capture from exists, exports uses `this.`. Exports acts as statecheck proxy. + +```mermaid +graph LR + A[exists field] -->|null| B[create] + A -->|value| C["this.field = value"] + C --> D["exports proxy(this.field)"] + D -->|rows returned| E[done ✓] + D -->|no rows| F[update] + B --> G[post-create exists] + G --> H["this.field = value"] + H --> I["exports proxy(this.field)"] + I --> E +``` + +**Anchors:** `exists`(field), `create`, `exports` +**Examples:** `example_subnet_rt_assn` + +--- + +### Flow F: `exists`(field) only (no statecheck, no exports) + +Minimal pattern — exists confirms the resource, the captured field is the only output. + +```mermaid +graph LR + A[exists field] -->|null| B[create] + A -->|value| C[done ✓] + B --> D[post-create exists] + D -->|value| C +``` + +**Anchors:** `exists`(field), `create` +**Examples:** Simple resources where existence is sufficient + +--- + +## Test Flows + +Test runs the same exists → statecheck/exports-proxy sequence as build, but never creates or updates. If a resource doesn't exist or fails statecheck, the test fails. + +```mermaid +graph LR + A[exists] -->|not found| B[FAIL ✗] + A -->|found| C[statecheck or exports proxy] + C -->|pass| D[exports] + C -->|fail| B + D --> E[done ✓] +``` + +--- + +## Teardown Flows + +Teardown processes resources in reverse manifest order. First collects all exports (running exists to capture `this.*` fields), then deletes. 
+ +```mermaid +graph LR + A[collect exports phase] --> B[exists per resource] + B --> C[exports per resource] + C --> D[reverse order delete phase] + D --> E[exists check] + E -->|found| F[delete] + E -->|not found| G[skip] + F --> H[post-delete exists] + H -->|gone| I[done ✓] + H -->|still there| I +``` + +During teardown export collection, missing exports are set to `` rather than failing — the stack may be partially deployed. + +--- + +## `this.*` Variable Lifecycle + +When an exists query returns a named field (not `count`), the value is captured as a resource-scoped variable: + +| Phase | Variable | Available to | +|-------|----------|-------------| +| exists returns `vpc_id` | `this.vpc_id` → `example_vpc.vpc_id` | statecheck, exports, delete for this resource | +| exports returns `vpc_id` | `vpc_id` and `example_vpc.vpc_id` | all subsequent resources | + +The `this.*` prefix is syntactic sugar — `{{ this.vpc_id }}` in `example_vpc`'s queries is preprocessed to `{{ example_vpc.vpc_id }}`. + +--- + +## RETURNING * Optimization + +When a `create` query includes `RETURNING *`, the Cloud Control API returns the operation metadata immediately. If `return_vals` is configured in the manifest, specified fields are captured as `this.*` variables, eliminating the need for a post-create exists re-run. + +```yaml +resources: + - name: example_vpc + return_vals: + create: + - Identifier: vpc_id # rename Identifier → this.vpc_id + - ErrorCode # capture as this.ErrorCode +``` diff --git a/examples/aws/aws-vpc-webserver/README.md b/examples/aws/aws-vpc-webserver/README.md new file mode 100644 index 0000000..e01164d --- /dev/null +++ b/examples/aws/aws-vpc-webserver/README.md @@ -0,0 +1,178 @@ +# AWS VPC Web Server Example + +This example provisions a complete AWS networking stack with an Apache web server using the `awscc` (Cloud Control) provider exclusively. 
+ +## Architecture + +```mermaid +architecture-beta + group vpc(logos:aws-vpc)[VPC 10.x.0.0/16] + + service subnet(logos:aws-vpc)[Subnet 10.x.1.0/24] in vpc + service rt(logos:aws-route-53)[Route Table] in vpc + service sg(logos:aws-shield)[Security Group] in vpc + service ec2(logos:aws-ec2)[Web Server t2.micro] in vpc + + group edge(logos:aws-cloudfront)[Edge] + + service igw(logos:aws-api-gateway)[Internet Gateway] in edge + + igw:R --> L:rt + rt:B -- T:subnet + sg:R -- L:ec2 + subnet:T -- B:ec2 +``` + +## Resources + +| # | Resource | Provider Resource | Description | +|---|----------|-------------------|-------------| +| 1 | `example_vpc` | `awscc.ec2.vpcs` | VPC with DNS support and hostnames enabled | +| 2 | `example_subnet` | `awscc.ec2.subnets` | Public subnet with auto-assign public IP | +| 3 | `example_inet_gateway` | `awscc.ec2.internet_gateways` | Internet gateway for outbound/inbound traffic | +| 4 | `example_inet_gw_attachment` | `awscc.ec2.vpc_gateway_attachments` | Attaches IGW to VPC | +| 5 | `example_route_table` | `awscc.ec2.route_tables` | Custom route table for the VPC | +| 6 | `example_subnet_rt_assn` | `awscc.ec2.subnet_route_table_associations` | Associates subnet with route table | +| 7 | `example_inet_route` | `awscc.ec2.routes` | Default route (0.0.0.0/0) to internet gateway | +| 8 | `example_security_group` | `awscc.ec2.security_groups` | Allows HTTP (80) from anywhere, SSH (22) from VPC CIDR | +| 9 | `example_web_server` | `awscc.ec2.instances` | t2.micro running Apache with a landing page | +| 10 | `get_web_server_url` | `awscc.ec2.instances` | Retrieves the public DNS name of the instance | + +## Environment-Specific CIDR Blocks + +| Environment | VPC CIDR | Subnet CIDR | +|-------------|----------|-------------| +| `prd` | 10.0.0.0/16 | 10.0.1.0/24 | +| `sit` | 10.1.0.0/16 | 10.1.1.0/24 | +| `dev` | 10.2.0.0/16 | 10.2.1.0/24 | + +## Prerequisites + +- `stackql-deploy` installed 
([releases](https://github.com/stackql/stackql-deploy-rs/releases)) +- AWS credentials set as environment variables: + + ```bash + export AWS_ACCESS_KEY_ID=your_access_key + export AWS_SECRET_ACCESS_KEY=your_secret_key + export AWS_REGION=us-east-1 + ``` + +## Usage + +### Deploy + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev +``` + +With query visibility: + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev --show-queries +``` + +Dry run (no changes): + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev --dry-run --show-queries +``` + +### Test + +```bash +stackql-deploy test examples/aws/aws-vpc-webserver dev +``` + +### Teardown + +```bash +stackql-deploy teardown examples/aws/aws-vpc-webserver dev +``` + +### Debug mode + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev --log-level debug +``` + +## How It Works + +### Tag Taxonomy + +All taggable resources are tagged with three keys used for identification: + +| Tag Key | Value | Purpose | +|---------|-------|---------| +| `stackql:stack-name` | `{{ stack_name }}` | Identifies the stack | +| `stackql:stack-env` | `{{ stack_env }}` | Identifies the deployment environment | +| `stackql:resource-name` | `{{ resource_name }}` | Identifies the specific resource | + +These are defined once as `global_tags` in the manifest and merged into each resource's tags. 
+ +### Exists Query Pattern + +The `exists` query uses a CTE that cross-references `awscc.tagging.tagged_resources` with the provider's `*_list_only` resource to confirm the resource actually exists (not just a stale tag record): + +```sql +/*+ exists */ +WITH tagged_resources AS ( + SELECT split_part(ResourceARN, '/', 2) as vpc_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:vpc"]' +), +vpcs AS ( + SELECT vpc_id FROM awscc.ec2.vpcs_list_only + WHERE region = '{{ region }}' +) +SELECT r.vpc_id +FROM vpcs r +INNER JOIN tagged_resources tr ON r.vpc_id = tr.vpc_id; +``` + +The returned field (e.g. `vpc_id`) is automatically captured as `this.vpc_id` and made available to all subsequent queries for that resource. + +### `to_aws_tag_filters` Filter + +The `global_tags` variable (a list of `Key`/`Value` pairs) is converted to the AWS TagFilters format using the `to_aws_tag_filters` custom Tera filter: + +``` +{{ global_tags | to_aws_tag_filters }} +``` + +Transforms `[{"Key":"k","Value":"v"}]` into `[{"Key":"k","Values":["v"]}]`. 
+ +### Statecheck Pattern + +The `statecheck` query uses `this.` (captured from exists) to query the actual resource via Cloud Control and verify properties match the desired state, including tag comparison using `AWS_POLICY_EQUAL`: + +```sql +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM ( + SELECT AWS_POLICY_EQUAL(tags, '{{ vpc_tags }}') as test_tags + FROM awscc.ec2.vpcs + WHERE Identifier = '{{ this.vpc_id }}' + AND region = '{{ region }}' + AND cidr_block = '{{ vpc_cidr_block }}' +) t +WHERE test_tags = 1; +``` + +### Non-Taggable Resources + +Resources that don't support tags use alternative patterns: + +- **VPC Gateway Attachment**: `count`-based exists using `Identifier = 'IGW|{{ vpc_id }}'` +- **Subnet Route Table Association**: exists via `vw_subnet_route_table_associations` view, field captured as `this.subnet_route_table_assn_id` +- **Route**: `createorupdate` pattern (always attempts insert) + +### Troubleshooting + +Check failed Cloud Control requests: + +```sql +SELECT * FROM awscc.cloud_control.resource_requests +WHERE ResourceRequestStatusFilter = '{"OperationStatuses": ["FAILED"], "Operations": ["CREATE"]}' +AND region = 'us-east-1'; +``` diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql index dbdc6da..3ea1265 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_inet_gateway.iql @@ -1,17 +1,32 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_inet_gateway"]}]' -AND ResourceTypeFilters = '["ec2:internet-gateway"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, 
'/', 2) as internet_gateway_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:internet-gateway"]' +), +internet_gateways AS +( + SELECT internet_gateway_id + FROM awscc.ec2.internet_gateways_list_only + WHERE region = '{{ region }}' +) +SELECT r.internet_gateway_id +FROM internet_gateways r +INNER JOIN tagged_resources tr +ON r.internet_gateway_id = tr.internet_gateway_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT internet_gateway_id +SELECT +AWS_POLICY_EQUAL(tags, '{{ inet_gateway_tags }}') as test_tags FROM awscc.ec2.internet_gateways -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.internet_gateway_id }}' AND region = '{{ region }}' +AND test_tags = 1 ) t; /*+ create */ @@ -21,16 +36,13 @@ INSERT INTO awscc.ec2.internet_gateways ( ) SELECT '{{ inet_gateway_tags }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=5, retry_delay=5 */ -SELECT split_part(ResourceARN, '/', 2) as internet_gateway_id -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_inet_gateway"]}]' -AND ResourceTypeFilters = '["ec2:internet-gateway"]'; +/*+ exports */ +SELECT '{{ this.internet_gateway_id }}' as internet_gateway_id; /*+ delete */ DELETE FROM awscc.ec2.internet_gateways -WHERE data__Identifier = '{{ internet_gateway_id }}' +WHERE Identifier = '{{ internet_gateway_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql index 23d30a8..c3f78aa 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql +++ 
b/examples/aws/aws-vpc-webserver/resources/example_inet_gw_attachment.iql @@ -1,4 +1,10 @@ -/*+ createorupdate */ +/*+ exists */ +SELECT count(*) as count +FROM awscc.ec2.vpc_gateway_attachments +WHERE Identifier = 'IGW|{{ vpc_id }}' +AND region = '{{ region }}'; + +/*+ create */ INSERT INTO awscc.ec2.vpc_gateway_attachments ( InternetGatewayId, VpcId, @@ -7,15 +13,16 @@ INSERT INTO awscc.ec2.vpc_gateway_attachments ( SELECT '{{ internet_gateway_id }}', '{{ vpc_id }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=3, retry_delay=5 */ -SELECT attachment_type +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT count(*) as count FROM awscc.ec2.vpc_gateway_attachments WHERE Identifier = 'IGW|{{ vpc_id }}' AND region = '{{ region }}'; /*+ delete */ DELETE FROM awscc.ec2.vpc_gateway_attachments -WHERE data__Identifier = 'IGW|{{ vpc_id }}' +WHERE Identifier = 'IGW|{{ vpc_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql b/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql index c0c307d..e36ded7 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_inet_route.iql @@ -9,15 +9,16 @@ SELECT '0.0.0.0/0', '{{ internet_gateway_id }}', '{{ route_table_id }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=3, retry_delay=5 */ -SELECT route_table_id, cidr_block +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT count(*) as count FROM awscc.ec2.routes WHERE Identifier = '{{ route_table_id }}|0.0.0.0/0' AND region = '{{ region }}'; /*+ delete */ DELETE FROM awscc.ec2.routes -WHERE data__Identifier = '{{ route_table_id }}|0.0.0.0/0' +WHERE Identifier = '{{ route_table_id }}|0.0.0.0/0' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_route_table.iql b/examples/aws/aws-vpc-webserver/resources/example_route_table.iql index 4c3390d..c451d88 
100644 --- a/examples/aws/aws-vpc-webserver/resources/example_route_table.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_route_table.iql @@ -1,19 +1,33 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_route_table"]}]' -AND ResourceTypeFilters = '["ec2:route-table"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as route_table_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:route-table"]' +), +route_tables AS +( + SELECT route_table_id + FROM awscc.ec2.route_tables_list_only + WHERE region = '{{ region }}' +) +SELECT r.route_table_id +FROM route_tables r +INNER JOIN tagged_resources tr +ON r.route_table_id = tr.route_table_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT route_table_id, vpc_id +SELECT +AWS_POLICY_EQUAL(tags, '{{ route_table_tags }}') as test_tags FROM awscc.ec2.route_tables -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.route_table_id }}' AND region = '{{ region }}' ) t -WHERE vpc_id = '{{ vpc_id }}'; +WHERE test_tags = 1; /*+ create */ INSERT INTO awscc.ec2.route_tables ( @@ -24,16 +38,13 @@ INSERT INTO awscc.ec2.route_tables ( SELECT '{{ route_table_tags }}', '{{ vpc_id }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=5, retry_delay=5 */ -SELECT split_part(ResourceARN, '/', 2) as route_table_id -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env 
}}"]},{"Key":"stackql:resource-name","Values":["example_route_table"]}]' -AND ResourceTypeFilters = '["ec2:route-table"]'; +/*+ exports */ +SELECT '{{ this.route_table_id }}' as route_table_id; /*+ delete */ DELETE FROM awscc.ec2.route_tables -WHERE data__Identifier = '{{ route_table_id }}' +WHERE Identifier = '{{ route_table_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_security_group.iql b/examples/aws/aws-vpc-webserver/resources/example_security_group.iql index 0a631b7..b0596a4 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_security_group.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_security_group.iql @@ -1,20 +1,38 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_security_group"]}]' -AND ResourceTypeFilters = '["ec2:security-group"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as security_group_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:security-group"]' +), +security_groups AS +( + SELECT id as security_group_id + FROM awscc.ec2.security_groups_list_only + WHERE region = '{{ region }}' +) +SELECT r.security_group_id +FROM security_groups r +INNER JOIN tagged_resources tr +ON r.security_group_id = tr.security_group_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT id, group_name, group_description, vpc_id +SELECT +AWS_POLICY_EQUAL(tags, '{{ sg_tags }}') as test_tags, +AWS_POLICY_EQUAL(security_group_ingress, '{{ security_group_ingress }}') as test_ingress FROM awscc.ec2.security_groups -WHERE Identifier = '{{ this.identifier 
}}' +WHERE Identifier = '{{ this.security_group_id }}' AND region = '{{ region }}' +AND group_name = '{{ group_name }}' +AND vpc_id = '{{ vpc_id }}' +AND group_description = '{{ group_description }}' ) t -WHERE group_name = '{{ group_name }}' -AND vpc_id = '{{ vpc_id }}'; +WHERE test_tags = 1 +AND test_ingress = 1; /*+ create */ INSERT INTO awscc.ec2.security_groups ( @@ -33,16 +51,13 @@ SELECT '{{ security_group_ingress }}', '{{ security_group_egress }}', '{{ sg_tags }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=5, retry_delay=5 */ -SELECT split_part(ResourceARN, '/', 2) as security_group_id -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_security_group"]}]' -AND ResourceTypeFilters = '["ec2:security-group"]'; +/*+ exports */ +SELECT '{{ this.security_group_id }}' as security_group_id; /*+ delete */ DELETE FROM awscc.ec2.security_groups -WHERE data__Identifier = '{{ security_group_id }}' +WHERE Identifier = '{{ security_group_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_subnet.iql b/examples/aws/aws-vpc-webserver/resources/example_subnet.iql index c336812..44b9236 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_subnet.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_subnet.iql @@ -1,20 +1,35 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_subnet"]}]' -AND ResourceTypeFilters = '["ec2:subnet"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as subnet_id + FROM 
awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:subnet"]' +), +subnets AS +( + SELECT subnet_id + FROM awscc.ec2.subnets_list_only + WHERE region = '{{ region }}' +) +SELECT r.subnet_id +FROM subnets r +INNER JOIN tagged_resources tr +ON r.subnet_id = tr.subnet_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT subnet_id, vpc_id, cidr_block, map_public_ip_on_launch +SELECT +AWS_POLICY_EQUAL(tags, '{{ subnet_tags }}') as test_tags FROM awscc.ec2.subnets -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.subnet_id }}' AND region = '{{ region }}' +AND cidr_block = '{{ subnet_cidr_block }}' +AND vpc_id = '{{ vpc_id }}' ) t -WHERE cidr_block = '{{ subnet_cidr_block }}' -AND vpc_id = '{{ vpc_id }}'; +WHERE test_tags = 1; /*+ create */ INSERT INTO awscc.ec2.subnets ( @@ -29,15 +44,16 @@ SELECT '{{ subnet_cidr_block }}', true, '{{ subnet_tags }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; /*+ exports, retries=5, retry_delay=5 */ SELECT subnet_id, availability_zone FROM awscc.ec2.subnets -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.subnet_id }}' AND region = '{{ region }}'; /*+ delete */ DELETE FROM awscc.ec2.subnets -WHERE data__Identifier = '{{ subnet_id }}' +WHERE Identifier = '{{ subnet_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql index aa51c5f..f54bc61 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql @@ -1,4 +1,13 @@ -/*+ createorupdate */ +/*+ exists */ +SELECT + id as subnet_route_table_assn_id +FROM awscc.ec2.vw_subnet_route_table_associations +WHERE + region = '{{ region }}' + AND route_table_id = '{{ route_table_id }}' + 
AND subnet_id = '{{ subnet_id }}'; + +/*+ create */ INSERT INTO awscc.ec2.subnet_route_table_associations ( RouteTableId, SubnetId, @@ -7,10 +16,14 @@ INSERT INTO awscc.ec2.subnet_route_table_associations ( SELECT '{{ route_table_id }}', '{{ subnet_id }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; + +/*+ exports */ +SELECT '{{ this.subnet_route_table_assn_id }}' as subnet_route_table_assn_id; -/*+ exports, retries=3, retry_delay=5 */ -SELECT route_table_id -FROM awscc.ec2.route_tables -WHERE Identifier = '{{ route_table_id }}' -AND region = '{{ region }}'; +/*+ delete */ +DELETE FROM awscc.ec2.subnet_route_table_associations +WHERE +Identifier = '{{ subnet_route_table_assn_id }}' AND +region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-vpc-webserver/resources/example_vpc.iql b/examples/aws/aws-vpc-webserver/resources/example_vpc.iql index e842e57..3a26f19 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_vpc.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_vpc.iql @@ -1,19 +1,34 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' -AND ResourceTypeFilters = '["ec2:vpc"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as vpc_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:vpc"]' +), +vpcs AS +( + SELECT vpc_id + FROM awscc.ec2.vpcs_list_only + WHERE region = '{{ region }}' +) +SELECT r.vpc_id +FROM vpcs r +INNER JOIN tagged_resources tr +ON r.vpc_id = tr.vpc_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT vpc_id, cidr_block +SELECT 
+AWS_POLICY_EQUAL(tags, '{{ vpc_tags }}') as test_tags FROM awscc.ec2.vpcs -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.vpc_id }}' AND region = '{{ region }}' +AND cidr_block = '{{ vpc_cidr_block }}' ) t -WHERE cidr_block = '{{ vpc_cidr_block }}'; +WHERE test_tags = 1; /*+ create */ INSERT INTO awscc.ec2.vpcs ( @@ -28,17 +43,14 @@ SELECT '{{ vpc_tags }}', true, true, - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=5, retry_delay=5 */ -SELECT split_part(ResourceARN, '/', 2) as vpc_id, -'{{ vpc_cidr_block }}' as vpc_cidr_block -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' -AND ResourceTypeFilters = '["ec2:vpc"]'; +/*+ exports */ +SELECT '{{ this.vpc_id }}' as vpc_id, +'{{ vpc_cidr_block }}' as vpc_cidr_block; /*+ delete */ DELETE FROM awscc.ec2.vpcs -WHERE data__Identifier = '{{ vpc_id }}' +WHERE Identifier = '{{ vpc_id }}' AND region = '{{ region }}'; diff --git a/examples/aws/aws-vpc-webserver/resources/example_web_server.iql b/examples/aws/aws-vpc-webserver/resources/example_web_server.iql index f5165ac..8df6532 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_web_server.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_web_server.iql @@ -1,20 +1,39 @@ /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_web_server"]}]' -AND ResourceTypeFilters = '["ec2:instance"]'; +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as instance_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region 
}}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:instance"]' +), +instances AS +( + SELECT instance_id + FROM awscc.ec2.instances_list_only + WHERE region = '{{ region }}' +) +SELECT r.instance_id +FROM instances r +INNER JOIN tagged_resources tr +ON r.instance_id = tr.instance_id; /*+ statecheck, retries=10, retry_delay=10 */ SELECT COUNT(*) as count FROM ( -SELECT instance_id, instance_type, subnet_id, image_id +SELECT +AWS_POLICY_EQUAL(security_group_ids, '{{ sg_ids }}') as test_security_group_ids, +AWS_POLICY_EQUAL(tags, '{{ instance_tags }}') as test_instance_tags FROM awscc.ec2.instances -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.instance_id }}' AND region = '{{ region }}' +AND instance_type = '{{ instance_type }}' +AND subnet_id = '{{ instance_subnet_id }}' +AND image_id = '{{ ami_id }}' +AND user_data = '{{ user_data | base64_encode }}' ) t -WHERE instance_type = '{{ instance_type }}' -AND subnet_id = '{{ instance_subnet_id }}'; +WHERE test_security_group_ids = 1 +AND test_instance_tags = 1; /*+ create */ INSERT INTO awscc.ec2.instances ( @@ -33,16 +52,13 @@ SELECT '{{ sg_ids }}', '{{ user_data | base64_encode }}', '{{ instance_tags }}', - '{{ region }}'; + '{{ region }}' +RETURNING *; -/*+ exports, retries=10, retry_delay=10 */ -SELECT split_part(ResourceARN, '/', 2) as instance_id -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_web_server"]}]' -AND ResourceTypeFilters = '["ec2:instance"]'; +/*+ exports */ +SELECT '{{ this.instance_id }}' as instance_id; /*+ delete */ DELETE FROM awscc.ec2.instances -WHERE data__Identifier = '{{ instance_id }}' +WHERE Identifier = '{{ instance_id }}' AND region = '{{ region }}'; diff --git 
a/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql b/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql index 8db07c8..b75c749 100644 --- a/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql +++ b/examples/aws/aws-vpc-webserver/resources/get_web_server_url.iql @@ -1,6 +1,5 @@ /*+ exports, retries=5, retry_delay=10 */ -SELECT public_dns_name, -public_dns_name as web_server_url +SELECT public_dns_name FROM awscc.ec2.instances WHERE Identifier = '{{ instance_id }}' -AND region = '{{ region }}'; +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/aws/aws-vpc-webserver/stackql_manifest.yml b/examples/aws/aws-vpc-webserver/stackql_manifest.yml index a1b1c4c..e7724ee 100644 --- a/examples/aws/aws-vpc-webserver/stackql_manifest.yml +++ b/examples/aws/aws-vpc-webserver/stackql_manifest.yml @@ -75,6 +75,8 @@ resources: - route_table_id - name: example_subnet_rt_assn props: [] + exports: + - subnet_route_table_assn_id - name: example_inet_route props: [] - name: example_security_group @@ -101,13 +103,16 @@ resources: FromPort: 22 ToPort: 22 - name: security_group_egress - value: '[{"IpProtocol":"-1","CidrIp":"0.0.0.0/0","Description":"Allow all outbound traffic"}]' + value: + - CidrIp: "0.0.0.0/0" + Description: "Allow all outbound traffic" + FromPort: -1 + ToPort: -1 + IpProtocol: "-1" exports: - security_group_id - name: example_web_server props: - - name: instance_name - value: "{{ stack_name }}-{{ stack_env }}-instance" - name: ami_id value: ami-05024c2628f651b80 - name: instance_type @@ -125,7 +130,7 @@ resources: systemctl start httpd systemctl enable httpd echo 'StackQL on AWS' > /var/www/html/index.html - echo '
StackQL Logo

Hello, stackql-deploy on AWS!

' >> /var/www/html/index.html + echo '
StackQL Logo

Hello, stackql-deploy on AWS!

' >> /var/www/html/index.html - name: instance_tags value: - Key: Name @@ -138,4 +143,3 @@ resources: props: [] exports: - public_dns_name - - web_server_url diff --git a/examples/aws/patch-doc-test/README.md b/examples/aws/patch-doc-test/README.md new file mode 100644 index 0000000..6d20103 --- /dev/null +++ b/examples/aws/patch-doc-test/README.md @@ -0,0 +1,90 @@ +# Patch Document Test (Cloud Control UPDATE) + +Demonstrates the AWS Cloud Control API update workflow using `PatchDocument` with an S3 bucket. The stack deploys a bucket with versioning enabled, and subsequent builds detect configuration drift and apply updates via the Cloud Control `UpdateResource` action. + +## What This Tests + +1. **Create** - Deploy an S3 bucket with `VersioningConfiguration: Enabled` +2. **Update** - Change the versioning config in the manifest (e.g. to `Suspended`), re-run build +3. **Statecheck** detects the drift (current state != desired state) +4. **PatchDocument** is generated via the `generate_patch_document` filter and applied +5. 
**Post-update statecheck** confirms the update was applied + +## The PatchDocument Pattern + +The `update` anchor uses the `generate_patch_document` Tera filter to transform manifest property values into a Cloud Control API `PatchDocument`: + +```sql +/*+ update */ +UPDATE awscc.s3.buckets +SET PatchDocument = string('{{ { + "VersioningConfiguration": bucket1_versioning_config, + "Tags": bucket1_tags + } | generate_patch_document }}') +WHERE Identifier = '{{ bucket1_name }}' +AND region = '{{ region }}'; +``` + +This generates a JSON Patch array like: + +```json +[ + {"op": "add", "path": "/VersioningConfiguration", "value": {"Status": "Suspended"}}, + {"op": "add", "path": "/Tags", "value": [...]} +] +``` + +## Prerequisites + +- `stackql-deploy` installed ([releases](https://github.com/stackql/stackql-deploy-rs/releases)) +- AWS credentials set: + + ```bash + export AWS_ACCESS_KEY_ID=your_access_key + export AWS_SECRET_ACCESS_KEY=your_secret_key + export AWS_REGION=us-east-1 + ``` + +## Usage + +### Deploy (create bucket with versioning Enabled) + +```bash +stackql-deploy build examples/aws/patch-doc-test dev +``` + +### Update versioning config + +Edit `stackql_manifest.yml` and change: + +```yaml +- name: bucket1_versioning_config + value: + Status: Suspended # was: Enabled +``` + +Then re-deploy: + +```bash +stackql-deploy build examples/aws/patch-doc-test dev +``` + +The build will detect the drift, generate a PatchDocument, and apply the update. 
+ +### Test + +```bash +stackql-deploy test examples/aws/patch-doc-test dev +``` + +### Teardown + +```bash +stackql-deploy teardown examples/aws/patch-doc-test dev +``` + +### Debug mode + +```bash +stackql-deploy build examples/aws/patch-doc-test dev --log-level debug +``` diff --git a/examples/aws/patch-doc-test/resources/bucket1.iql b/examples/aws/patch-doc-test/resources/bucket1.iql new file mode 100644 index 0000000..a20ad78 --- /dev/null +++ b/examples/aws/patch-doc-test/resources/bucket1.iql @@ -0,0 +1,50 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM awscc.s3.buckets_list_only +WHERE region = '{{ region }}' +AND bucket_name = '{{ bucket1_name }}'; + +/*+ create */ +INSERT INTO awscc.s3.buckets ( + BucketName, + VersioningConfiguration, + Tags, + region +) +SELECT + '{{ bucket1_name }}', + '{{ bucket1_versioning_config }}', + '{{ bucket1_tags }}', + '{{ region }}' +RETURNING *; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +JSON_EQUAL(versioning_configuration, '{{ bucket1_versioning_config }}') as test_versioning_config +FROM awscc.s3.buckets +WHERE Identifier = '{{ bucket1_name }}' +AND region = '{{ region }}' +) t +WHERE test_versioning_config = 1; + +/*+ update */ +UPDATE awscc.s3.buckets +SET PatchDocument = string('{{ { + "VersioningConfiguration": bucket1_versioning_config, + "Tags": bucket1_tags + } | generate_patch_document }}') +WHERE Identifier = '{{ bucket1_name }}' +AND region = '{{ region }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT bucket_name as bucket1_name, arn as bucket1_arn +FROM awscc.s3.buckets +WHERE Identifier = '{{ bucket1_name }}' +AND region = '{{ region }}'; + +/*+ delete */ +DELETE FROM awscc.s3.buckets +WHERE Identifier = '{{ bucket1_name }}' +AND region = '{{ region }}'; diff --git a/examples/aws/patch-doc-test/stackql_manifest.yml b/examples/aws/patch-doc-test/stackql_manifest.yml new file mode 100644 index 0000000..c5a52b9 --- /dev/null +++ 
b/examples/aws/patch-doc-test/stackql_manifest.yml @@ -0,0 +1,34 @@ +version: 1 +name: "patch-doc-test" +description: Demonstrates the Cloud Control API update (PatchDocument) workflow using an S3 bucket - deploy, modify versioning config, re-deploy to apply the update. +providers: + - awscc +globals: + - name: region + description: aws region + value: "{{ AWS_REGION }}" + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{{ stack_name }}" + - Key: 'stackql:stack-env' + Value: "{{ stack_env }}" + - Key: 'stackql:resource-name' + Value: "{{ resource_name }}" +resources: + - name: bucket1 + props: + - name: bucket1_name + value: "{{ stack_name }}-{{ stack_env }}-bucket1" + - name: bucket1_versioning_config + value: + Status: Enabled + - name: bucket1_tags + merge: + - global_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-bucket1" + exports: + - bucket1_name + - bucket1_arn diff --git a/examples/azure/azure-stack/README.md b/examples/azure/azure-stack/README.md index dc2feac..f59d2fc 100644 --- a/examples/azure/azure-stack/README.md +++ b/examples/azure/azure-stack/README.md @@ -6,7 +6,7 @@ see the following links for more information on `stackql`, `stackql-deploy` and - [`azure` provider docs](https://stackql.io/registry/azure) - [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) +- [`stackql-deploy` on crates.io](https://crates.io/crates/stackql-deploy) - [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) ## Overview diff --git a/examples/azure/azure-stack/resources/hello-stackql.html b/examples/azure/azure-stack/resources/hello-stackql.html index 5454a02..698d675 100644 --- a/examples/azure/azure-stack/resources/hello-stackql.html +++ b/examples/azure/azure-stack/resources/hello-stackql.html @@ -1,41 +1,41 @@ - - - - - - StackQL on Azure - - - -
- - StackQL Logo - -

Hello, stackql-deploy on Azure!

-
- - + + + + + + StackQL on Azure + + + +
+ + StackQL Logo + +

Hello, stackql-deploy on Azure!

+
+ + diff --git a/examples/databricks/serverless/resources/aws/iam/roles.iql b/examples/databricks/serverless/resources/aws/iam/roles.iql index 4e78a9d..27258c2 100644 --- a/examples/databricks/serverless/resources/aws/iam/roles.iql +++ b/examples/databricks/serverless/resources/aws/iam/roles.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM awscc.iam.roles WHERE region = 'us-east-1' AND -Identifier = '{{ role_name }}' -; +Identifier = '{{ role_name }}'; /*+ create */ INSERT INTO awscc.iam.roles ( diff --git a/examples/databricks/serverless/resources/aws/s3/bucket_policies.iql b/examples/databricks/serverless/resources/aws/s3/bucket_policies.iql index 21dcc12..0a4e4c9 100644 --- a/examples/databricks/serverless/resources/aws/s3/bucket_policies.iql +++ b/examples/databricks/serverless/resources/aws/s3/bucket_policies.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM awscc.s3.bucket_policies WHERE region = '{{ region }}' AND -Identifier = '{{ bucket_name }}' -; +Identifier = '{{ bucket_name }}'; /*+ create */ INSERT INTO awscc.s3.bucket_policies ( diff --git a/examples/databricks/serverless/resources/aws/s3/buckets.iql b/examples/databricks/serverless/resources/aws/s3/buckets.iql index b50a316..28568db 100644 --- a/examples/databricks/serverless/resources/aws/s3/buckets.iql +++ b/examples/databricks/serverless/resources/aws/s3/buckets.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM awscc.s3.buckets WHERE region = '{{ region }}' AND -Identifier = '{{ bucket_name }}' -; +Identifier = '{{ bucket_name }}'; /*+ create */ INSERT INTO awscc.s3.buckets ( diff --git a/examples/databricks/serverless/resources/databricks_account/account_groups.iql b/examples/databricks/serverless/resources/databricks_account/account_groups.iql index 4c3f921..820e506 100644 --- a/examples/databricks/serverless/resources/databricks_account/account_groups.iql +++ b/examples/databricks/serverless/resources/databricks_account/account_groups.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM 
databricks_account.iam.account_groups WHERE account_id = '{{ account_id }}' -AND filter = 'displayName Eq "{{ displayName }}"' -; +AND filter = 'displayName Eq "{{ displayName }}"'; /*+ create */ INSERT INTO databricks_account.iam.account_groups ( @@ -39,8 +38,8 @@ SET operations = '{{ operations }}', schemas = '{{ schemas }}' WHERE -account_id = '{{ account_id }}' --required -AND id = '{{ id }}' --required; +account_id = '{{ account_id }}' +AND id = '{{ id }}'; /*+ statecheck, retries=5, retry_delay=10 */ SELECT count(*) as count @@ -50,15 +49,14 @@ id = '{{ id }}' AND members = '{{ members }}' AND meta = '{{ meta }}' AND roles = '{{ roles }}' AND -account_id = '{{ account_id }}' -- required +account_id = '{{ account_id }}' AND attributes = '{{ attributes }}' AND count = '{{ count }}' AND excluded_attributes = '{{ excluded_attributes }}' AND filter = '{{ filter }}' AND sort_by = '{{ sort_by }}' AND sort_order = '{{ sort_order }}' -AND start_index = '{{ start_index }}' -; +AND start_index = '{{ start_index }}'; /*+ exports */ SELECT id, @@ -66,18 +64,16 @@ members, meta, roles FROM databricks_account.iam.account_groups -WHERE account_id = '{{ account_id }}' -- required +WHERE account_id = '{{ account_id }}' AND attributes = '{{ attributes }}' AND count = '{{ count }}' AND excluded_attributes = '{{ excluded_attributes }}' AND filter = '{{ filter }}' AND sort_by = '{{ sort_by }}' AND sort_order = '{{ sort_order }}' -AND start_index = '{{ start_index }}' -; +AND start_index = '{{ start_index }}'; /*+ delete */ DELETE FROM databricks_account.iam.account_groups -WHERE account_id = '{{ account_id }}' --required -AND id = '{{ id }}' --required -; \ No newline at end of file +WHERE account_id = '{{ account_id }}' +AND id = '{{ id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_account/credentials.iql b/examples/databricks/serverless/resources/databricks_account/credentials.iql index 99de22f..9e1f864 100644 --- 
a/examples/databricks/serverless/resources/databricks_account/credentials.iql +++ b/examples/databricks/serverless/resources/databricks_account/credentials.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM databricks_account.provisioning.credentials WHERE account_id = '{{ account_id }}' -AND credentials_name = '{{ credentials_name }}' -; +AND credentials_name = '{{ credentials_name }}' ; /*+ create */ INSERT INTO databricks_account.provisioning.credentials ( @@ -32,11 +31,9 @@ credentials_id, JSON_EXTRACT(aws_credentials, '$.sts_role.external_id') as external_id FROM databricks_account.provisioning.credentials WHERE account_id = '{{ account_id }}' -AND credentials_name = '{{ credentials_name }}' -; +AND credentials_name = '{{ credentials_name }}'; /*+ delete */ DELETE FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ account_id }}' --required -AND credentials_id = '{{ credentials_id }}' --required -; \ No newline at end of file +WHERE account_id = '{{ account_id }}' +AND credentials_id = '{{ credentials_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_account/storage.iql b/examples/databricks/serverless/resources/databricks_account/storage.iql index f32f66d..3f7b5bf 100644 --- a/examples/databricks/serverless/resources/databricks_account/storage.iql +++ b/examples/databricks/serverless/resources/databricks_account/storage.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM databricks_account.provisioning.storage WHERE account_id = '{{ account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' -; +AND storage_configuration_name = '{{ storage_configuration_name }}'; /*+ create */ INSERT INTO databricks_account.provisioning.storage ( @@ -26,5 +25,4 @@ AND JSON_EXTRACT(root_bucket_info, '$.bucket_name') = '{{ root_bucket_info | fro /*+ delete */ DELETE FROM databricks_account.provisioning.storage WHERE account_id = '{{ account_id }}' -AND storage_configuration_id = '{{ 
storage_configuration_id }}' -; \ No newline at end of file +AND storage_configuration_id = '{{ storage_configuration_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_account/workspaces.iql b/examples/databricks/serverless/resources/databricks_account/workspaces.iql index db82e1a..5ddfc1f 100644 --- a/examples/databricks/serverless/resources/databricks_account/workspaces.iql +++ b/examples/databricks/serverless/resources/databricks_account/workspaces.iql @@ -2,8 +2,7 @@ SELECT count(*) as count FROM databricks_account.provisioning.workspaces WHERE account_id = '{{ account_id }}' -AND workspace_name = '{{ workspace_name }}' -; +AND workspace_name = '{{ workspace_name }}'; /*+ create */ INSERT INTO databricks_account.provisioning.workspaces ( @@ -38,8 +37,7 @@ AND workspace_id IN ( WHERE account_id = '{{ account_id }}' AND workspace_name = '{{ workspace_name }}' ) -AND update_mask = 'credentials_id,storage_configuration_id,compute_mode,pricing_tier' -; +AND update_mask = 'credentials_id,storage_configuration_id,compute_mode,pricing_tier'; /*+ statecheck, retries=5, retry_delay=10 */ SELECT count(*) as count @@ -50,8 +48,7 @@ storage_configuration_id = '{{ storage_configuration_id }}' AND workspace_name = '{{ workspace_name }}' AND aws_region = '{{ aws_region }}' AND pricing_tier = '{{ pricing_tier }}' AND -account_id = '{{ account_id }}' -; +account_id = '{{ account_id }}'; /*+ exports */ SELECT @@ -62,11 +59,9 @@ workspace_status, 'https://' || deployment_name || '.cloud.databricks.com' AS workspace_url FROM databricks_account.provisioning.workspaces WHERE account_id = '{{ account_id }}' -AND workspace_name = '{{ workspace_name }}' -; +AND workspace_name = '{{ workspace_name }}'; /*+ delete */ DELETE FROM databricks_account.provisioning.workspaces WHERE account_id = '{{ account_id }}' -AND workspace_id = '{{ workspace_id }}' -; \ No newline at end of file +AND workspace_id = '{{ workspace_id }}'; \ No newline at 
end of file diff --git a/examples/databricks/serverless/stackql_manifest.yml b/examples/databricks/serverless/stackql_manifest.yml index d9ff5a2..d4b67a2 100644 --- a/examples/databricks/serverless/stackql_manifest.yml +++ b/examples/databricks/serverless/stackql_manifest.yml @@ -276,14 +276,14 @@ resources: - workspace_status - workspace_url - - name: workspace_admins_group - file: databricks_account/account_groups.iql - props: - - name: display_name - value: "{{ stack_name }}-{{ stack_env }}-workspace-admins" - exports: - - id - - display_name + # - name: workspace_admins_group + # file: databricks_account/account_groups.iql + # props: + # - name: display_name + # value: "{{ stack_name }}-{{ stack_env }}-workspace-admins" + # exports: + # - id + # - display_name # - name: databricks_account/get_users # type: query diff --git a/examples/google/k8s-the-hard-way/README.md b/examples/google/k8s-the-hard-way/README.md index 4ef7189..2d61772 100644 --- a/examples/google/k8s-the-hard-way/README.md +++ b/examples/google/k8s-the-hard-way/README.md @@ -4,7 +4,7 @@ Based upon the [Kubernetes the Hard Way](https://github.com/kelseyhightower/kube ## about `stackql-deploy` -[`stackql-deploy`](https://pypi.org/project/stackql-deploy/) is a multi cloud deployment automation and testing framework which is an alternative to Terraform or similar IaC tools. `stackql-deploy` uses a declarative model/ELT based approach to cloud resource deployment (inspired by [`dbt`](https://www.getdbt.com/)). Advantages of `stackql-deploy` include: +[`stackql-deploy`](https://crates.io/crates/stackql-deploy) is a multi cloud deployment automation and testing framework which is an alternative to Terraform or similar IaC tools. `stackql-deploy` uses a declarative model/ELT based approach to cloud resource deployment (inspired by [`dbt`](https://www.getdbt.com/)). 
Advantages of `stackql-deploy` include: - declarative framework - no state file (state is determined from the target environment) diff --git a/examples/google/load-balanced-vms/README.md b/examples/google/load-balanced-vms/README.md index 486de76..b7174e7 100644 --- a/examples/google/load-balanced-vms/README.md +++ b/examples/google/load-balanced-vms/README.md @@ -1,72 +1,72 @@ -# example `stackql-deploy` stack - -Based upon the [__terraform-google-load-balanced-vms__](https://github.com/GoogleCloudPlatform/terraform-google-load-balanced-vms) project. - -![load balanced vms](https://raw.githubusercontent.com/GoogleCloudPlatform/terraform-google-load-balanced-vms/c3e9669856df44a7b7399a7119eda3ae9ce5a2fa/assets/load_balanced_vms_v1.svg) - -## about `stackql-deploy` - -[`stackql-deploy`](https://pypi.org/project/stackql-deploy/) is a multi cloud deployment automation and testing framework which is an alternative to Terraform or similar IaC tools. `stackql-deploy` uses a declarative model/ELT based approach to cloud resource deployment (inspired by [`dbt`](https://www.getdbt.com/)). Advantages of `stackql-deploy` include: - -- declarative framework -- no state file (state is determined from the target environment) -- multi-cloud/omni-cloud ready -- includes resource tests which can include secure config tests - -## instaling `stackql-deploy` - -`stackql-deploy` is installed as a python based CLI using... 
- -```bash -pip install stackql-deploy -# or -pip3 install stackql-deploy -``` -> __Note for macOS users__ -> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: -> ```bash -> python3 -m venv myenv -> source myenv/bin/activate -> pip install stackql-deploy -> ``` - -## getting started with `stackql-deploy` - -Once installed, use the `init` command to scaffold a sample project directory to get started: - -```bash -stackql-deploy init load-balanced-vms -``` - -this will create a directory named `load-balanced-vms` which can be updated for your stack, as you can see in this project. - -## deploying using `stackql-deploy` - -```bash -export GOOGLE_CREDENTIALS=$(cat ./testcreds/stackql-deploy-project-demo-service-account.json) -# deploy a stack -stackql-deploy build \ -examples\google\load-balanced-vms \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run \ ---log-level DEBUG - -# test a stack -stackql-deploy test \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run - -# teardown a stack -stackql-deploy teardown \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run -``` - - - +# example `stackql-deploy` stack + +Based upon the [__terraform-google-load-balanced-vms__](https://github.com/GoogleCloudPlatform/terraform-google-load-balanced-vms) project. + +![load balanced vms](https://raw.githubusercontent.com/GoogleCloudPlatform/terraform-google-load-balanced-vms/c3e9669856df44a7b7399a7119eda3ae9ce5a2fa/assets/load_balanced_vms_v1.svg) + +## about `stackql-deploy` + +[`stackql-deploy`](https://crates.io/crates/stackql-deploy) is a multi cloud deployment automation and testing framework which is an alternative to Terraform or similar IaC tools. `stackql-deploy` uses a declarative model/ELT based approach to cloud resource deployment (inspired by [`dbt`](https://www.getdbt.com/)). 
Advantages of `stackql-deploy` include: + +- declarative framework +- no state file (state is determined from the target environment) +- multi-cloud/omni-cloud ready +- includes resource tests which can include secure config tests + +## instaling `stackql-deploy` + +`stackql-deploy` is installed as a python based CLI using... + +```bash +pip install stackql-deploy +# or +pip3 install stackql-deploy +``` +> __Note for macOS users__ +> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: +> ```bash +> python3 -m venv myenv +> source myenv/bin/activate +> pip install stackql-deploy +> ``` + +## getting started with `stackql-deploy` + +Once installed, use the `init` command to scaffold a sample project directory to get started: + +```bash +stackql-deploy init load-balanced-vms +``` + +this will create a directory named `load-balanced-vms` which can be updated for your stack, as you can see in this project. + +## deploying using `stackql-deploy` + +```bash +export GOOGLE_CREDENTIALS=$(cat ./testcreds/stackql-deploy-project-demo-service-account.json) +# deploy a stack +stackql-deploy build \ +examples\google\load-balanced-vms \ +dev \ +-e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ +--dry-run \ +--log-level DEBUG + +# test a stack +stackql-deploy test \ +examples/google/k8s-the-hard-way \ +dev \ +-e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ +--dry-run + +# teardown a stack +stackql-deploy teardown \ +examples/google/k8s-the-hard-way \ +dev \ +-e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ +--dry-run +``` + + + stackql-deploy-project \ No newline at end of file diff --git a/examples/snowflake/entitlements/README.md b/examples/snowflake/entitlements/README.md index 78215d4..0c01cf6 100644 --- a/examples/snowflake/entitlements/README.md +++ b/examples/snowflake/entitlements/README.md @@ -6,7 +6,7 @@ see the following links for more information on `stackql`, `stackql-deploy` and - [`snowflake` provider 
docs](https://stackql.io/registry/snowflake) - [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) +- [`stackql-deploy` on crates.io](https://crates.io/crates/stackql-deploy) - [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) ## Overview diff --git a/src/commands/base.rs b/src/commands/base.rs index 8caf6e1..a469435 100644 --- a/src/commands/base.rs +++ b/src/commands/base.rs @@ -18,9 +18,8 @@ use crate::core::templating::{self, ParsedQuery}; use crate::core::utils::{ catch_error_and_exit, check_exports_as_statecheck_proxy, check_short_circuit, export_vars, flatten_returning_row, has_returning_clause, perform_retries, perform_retries_with_fields, - pull_providers, - run_callback_poll, run_ext_script, run_stackql_command, run_stackql_dml_returning, - run_stackql_query, show_query, + pull_providers, run_callback_poll, run_ext_script, run_stackql_command, + run_stackql_dml_returning, run_stackql_query, show_query, }; use crate::resource::manifest::{Manifest, Resource}; use crate::resource::validation::validate_manifest; @@ -181,6 +180,18 @@ impl CommandRunner { templating::render_query(&self.engine, resource_name, anchor, template, full_context) } + /// Try to render a query template, returning None if variables are missing. + /// Used for deferred rendering where this.* fields may not yet be available. + pub fn try_render_query( + &self, + resource_name: &str, + anchor: &str, + template: &str, + full_context: &HashMap, + ) -> Option { + templating::try_render_query(&self.engine, resource_name, anchor, template, full_context) + } + /// Check if a resource exists using the exists query. #[allow(clippy::too_many_arguments)] /// Check if a resource exists by running the exists query. 
@@ -683,6 +694,27 @@ impl CommandRunner { if exports.is_empty() { if ignore_missing_exports { + // During teardown, set all expected exports to so + // downstream queries can still render (the resource may + // already be partially deleted). + let mut fallback = HashMap::new(); + for item in expected_exports { + if let Some(s) = item.as_str() { + fallback.insert(s.to_string(), "".to_string()); + } else if let Some(map) = item.as_mapping() { + for (_, val) in map { + if let Some(v) = val.as_str() { + fallback.insert(v.to_string(), "".to_string()); + } + } + } + } + export_vars( + &mut self.global_context, + &resource.name, + &fallback, + protected_exports, + ); return; } show_query(true, exports_query); diff --git a/src/commands/build.rs b/src/commands/build.rs index 39c98ba..50e6ebb 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -10,7 +10,7 @@ use std::collections::HashMap; use std::time::Instant; use clap::{Arg, ArgMatches, Command}; -use log::info; +use log::{debug, info}; use crate::commands::base::CommandRunner; use crate::commands::common_args::{ @@ -21,7 +21,7 @@ use crate::core::config::get_resource_type; use crate::core::utils::catch_error_and_exit; use crate::utils::connection::create_client; use crate::utils::display::{print_unicode_box, BorderColor}; -use crate::utils::server::check_and_start_server; +use crate::utils::server::{check_and_start_server, stop_local_server}; /// Defines the `build` command for the CLI application. pub fn command() -> Command { @@ -94,14 +94,15 @@ pub fn execute(matches: &ArgMatches) { } else { println!("build complete"); } + + stop_local_server(); } /// Render the statecheck query template with the given context. macro_rules! 
render_statecheck { ($runner:expr, $resource_queries:expr, $resource:expr, $ctx:expr) => { $resource_queries.get("statecheck").map(|q| { - let rendered = - $runner.render_query(&$resource.name, "statecheck", &q.template, $ctx); + let rendered = $runner.render_query(&$resource.name, "statecheck", &q.template, $ctx); (rendered, q.options.clone()) }) }; @@ -110,9 +111,18 @@ macro_rules! render_statecheck { /// Render the exports query template with the given context. macro_rules! render_exports { ($runner:expr, $resource_queries:expr, $resource:expr, $ctx:expr) => { - $resource_queries - .get("exports") - .map(|q| $runner.render_query(&$resource.name, "exports", &q.template, $ctx)) + $resource_queries.get("exports").and_then(|q| { + match $runner.try_render_query(&$resource.name, "exports", &q.template, $ctx) { + Some(rendered) => Some(rendered), + None => { + debug!( + "exports query for [{}] deferred (unresolved variables)", + $resource.name + ); + None + } + } + }) }; } @@ -311,7 +321,8 @@ fn run_build( } } else { // Use statecheck as exists check (render with current ctx) - let statecheck_query = render_statecheck!(runner, resource_queries, resource, &full_context); + let statecheck_query = + render_statecheck!(runner, resource_queries, resource, &full_context); let sq = statecheck_query.as_ref().unwrap(); let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( @@ -335,7 +346,8 @@ fn run_build( is_correct_state = true; } else { // Re-render statecheck with (possibly enriched) context - let statecheck_query = render_statecheck!(runner, resource_queries, resource, &full_context); + let statecheck_query = + render_statecheck!(runner, resource_queries, resource, &full_context); let sq = statecheck_query.as_ref().unwrap(); let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( @@ -349,8 +361,12 @@ fn run_build( } } - // Re-render exports 
with enriched context - exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + // Re-render exports with enriched context (only if exists + // captured fields; otherwise defer until post-create). + if resource_exists { + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + } } else if exports_query_str.is_some() { // Flow 2: Optimized flow using exports as proxy info!( @@ -396,14 +412,18 @@ fn run_build( if fields.is_some() { apply_exists_fields(fields, &resource.name, &mut full_context); - exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); } + // Always try to render exports after fallback exists + // (needed for count-based exists where exports doesn't + // depend on this.* fields). + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); } else { resource_exists = false; } } } else if let Some(ref eq) = exists_query { - // Flow 3: Basic flow with only exists query + // Flow 3: exists query only (no statecheck rendered yet) let eq_opts = resource_queries.get("exists").unwrap(); let (exists, fields) = runner.check_if_resource_exists( resource, @@ -415,10 +435,39 @@ fn run_build( false, ); resource_exists = exists; + let has_fields = fields.is_some(); - if fields.is_some() { + if has_fields { apply_exists_fields(fields, &resource.name, &mut full_context); - exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + } + // Always try to render exports after exists + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + + // Determine correctness based on what's available: + if exists { + if let Some(ref eq_str) = exports_query_str { + // Use exports as statecheck proxy + info!( + "using exports query as statecheck proxy for [{}]", + resource.name + ); + let (state, proxy) = runner.check_state_using_exports_proxy( + resource, + eq_str, + exports_retries, + 
exports_retry_delay, + dry_run, + show_queries, + ); + is_correct_state = state; + if proxy.is_some() { + exports_result_from_proxy = proxy; + } + } else { + // No statecheck and no exports: exists IS the statecheck + is_correct_state = true; + } } } else { catch_error_and_exit( @@ -443,7 +492,48 @@ fn run_build( // Capture RETURNING * result. if let Some(ref row) = returning_row { + debug!("RETURNING payload for [{}]: {:?}", resource.name, row); runner.store_callback_data(&resource.name, row); + + // Apply return_vals mappings from manifest. + let mappings = resource.get_return_val_mappings("create"); + if !mappings.is_empty() { + let mut fields = HashMap::new(); + for (src, tgt) in &mappings { + if let Some(val) = row.get(src.as_str()) { + if !val.is_empty() && val != "null" { + info!( + "RETURNING [{}] for [{}] captured as [this.{}] = [{}]", + src, resource.name, tgt, val + ); + fields.insert(tgt.clone(), val.clone()); + } else { + catch_error_and_exit(&format!( + "return_vals for [{}]: field [{}] in RETURNING result \ + is null or empty.", + resource.name, src + )); + } + } else { + catch_error_and_exit(&format!( + "return_vals for [{}]: expected field [{}] not found in \ + RETURNING result. Ensure the create query includes \ + 'RETURNING *' or 'RETURNING {}'.", + resource.name, src, src + )); + } + } + apply_exists_fields(Some(fields), &resource.name, &mut full_context); + // Re-render exports/statecheck with the captured values + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + } + } else if !resource.get_return_val_mappings("create").is_empty() { + catch_error_and_exit(&format!( + "return_vals specified for [{}] create but no RETURNING data received. \ + Ensure the create query includes 'RETURNING *'.", + resource.name + )); } // Run callback:create block if present. @@ -496,7 +586,52 @@ fn run_build( // Capture RETURNING * result. 
if let Some(ref row) = returning_row { + debug!( + "RETURNING payload for [{}] (update): {:?}", + resource.name, row + ); runner.store_callback_data(&resource.name, row); + + // Apply return_vals mappings from manifest. + let mappings = resource.get_return_val_mappings("update"); + if !mappings.is_empty() { + let mut fields = HashMap::new(); + for (src, tgt) in &mappings { + if let Some(val) = row.get(src.as_str()) { + if !val.is_empty() && val != "null" { + info!( + "RETURNING [{}] for [{}] captured as [this.{}] = [{}]", + src, resource.name, tgt, val + ); + fields.insert(tgt.clone(), val.clone()); + } else { + catch_error_and_exit(&format!( + "return_vals for [{}]: field [{}] in RETURNING result \ + is null or empty.", + resource.name, src + )); + } + } else { + catch_error_and_exit(&format!( + "return_vals for [{}]: expected field [{}] not found in \ + RETURNING result. Ensure the update query includes \ + 'RETURNING *' or 'RETURNING {}'.", + resource.name, src, src + )); + } + } + apply_exists_fields(Some(fields), &resource.name, &mut full_context); + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + } + } else if !resource.get_return_val_mappings("update").is_empty() + && is_created_or_updated + { + catch_error_and_exit(&format!( + "return_vals specified for [{}] update but no RETURNING data received. \ + Ensure the update query includes 'RETURNING *'.", + resource.name + )); } // Run callback:update block if present. @@ -536,26 +671,57 @@ fn run_build( // Post-deploy state check if is_created_or_updated { + // Check if return_vals already captured fields from RETURNING. + // If so, skip the post-create exists re-run to save API calls. + let op = if !resource_exists { "create" } else { "update" }; + let has_return_vals = !resource.get_return_val_mappings(op).is_empty(); + // After create/update, re-run the exists query to capture // this.* fields (e.g. 
identifier) that are needed by the - // statecheck and exports queries. - if let Some(ref eq) = exists_query { - let eq_opts = resource_queries.get("exists").unwrap(); - let (_exists, fields) = runner.check_if_resource_exists( - resource, - &eq.0, - eq_opts.options.retries.max(3), - eq_opts.options.retry_delay.max(5), - dry_run, - show_queries, - false, - ); - apply_exists_fields(fields, &resource.name, &mut full_context); - // Re-render exports with the newly captured fields - exports_query_str = render_exports!(runner, resource_queries, resource, &full_context); + // statecheck and exports queries — but skip this if + // return_vals already provided them. + if !has_return_vals { + if let Some(ref eq) = exists_query { + let eq_opts = resource_queries.get("exists").unwrap(); + let (post_exists, fields) = runner.check_if_resource_exists( + resource, + &eq.0, + eq_opts.options.retries.max(3), + eq_opts.options.retry_delay.max(5), + dry_run, + show_queries, + false, + ); + apply_exists_fields(fields, &resource.name, &mut full_context); + + // Always try to render exports after post-create exists + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + + // If exists confirms the resource is present and there is + // no statecheck or exports query, the exists query IS + // the statecheck: a successful re-run confirms the + // resource was created/updated successfully. 
+ if post_exists + && !resource_queries.contains_key("statecheck") + && exports_query_str.is_none() + { + is_correct_state = true; + } + } } - if let Some(sq) = render_statecheck!(runner, resource_queries, resource, &full_context) { + debug!( + "post-deploy for [{}]: is_correct_state={}, has_statecheck={}, exports_query_str={}", + resource.name, + is_correct_state, + resource_queries.contains_key("statecheck"), + if exports_query_str.is_some() { "Some" } else { "None" } + ); + + if let Some(sq) = + render_statecheck!(runner, resource_queries, resource, &full_context) + { let sq_opts = resource_queries.get("statecheck").unwrap(); is_correct_state = runner.check_if_resource_is_correct_state( resource, diff --git a/src/commands/teardown.rs b/src/commands/teardown.rs index 1e8ed71..561ae1a 100644 --- a/src/commands/teardown.rs +++ b/src/commands/teardown.rs @@ -16,10 +16,9 @@ use crate::commands::common_args::{ FailureAction, }; use crate::core::config::get_resource_type; -use crate::core::utils::catch_error_and_exit; use crate::utils::connection::create_client; use crate::utils::display::{print_unicode_box, BorderColor}; -use crate::utils::server::check_and_start_server; +use crate::utils::server::{check_and_start_server, stop_local_server}; /// Configures the `teardown` command for the CLI application. pub fn command() -> Command { @@ -80,6 +79,8 @@ pub fn execute(matches: &ArgMatches) { ); println!("teardown complete (dry run: {})", is_dry_run); + + stop_local_server(); } /// Collect exports for all resources before teardown. 
@@ -95,7 +96,7 @@ fn collect_exports(runner: &mut CommandRunner, show_queries: bool, dry_run: bool let res_type = get_resource_type(resource).to_string(); info!("getting exports for resource [{}]", resource.name); - let full_context = runner.get_full_context(resource); + let mut full_context = runner.get_full_context(resource); if res_type == "command" { continue; @@ -107,10 +108,32 @@ fn collect_exports(runner: &mut CommandRunner, show_queries: bool, dry_run: bool (Some(iq), 1u32, 0u32) } else { let queries = runner.get_queries(resource, &full_context); + // Run exists query first to capture this.* fields needed by + // exports (e.g. this.identifier). + if let Some(eq) = queries.get("exists") { + let rendered = + runner.render_query(&resource.name, "exists", &eq.template, &full_context); + let (_exists, fields) = runner.check_if_resource_exists( + resource, + &rendered, + eq.options.retries, + eq.options.retry_delay, + dry_run, + show_queries, + false, + ); + if let Some(ref f) = fields { + for (k, v) in f { + full_context.insert(format!("{}.{}", resource.name, k), v.clone()); + } + } + } if let Some(eq) = queries.get("exports") { let rendered = runner.render_query(&resource.name, "exports", &eq.template, &full_context); - (Some(rendered), eq.options.retries, eq.options.retry_delay) + // During teardown use minimal retries - the resource may + // already be partially deleted. 
+ (Some(rendered), 1u32, 0u32) } else { (None, 1u32, 0u32) } @@ -201,8 +224,8 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ exists_query_str, exists_retries, exists_retry_delay, - postdelete_retries, - postdelete_retry_delay, + _postdelete_retries, + _postdelete_retry_delay, ) = if let Some(eq) = resource_queries.get("exists") { let rendered = runner.render_query(&resource.name, "exists", &eq.template, &full_context); @@ -334,21 +357,26 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ continue; } - // Confirm deletion - let (resource_deleted, _) = runner.check_if_resource_exists( + // Confirm deletion - single check, don't poll excessively. + // Cloud Control deletes are async; if the resource is still + // visible on the first check that's expected, move on. + let (still_exists, _) = runner.check_if_resource_exists( resource, &exists_query_str, - postdelete_retries, - postdelete_retry_delay, + 1, + 0, dry_run, show_queries, true, // delete_test ); - if resource_deleted { + if !still_exists { info!("successfully deleted {}", resource.name); - } else if !dry_run { - catch_error_and_exit(&format!("failed to delete {}.", resource.name)); + } else { + info!( + "[{}] delete dispatched (resource may still be deleting asynchronously)", + resource.name + ); } } diff --git a/src/commands/test.rs b/src/commands/test.rs index 8b6024c..51a8942 100644 --- a/src/commands/test.rs +++ b/src/commands/test.rs @@ -21,7 +21,7 @@ use crate::core::config::get_resource_type; use crate::core::utils::catch_error_and_exit; use crate::utils::connection::create_client; use crate::utils::display::{print_unicode_box, BorderColor}; -use crate::utils::server::check_and_start_server; +use crate::utils::server::{check_and_start_server, stop_local_server}; /// Configures the `test` command for the CLI application. 
pub fn command() -> Command { @@ -90,6 +90,8 @@ pub fn execute(matches: &ArgMatches) { ); println!("tests complete (dry run: {})", is_dry_run); + + stop_local_server(); } /// Main test workflow matching Python's StackQLTestRunner.run(). @@ -129,7 +131,7 @@ fn run_test( catch_error_and_exit(&format!("unknown resource type: {}", res_type)); } - let full_context = runner.get_full_context(resource); + let mut full_context = runner.get_full_context(resource); // Get test queries (templates only, not yet rendered) let (test_queries, inline_query) = @@ -140,7 +142,39 @@ fn run_test( (runner.get_queries(resource, &full_context), None) }; - // Render statecheck JIT if present + // Run the exists query first if present to capture this.* fields + // (e.g. identifier) before rendering statecheck/exports. + let mut exists_is_statecheck = false; + if let Some(eq) = test_queries.get("exists") { + let rendered = + runner.render_query(&resource.name, "exists", &eq.template, &full_context); + let (exists, fields) = runner.check_if_resource_exists( + resource, + &rendered, + eq.options.retries, + eq.options.retry_delay, + dry_run, + show_queries, + false, + ); + let has_fields = fields.is_some(); + if let Some(ref f) = fields { + for (k, v) in f { + full_context.insert(format!("{}.{}", resource.name, k), v.clone()); + } + } + // If exists exports a variable and there is no statecheck or + // exports query, the exists query IS the statecheck. 
+ if exists + && has_fields + && !test_queries.contains_key("statecheck") + && !test_queries.contains_key("exports") + { + exists_is_statecheck = true; + } + } + + // Render statecheck JIT (after exists fields are available) let statecheck_rendered = test_queries.get("statecheck").map(|q| { let rendered = runner.render_query(&resource.name, "statecheck", &q.template, &full_context); @@ -153,7 +187,7 @@ fn run_test( .get("statecheck") .map_or(0, |q| q.options.retry_delay); - // Render exports JIT if present + // Render exports JIT (after exists fields are available) let mut exports_query_str = test_queries .get("exports") .map(|q| runner.render_query(&resource.name, "exports", &q.template, &full_context)); @@ -205,6 +239,14 @@ fn run_test( ); is_correct_state = state; exports_result_from_proxy = proxy; + } else if exists_is_statecheck { + // Exists query exported a variable and there is no statecheck + // or exports; the successful exists check confirms the state. + info!( + "exists query with captured fields confirms state for [{}]", + resource.name + ); + is_correct_state = true; } else { catch_error_and_exit( "iql file must include either 'statecheck' or 'exports' anchor for validation.", diff --git a/src/core/config.rs b/src/core/config.rs index 895d777..b59e77f 100644 --- a/src/core/config.rs +++ b/src/core/config.rs @@ -13,6 +13,8 @@ use log::{debug, error}; use serde_json::Value as JsonValue; use serde_yaml::Value as YamlValue; +use crate::core::utils::catch_error_and_exit; + use crate::resource::manifest::{Manifest, Property}; use crate::template::engine::TemplateEngine; @@ -81,26 +83,44 @@ pub fn render_value( _ => format!("{:?}", k), }; let rendered = render_value(engine, v, context); - // Try to parse as JSON value, otherwise use as string - match serde_json::from_str::(&rendered) { - Ok(json_val) => { - rendered_map.insert(key, json_val); - } - Err(_) => { - rendered_map.insert(key, JsonValue::String(rendered)); + // Preserve the original YAML type: if 
the source value was a + // YAML string, keep it as a JSON string even if its content + // looks like a number (e.g. "-1"). Only attempt JSON + // re-parsing for values that were originally complex types + // (mappings, sequences) or template expressions. + let json_val = if matches!(v, YamlValue::String(_)) + && !rendered.starts_with('{') + && !rendered.starts_with('[') + { + JsonValue::String(rendered) + } else { + match serde_json::from_str::(&rendered) { + Ok(jv) => jv, + Err(_) => JsonValue::String(rendered), } - } + }; + rendered_map.insert(key, json_val); } serde_json::to_string(&JsonValue::Object(rendered_map)).unwrap_or_default() } YamlValue::Sequence(seq) => { let mut rendered_items = Vec::new(); - for item in seq { + for (idx, item) in seq.iter().enumerate() { let rendered = render_value(engine, item, context); - match serde_json::from_str::(&rendered) { - Ok(json_val) => rendered_items.push(json_val), - Err(_) => rendered_items.push(JsonValue::String(rendered)), - } + // Same type-preservation logic for sequence items. 
+ let _ = idx; + let json_val = if matches!(item, YamlValue::String(_)) + && !rendered.starts_with('{') + && !rendered.starts_with('[') + { + JsonValue::String(rendered) + } else { + match serde_json::from_str::(&rendered) { + Ok(jv) => jv, + Err(_) => JsonValue::String(rendered), + } + }; + rendered_items.push(json_val); } serde_json::to_string(&rendered_items).unwrap_or_default() } @@ -399,13 +419,10 @@ pub fn get_resource_type(resource: &crate::resource::manifest::Resource) -> &str let res_type = resource.r#type.as_str(); match res_type { "resource" | "query" | "script" | "multi" | "command" => res_type, - _ => { - error!( - "Resource type must be 'resource', 'script', 'multi', 'query', or 'command', got '{}'", - res_type - ); - process::exit(1); - } + _ => catch_error_and_exit(&format!( + "Resource type must be 'resource', 'script', 'multi', 'query', or 'command', got '{}'", + res_type + )), } } @@ -437,6 +454,7 @@ mod tests { r#if: None, skip_validation: None, auth: None, + return_vals: None, } } diff --git a/src/core/templating.rs b/src/core/templating.rs index 39ac506..74cb24b 100644 --- a/src/core/templating.rs +++ b/src/core/templating.rs @@ -353,6 +353,39 @@ pub fn render_query( } } +/// Try to render a query template, returning None if variables are missing. +/// Used for deferred rendering where this.* fields may not yet be available. 
+pub fn try_render_query( + engine: &TemplateEngine, + res_name: &str, + anchor: &str, + template: &str, + context: &HashMap, +) -> Option { + let temp_context = prepare_query_context(context); + + let expanded = match preprocess_this_prefix(template, res_name) { + Ok(t) => t, + Err(_) => return None, + }; + + let mut ctx = temp_context; + let compat_query = preprocess_jinja2_compat(&expanded); + let processed_query = preprocess_inline_dicts(&compat_query, &mut ctx); + + let template_name = format!("{}__{}", res_name, anchor); + match engine.render_with_filters(&template_name, &processed_query, &ctx) { + Ok(rendered) => { + debug!( + "[{}] [{}] rendered query:\n\n{}\n", + res_name, anchor, rendered + ); + Some(rendered) + } + Err(_) => None, + } +} + /// Get queries for a resource: load from file, parse anchors. /// Templates are NOT rendered here — rendering is deferred to when /// each query is actually needed (JIT rendering). diff --git a/src/core/utils.rs b/src/core/utils.rs index 8ed9620..227bf50 100644 --- a/src/core/utils.rs +++ b/src/core/utils.rs @@ -19,6 +19,8 @@ use crate::utils::query::{execute_query, QueryResult}; /// Exit with error message. Matches Python's `catch_error_and_exit`. pub fn catch_error_and_exit(msg: &str) -> ! 
{ error!("{}", msg); + // Stop the local server before exiting to avoid stale sessions + crate::utils::server::stop_local_server(); eprintln!("stackql-deploy operation failed"); process::exit(1); } @@ -64,7 +66,7 @@ pub fn run_stackql_query( } if rows.is_empty() { - debug!("Stackql query executed successfully, retrieved 0 items."); + debug!("Stackql query executed successfully, retrieved 0 items.\n\nresults:\n\n[]\n"); if attempt < retries { thread::sleep(Duration::from_secs(delay as u64)); attempt += 1; @@ -113,7 +115,12 @@ pub fn run_stackql_query( // Check for count query if let Some(count_str) = result_maps[0].get("count") { - debug!("Stackql query executed successfully, count: {}", count_str); + if let Ok(json) = serde_json::to_string_pretty(&result_maps) { + debug!( + "Stackql query executed successfully, count: {}\n\nresults:\n\n{}\n", + count_str, json + ); + } if let Ok(count) = count_str.parse::() { if count > 1 { catch_error_and_exit(&format!( @@ -126,10 +133,12 @@ pub fn run_stackql_query( } } - debug!( - "Stackql query executed successfully, retrieved {} items.", - result_maps.len() - ); + if let Ok(json) = serde_json::to_string_pretty(&result_maps) { + debug!( + "Stackql query executed successfully, retrieved {} items.\n\nresults:\n\n{}\n", + result_maps.len(), json + ); + } return result_maps; } QueryResult::Command(msg) => { @@ -364,9 +373,32 @@ pub fn run_test_with_fields( } // If no count field, for non-delete test consider any result as exists - // and capture all returned fields + // and capture all returned fields. + // However, if multiple rows are returned this is a fatal error — the + // exists (identifier) query must return exactly 0 or 1 rows. + if !delete_test && result.len() > 1 { + catch_error_and_exit(&format!( + "Exists query for [{}] returned {} rows (expected 0 or 1). 
\ + This indicates an ambiguous resource identifier — fix the \ + exists query or tag configuration so it returns a single row.", + resource_name, + result.len() + )); + } + + // However, if all non-trivial field values are "null" or empty, treat + // as "does not exist" (e.g. a CASE WHEN that returned NULL). if !delete_test && !result.is_empty() { - let fields = Some(result[0].clone()); + let row = &result[0]; + let all_null = row.values().all(|v| v == "null" || v.is_empty()); + if all_null { + debug!( + "Test result false for [{}]: all field values are null/empty", + resource_name + ); + return (false, None); + } + let fields = Some(row.clone()); return (true, fields); } diff --git a/src/resource/manifest.rs b/src/resource/manifest.rs index 438cd8b..7c44829 100644 --- a/src/resource/manifest.rs +++ b/src/resource/manifest.rs @@ -140,6 +140,47 @@ pub struct Resource { /// Auth configuration for the resource #[serde(default)] pub auth: Option, + + /// Return value mappings from mutation operations (create, update, delete). + /// Each operation maps to a list of field specs: + /// - `Identifier: identifier` (rename: capture `Identifier` as `this.identifier`) + /// - `ErrorCode` (direct: capture as `this.ErrorCode`) + #[serde(default)] + pub return_vals: Option>>, +} + +impl Resource { + /// Parse `return_vals` for a given operation (create, update, delete). + /// Returns a list of (source_field, target_field) pairs. 
+ /// - `Identifier: identifier` -> ("Identifier", "identifier") + /// - `ErrorCode` (string) -> ("ErrorCode", "ErrorCode") + pub fn get_return_val_mappings(&self, operation: &str) -> Vec<(String, String)> { + let Some(ref rv) = self.return_vals else { + return vec![]; + }; + let Some(specs) = rv.get(operation) else { + return vec![]; + }; + let mut mappings = Vec::new(); + for spec in specs { + match spec { + serde_yaml::Value::String(s) => { + // Direct capture: field name used as-is + mappings.push((s.clone(), s.clone())); + } + serde_yaml::Value::Mapping(m) => { + // Rename: { SourceField: target_name } + for (k, v) in m { + if let (Some(src), Some(tgt)) = (k.as_str(), v.as_str()) { + mappings.push((src.to_string(), tgt.to_string())); + } + } + } + _ => {} + } + } + mappings + } } /// Default resource type value diff --git a/src/resource/validation.rs b/src/resource/validation.rs index 4f456fe..bc8b3d1 100644 --- a/src/resource/validation.rs +++ b/src/resource/validation.rs @@ -106,6 +106,7 @@ mod tests { r#if: None, skip_validation: None, auth: None, + return_vals: None, }) .collect(), exports: vec![], diff --git a/src/template/engine.rs b/src/template/engine.rs index 7c30aa3..3f66ff2 100644 --- a/src/template/engine.rs +++ b/src/template/engine.rs @@ -228,6 +228,7 @@ fn register_custom_filters(tera: &mut Tera) { tera.register_filter("generate_patch_document", filter_generate_patch_document); tera.register_filter("sql_list", filter_sql_list); tera.register_filter("sql_escape", filter_sql_escape); + tera.register_filter("to_aws_tag_filters", filter_to_aws_tag_filters); } /// from_json filter: parse a JSON string into a Tera value @@ -406,6 +407,32 @@ fn filter_sql_escape( Ok(tera::to_value(escaped)?) } +/// to_aws_tag_filters filter: converts a JSON array of AWS tags +/// from `[{"Key":"k","Value":"v"},...]` format to the AWS Resource Groups +/// Tagging API TagFilters format `[{"Key":"k","Values":["v"]},...]`. 
+/// Input is a string (the rendered global_tags JSON). +fn filter_to_aws_tag_filters( + value: &tera::Value, + _args: &HashMap, +) -> tera::Result { + let s = value + .as_str() + .ok_or_else(|| tera::Error::msg("to_aws_tag_filters: expected a string"))?; + let tags: Vec = serde_json::from_str(s) + .map_err(|e| tera::Error::msg(format!("to_aws_tag_filters: invalid JSON: {}", e)))?; + let filters: Vec = tags + .into_iter() + .filter_map(|tag| { + let key = tag.get("Key")?.as_str()?.to_string(); + let value = tag.get("Value")?.as_str()?.to_string(); + Some(serde_json::json!({"Key": key, "Values": [value]})) + }) + .collect(); + let result = serde_json::to_string(&filters) + .map_err(|e| tera::Error::msg(format!("to_aws_tag_filters: serialization error: {}", e)))?; + Ok(tera::to_value(result)?) +} + /// Unit tests for template engine functionality. #[cfg(test)] mod tests { diff --git a/src/utils/server.rs b/src/utils/server.rs index e2a31e0..fbf613a 100644 --- a/src/utils/server.rs +++ b/src/utils/server.rs @@ -387,33 +387,48 @@ pub fn check_and_start_server() { "Host '{}' is local; checking if server is running on port {}...", host, port ); + // Always stop any existing server to ensure a clean session + // with the current environment (auth creds, provider versions, etc.) if is_server_running(port) { - info!("Local server is already running on port {}.", port); - } else { - debug!( - "Server not detected on port {}; will attempt to start it.", + info!( + "Stopping existing server on port {} for clean session.", port ); - info!("Server not running. 
Starting server..."); - - let options = StartServerOptions { - host: host.to_string(), - port, - ..Default::default() - }; + if let Err(e) = stop_server(port) { + warn!("Failed to stop existing server: {}", e); + } + // Brief pause to allow the port to be released + thread::sleep(Duration::from_secs(1)); + } - debug!( - "StartServerOptions: host={}, port={}", - options.host, options.port - ); + info!("Starting server..."); + let options = StartServerOptions { + host: host.to_string(), + port, + ..Default::default() + }; - if let Err(e) = start_server(&options) { - error!("Failed to start server: {}", e); - process::exit(1); - } + if let Err(e) = start_server(&options) { + error!("Failed to start server: {}", e); + process::exit(1); } } else { debug!("Host '{}' is remote; skipping local server start.", host); info!("Using remote server {}:{}", host, port); } } + +/// Stops the local server after an operation completes. +/// Called at the end of build, test, and teardown to ensure +/// the server doesn't linger with stale auth context. 
+pub fn stop_local_server() { + let host = server_host(); + let port = server_port(); + + if LOCAL_SERVER_ADDRESSES.contains(&host) && is_server_running(port) { + debug!("Stopping local server after operation."); + if let Err(e) = stop_server(port) { + warn!("Failed to stop server after operation: {}", e); + } + } +} diff --git a/template-hub/aws/starter/resources/example_vpc.iql.template b/template-hub/aws/starter/resources/example_vpc.iql.template index ef223c2..5a28f6e 100644 --- a/template-hub/aws/starter/resources/example_vpc.iql.template +++ b/template-hub/aws/starter/resources/example_vpc.iql.template @@ -1,44 +1,56 @@ -/*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{% raw %}{{ region }}{% endraw %}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{% raw %}{{ stack_name }}{% endraw %}"]},{"Key":"stackql:stack-env","Values":["{% raw %}{{ stack_env }}{% endraw %}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' -AND ResourceTypeFilters = '["ec2:vpc"]'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, cidr_block -FROM awscc.ec2.vpcs -WHERE Identifier = '{% raw %}{{ this.identifier }}{% endraw %}' -AND region = '{% raw %}{{ region }}{% endraw %}' -) t -WHERE cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}'; - -/*+ create */ -INSERT INTO awscc.ec2.vpcs ( - CidrBlock, - Tags, - EnableDnsSupport, - EnableDnsHostnames, - region -) -SELECT - '{% raw %}{{ vpc_cidr_block }}{% endraw %}', - '{% raw %}{{ vpc_tags }}{% endraw %}', - true, - true, - '{% raw %}{{ region }}{% endraw %}'; - -/*+ exports, retries=5, retry_delay=5 */ -SELECT split_part(ResourceARN, '/', 2) as vpc_id, -'{% raw %}{{ vpc_cidr_block }}{% endraw %}' as vpc_cidr_block -FROM awscc.tagging.tagged_resources -WHERE region = '{% raw %}{{ region }}{% endraw %}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{% raw %}{{ stack_name }}{% endraw 
%}"]},{"Key":"stackql:stack-env","Values":["{% raw %}{{ stack_env }}{% endraw %}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' -AND ResourceTypeFilters = '["ec2:vpc"]'; - -/*+ delete */ -DELETE FROM awscc.ec2.vpcs -WHERE data__Identifier = '{% raw %}{{ vpc_id }}{% endraw %}' -AND region = '{% raw %}{{ region }}{% endraw %}'; +/*+ exists */ +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as vpc_id + FROM awscc.tagging.tagged_resources + WHERE region = '{% raw %}{{ region }}{% endraw %}' + AND TagFilters = '{% raw %}{{ global_tags | to_aws_tag_filters }}{% endraw %}' + AND ResourceTypeFilters = '["ec2:vpc"]' +), +vpcs AS +( + SELECT vpc_id + FROM awscc.ec2.vpcs_list_only + WHERE region = '{% raw %}{{ region }}{% endraw %}' +) +SELECT r.vpc_id +FROM vpcs r +INNER JOIN tagged_resources tr +ON r.vpc_id = tr.vpc_id; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +AWS_POLICY_EQUAL(tags, '{% raw %}{{ vpc_tags }}{% endraw %}') as test_tags +FROM awscc.ec2.vpcs +WHERE Identifier = '{% raw %}{{ this.vpc_id }}{% endraw %}' +AND region = '{% raw %}{{ region }}{% endraw %}' +AND cidr_block = '{% raw %}{{ vpc_cidr_block }}{% endraw %}' +) t +WHERE test_tags = 1; + +/*+ create */ +INSERT INTO awscc.ec2.vpcs ( + CidrBlock, + Tags, + EnableDnsSupport, + EnableDnsHostnames, + region +) +SELECT + '{% raw %}{{ vpc_cidr_block }}{% endraw %}', + '{% raw %}{{ vpc_tags }}{% endraw %}', + true, + true, + '{% raw %}{{ region }}{% endraw %}' +RETURNING *; + +/*+ exports */ +SELECT '{% raw %}{{ this.vpc_id }}{% endraw %}' as vpc_id, +'{% raw %}{{ vpc_cidr_block }}{% endraw %}' as vpc_cidr_block; + +/*+ delete */ +DELETE FROM awscc.ec2.vpcs +WHERE Identifier = '{% raw %}{{ vpc_id }}{% endraw %}' +AND region = '{% raw %}{{ region }}{% endraw %}'; diff --git a/template-hub/aws/starter/stackql_manifest.yml.template b/template-hub/aws/starter/stackql_manifest.yml.template index d2a8810..ad7fd9f 100644 --- 
a/template-hub/aws/starter/stackql_manifest.yml.template +++ b/template-hub/aws/starter/stackql_manifest.yml.template @@ -1,40 +1,40 @@ -# -# aws starter project manifest file, add and update values as needed -# -version: 1 -name: "{{ stack_name }}" -description: description for "{{ stack_name }}" -providers: - - awscc -globals: - - name: region - description: aws region - value: "{% raw %}{{ AWS_REGION }}{% endraw %}" - - name: global_tags - value: - - Key: 'stackql:stack-name' - Value: "{% raw %}{{ stack_name }}{% endraw %}" - - Key: 'stackql:stack-env' - Value: "{% raw %}{{ stack_env }}{% endraw %}" - - Key: 'stackql:resource-name' - Value: "{% raw %}{{ resource_name }}{% endraw %}" -resources: - - name: example_vpc - description: example vpc resource - props: - - name: vpc_cidr_block - values: - prd: - value: "10.0.0.0/16" - sit: - value: "10.1.0.0/16" - dev: - value: "10.2.0.0/16" - - name: vpc_tags - value: - - Key: Name - Value: "{% raw %}{{ stack_name }}-{{ stack_env }}-vpc{% endraw %}" - merge: ['global_tags'] - exports: - - vpc_id - - vpc_cidr_block +# +# aws starter project manifest file, add and update values as needed +# +version: 1 +name: "{{ stack_name }}" +description: description for "{{ stack_name }}" +providers: + - awscc +globals: + - name: region + description: aws region + value: "{% raw %}{{ AWS_REGION }}{% endraw %}" + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{% raw %}{{ stack_name }}{% endraw %}" + - Key: 'stackql:stack-env' + Value: "{% raw %}{{ stack_env }}{% endraw %}" + - Key: 'stackql:resource-name' + Value: "{% raw %}{{ resource_name }}{% endraw %}" +resources: + - name: example_vpc + description: example vpc resource + props: + - name: vpc_cidr_block + values: + prd: + value: "10.0.0.0/16" + sit: + value: "10.1.0.0/16" + dev: + value: "10.2.0.0/16" + - name: vpc_tags + value: + - Key: Name + Value: "{% raw %}{{ stack_name }}-{{ stack_env }}-vpc{% endraw %}" + merge: ['global_tags'] + exports: + - vpc_id + 
- vpc_cidr_block diff --git a/template-hub/azure/starter/README.md.template b/template-hub/azure/starter/README.md.template index da749d3..070996a 100644 --- a/template-hub/azure/starter/README.md.template +++ b/template-hub/azure/starter/README.md.template @@ -1,63 +1,63 @@ -# `stackql-deploy` starter project for `azure` - -> for starter projects using other providers, try `stackql-deploy {{ stack_name }} --provider=aws` or `stackql-deploy {{ stack_name }} --provider=google` - -see the following links for more information on `stackql`, `stackql-deploy` and the `azure` provider: - -- [`azure` provider docs](https://stackql.io/registry/azure) -- [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) -- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) - -## Overview - -__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `azure` and `aws` for example. - -## Prerequisites - -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `azure` provider, `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET` must be set (or their must be an authenticated session on the host using `az login`), for more information on authentication to `azure` see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). - -## Usage - -Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. 
The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. - -The syntax for the `stackql-deploy` command is as follows: - -```bash -stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] -``` - -### Deploying a stack - -For example, to deploy the stack named {{ stack_name }} to an environment labeled `sit`, run the following: - -```bash -stackql-deploy build {{ stack_name }} sit \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 -``` - -Use the `--dry-run` flag to view the queries to be run without actually running them, for example: - -```bash -stackql-deploy build {{ stack_name }} sit \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 \ ---dry-run -``` - -### Testing a stack - -To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): - -```bash -stackql-deploy test {{ stack_name }} sit \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 -``` - -### Tearing down a stack - -To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: - -```bash -stackql-deploy teardown {{ stack_name }} sit \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +# `stackql-deploy` starter project for `azure` + +> for starter projects using other providers, try `stackql-deploy {{ stack_name }} --provider=aws` or `stackql-deploy {{ stack_name }} --provider=google` + +see the following links for more information on `stackql`, `stackql-deploy` and the `azure` provider: + +- [`azure` provider docs](https://stackql.io/registry/azure) +- [`stackql`](https://github.com/stackql/stackql) +- [`stackql-deploy` on crates.io](https://crates.io/crates/stackql-deploy) +- [`stackql-deploy` GitHub 
repo](https://github.com/stackql/stackql-deploy) + +## Overview + +__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `azure` and `aws` for example. + +## Prerequisites + +This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `azure` provider, `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET` must be set (or their must be an authenticated session on the host using `az login`), for more information on authentication to `azure` see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). + +## Usage + +Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. 
+ +The syntax for the `stackql-deploy` command is as follows: + +```bash +stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] +``` + +### Deploying a stack + +For example, to deploy the stack named {{ stack_name }} to an environment labeled `sit`, run the following: + +```bash +stackql-deploy build {{ stack_name }} sit \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +``` + +Use the `--dry-run` flag to view the queries to be run without actually running them, for example: + +```bash +stackql-deploy build {{ stack_name }} sit \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 \ +--dry-run +``` + +### Testing a stack + +To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): + +```bash +stackql-deploy test {{ stack_name }} sit \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +``` + +### Tearing down a stack + +To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: + +```bash +stackql-deploy teardown {{ stack_name }} sit \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 ``` \ No newline at end of file diff --git a/template-hub/google/starter/README.md.template b/template-hub/google/starter/README.md.template index 1b3c2cb..162d1f1 100644 --- a/template-hub/google/starter/README.md.template +++ b/template-hub/google/starter/README.md.template @@ -1,63 +1,63 @@ -# `stackql-deploy` starter project for `google` - -> for starter projects using other providers, try `stackql-deploy {{ stack_name }} --provider=aws` or `stackql-deploy {{ stack_name }} --provider=azure` - -see the following links for more information on `stackql`, `stackql-deploy` and the `google` provider: - -- [`google` provider docs](https://stackql.io/registry/google) -- [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` PyPI home 
page](https://pypi.org/project/stackql-deploy/) -- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) - -## Overview - -__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `azure` and `aws` for example. - -## Prerequisites - -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `google` provider, `GOOGLE_CREDENTIALS` needs to be set at runtime (from the local machine using export GOOGLE_CREDENTIALS=cat creds/my-sa-key.json for example or as a CI variable/secret). - -## Usage - -Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. 
- -The syntax for the `stackql-deploy` command is as follows: - -```bash -stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] -``` - -### Deploying a stack - -For example, to deploy the stack named {{ stack_name }} to an environment labeled `sit`, run the following: - -```bash -stackql-deploy build {{ stack_name }} sit \ --e MY_PROJECT_NAME={{ stack_name }} -``` - -Use the `--dry-run` flag to view the queries to be run without actually running them, for example: - -```bash -stackql-deploy build {{ stack_name }} sit \ --e MY_PROJECT_NAME={{ stack_name }} \ ---dry-run -``` - -### Testing a stack - -To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): - -```bash -stackql-deploy test {{ stack_name }} sit \ --e MY_PROJECT_NAME={{ stack_name }} -``` - -### Tearing down a stack - -To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: - -```bash -stackql-deploy teardown {{ stack_name }} sit \ --e MY_PROJECT_NAME={{ stack_name }} +# `stackql-deploy` starter project for `google` + +> for starter projects using other providers, try `stackql-deploy {{ stack_name }} --provider=aws` or `stackql-deploy {{ stack_name }} --provider=azure` + +see the following links for more information on `stackql`, `stackql-deploy` and the `google` provider: + +- [`google` provider docs](https://stackql.io/registry/google) +- [`stackql`](https://github.com/stackql/stackql) +- [`stackql-deploy` on crates.io](https://crates.io/crates/stackql-deploy) +- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) + +## Overview + +__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. 
__`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `azure` and `aws` for example. + +## Prerequisites + +This example requires `stackql-deploy` to be installed using __`cargo install stackql-deploy`__ (or see the other installation options on the [GitHub Releases](https://github.com/stackql/stackql-deploy-rs/releases) page). The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `google` provider, `GOOGLE_CREDENTIALS` needs to be set at runtime (from the local machine using export GOOGLE_CREDENTIALS=cat creds/my-sa-key.json for example or as a CI variable/secret). + +## Usage + +Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` folder. + +The syntax for the `stackql-deploy` command is as follows: + +```bash +stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] +``` + +### Deploying a stack + +For example, to deploy the stack named {{ stack_name }} to an environment labeled `sit`, run the following: + +```bash +stackql-deploy build {{ stack_name }} sit \ +-e MY_PROJECT_NAME={{ stack_name }} +``` + +Use the `--dry-run` flag to view the queries to be run without actually running them, for example: + +```bash +stackql-deploy build {{ stack_name }} sit \ +-e MY_PROJECT_NAME={{ stack_name }} \ +--dry-run +``` + +### Testing a stack + +To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): + +```bash +stackql-deploy test {{ stack_name }} sit \ +-e MY_PROJECT_NAME={{ stack_name }} +``` + +### Tearing down a stack + +To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: + 
+```bash +stackql-deploy teardown {{ stack_name }} sit \ +-e MY_PROJECT_NAME={{ stack_name }} ``` \ No newline at end of file diff --git a/website/docs/getting-started.md b/website/docs/getting-started.md index 9dd0824..c6af74d 100644 --- a/website/docs/getting-started.md +++ b/website/docs/getting-started.md @@ -1,215 +1,262 @@ ---- -id: getting-started -title: Getting Started -hide_title: false -hide_table_of_contents: false -description: A quick overview of how to get started with StackQL Deploy, including basic concepts and the essential components of a deployment. -tags: [] -draft: false -unlisted: false ---- - -import File from '/src/components/File'; - -`stackql-deploy` is a model driven, declarative framework for provisioning, de-provisioning and testing cloud resources. Heard enough and ready to get started? Jump to a [__Quick Start__](#quick-start). - -## Installing `stackql-deploy` - -`stackql-deploy` is distributed as a standalone binary with no runtime dependencies required. - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -**Using Homebrew (recommended):** - -```bash -brew tap stackql/tap -brew install stackql-deploy -``` - -**Or download the installer package:** - -Download the latest `.pkg` installer from the [GitHub Releases](https://github.com/stackql-labs/stackql-deploy-rs/releases) page. - - - - -**Download the binary:** - -```bash -curl -L https://github.com/stackql-labs/stackql-deploy-rs/releases/latest/download/stackql-deploy-linux-x86_64.tar.gz | tar xz -sudo mv stackql-deploy /usr/local/bin/ -``` - - - - -**Using Chocolatey (recommended):** - -```powershell -choco install stackql-deploy -``` - -**Or download the MSI installer:** - -Download the latest `.msi` installer from the [GitHub Releases](https://github.com/stackql-labs/stackql-deploy-rs/releases) page. - - - - -For more installation options, see the [__GitHub Releases__](https://github.com/stackql-labs/stackql-deploy-rs/releases) page. 
- -## How `stackql-deploy` works - -The core components of `stackql-deploy` are the __stack directory__, the `stackql_manifest.yml` file and resource query (`.iql`) files. These files define your infrastructure and guide the deployment process. - -`stackql-deploy` uses the `stackql_manifest.yml` file in the `stack-dir`, to render query templates (`.iql` files) in the `resources` sub directory of the `stack-dir`, targeting an environment (`stack-env`). `stackql` is used to execute the queries to deploy, test, update or delete resources as directed. This is summarized in the diagram below: - -```mermaid -flowchart LR - subgraph stack-dir - direction LR - B(Manifest File) --> C(Resource Files) - end - - A(stackql-deploy) -->|uses...|stack-dir - stack-dir -->|deploys to...|D(☁️ Your Environment) -``` - -### `stackql_manifest.yml` File - -The `stackql_manifest.yml` file is the basis of your stack configuration. It contains the definitions of the resources you want to manage, the providers you're using (such as AWS, Google Cloud, or Azure), and the environment-specific settings that will guide the deployment. - -This manifest file acts as a blueprint for your infrastructure, describing the resources and how they should be configured. An example `stackql_manifest.yml` file is shown here: - - - -```yaml -version: 1 -name: "my-azure-stack" -description: description for "my-azure-stack" -providers: - - azure -globals: - - name: subscription_id - description: azure subscription id - value: "{{ AZURE_SUBSCRIPTION_ID }}" - - name: location - description: default location for resources - value: eastus - - name: global_tags - value: - provisioner: stackql - stackName: "{{ stack_name }}" - stackEnv: "{{ stack_env }}" -resources: - - name: example_res_grp - props: - - name: resource_group_name - value: "{{ stack_name }}-{{ stack_env }}-rg" - exports: - - resource_group_name -``` - - - -The `stackql_manifest.yml` file is detailed [__here__](/manifest-file). 
- -### Resource Query Files - -Each resource or query defined in the `resources` section of the `stackql_manifest.yml` has an associated StackQL query file (using the `.iql` extension by convention). The query file defines queries to deploy and test a cloud resource. These queries are demarcated by query anchors (or hints). Available query anchors include: - -- `exists` : tests for the existence or non-existence of a resource -- `create` : creates the resource in the desired state using a StackQL `INSERT` statement -- `update` : updates the resource to the desired state using a StackQL `UPDATE` statement -- `createorupdate`: for idempotent resources, uses a StackQL `INSERT` statement -- `statecheck`: tests the state of a resource after a DML operation, typically to determine if the resource is in the desired state -- `exports` : variables to export from the resource to be used in subsequent queries -- `delete` : deletes a resource using a StackQL `DELETE` statement - -An example resource query file is shown here: - - - -```sql -/*+ exists */ -SELECT COUNT(*) as count FROM azure.resources.resource_groups -WHERE subscriptionId = '{{ subscription_id }}' -AND resourceGroupName = '{{ resource_group_name }}' - -/*+ create */ -INSERT INTO azure.resources.resource_groups( - resourceGroupName, - subscriptionId, - location -) -SELECT - '{{ resource_group_name }}', - '{{ subscription_id }}', - '{{ location }}' - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM azure.resources.resource_groups -WHERE subscriptionId = '{{ subscription_id }}' -AND resourceGroupName = '{{ resource_group_name }}' -AND location = '{{ location }}' -AND JSON_EXTRACT(properties, '$.provisioningState') = 'Succeeded' - -/*+ exports */ -SELECT '{{ resource_group_name }}' as resource_group_name - -/*+ delete */ -DELETE FROM azure.resources.resource_groups -WHERE resourceGroupName = '{{ resource_group_name }}' AND subscriptionId = '{{ subscription_id }}' -``` - - - -Resource 
queries are detailed [__here__](/resource-query-files). - -### `stackql-deploy` commands - -Basic `stackql-deploy` commands include: - -- `build` : provisions a stack to the desired state in a specified environment (including `create` and `update` operations if necessary) -- `test` : tests a stack to confirm all resources exist and are in their desired state -- `teardown` : de-provisions a stack - -here are some examples: - -```bash title="deploy my-azure-stack to the prd environment" -stackql-deploy build my-azure-stack prd \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 -``` - -```bash title="test my-azure-stack in the sit environment" -stackql-deploy test my-azure-stack sit \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 -``` - -```bash title="teardown my-azure-stack in the dev environment" -stackql-deploy teardown my-azure-stack dev \ --e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 -``` - -For more detailed information see [`cli-reference/build`](/cli-reference/build), [`cli-reference/test`](/cli-reference/test), [`cli-reference/teardown`](/cli-reference/teardown), or other commands available. - - -### `stackql-deploy` deployment flow - -`stackql-deploy` processes the resources defined in the `stackql_manifest.yml` in top down order (`teardown` operations are processed in reverse order). - - - -## Quick Start - -To get up and running quickly, `stackql-deploy` provides a set of quick start templates for common cloud providers. These templates include predefined configurations and resource queries tailored to AWS, Azure, and Google Cloud, among others. - -- [**AWS Quick Start Template**](/template-library/aws/vpc-and-ec2-instance): A basic setup for deploying a VPC, including subnets and routing configurations. -- [**Azure Quick Start Template**](/template-library/azure/simple-vnet-and-vm): A setup for creating a Resource Group with associated resources. 
-- [**Google Cloud Quick Start Template**](/template-library/google/k8s-the-hard-way): A configuration for deploying a VPC with network and firewall rules. - -These templates are designed to help you kickstart your infrastructure deployment with minimal effort, providing a solid foundation that you can customize to meet your specific needs. \ No newline at end of file +--- +id: getting-started +title: Getting Started +hide_title: false +hide_table_of_contents: false +description: A quick overview of how to get started with StackQL Deploy, including basic concepts and the essential components of a deployment. +tags: [] +draft: false +unlisted: false +--- + +import File from '/src/components/File'; + +`stackql-deploy` is a model driven, declarative framework for provisioning, de-provisioning and testing cloud resources. Heard enough and ready to get started? Jump to a [__Quick Start__](#quick-start). + +## Installing `stackql-deploy` + +`stackql-deploy` is distributed as a standalone binary with no runtime dependencies required. 
+ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +**Apple Silicon (ARM64):** + +```bash +curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-macos-arm64.tar.gz | tar xz +sudo mv stackql-deploy /usr/local/bin/ +``` + +**Intel (x86_64):** + +```bash +curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-macos-x86_64.tar.gz | tar xz +sudo mv stackql-deploy /usr/local/bin/ +``` + + + + +**x86_64:** + +```bash +curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-linux-x86_64.tar.gz | tar xz +sudo mv stackql-deploy /usr/local/bin/ +``` + +**ARM64:** + +```bash +curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-linux-arm64.tar.gz | tar xz +sudo mv stackql-deploy /usr/local/bin/ +``` + + + + +**PowerShell:** + +```powershell +Invoke-WebRequest -Uri "https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-windows-x86_64.zip" -OutFile stackql-deploy.zip +Expand-Archive stackql-deploy.zip -DestinationPath . +Move-Item stackql-deploy.exe "$env:LOCALAPPDATA\Microsoft\WindowsApps\" +Remove-Item stackql-deploy.zip +``` + +**WSL / Git Bash:** + +```bash +curl -L -o stackql-deploy.zip https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-windows-x86_64.zip +unzip stackql-deploy.zip +``` + + + + +If you have Rust installed (via [rustup](https://rustup.rs/)): + +```bash +cargo install stackql-deploy +``` + +This builds from source and installs to `~/.cargo/bin/`. 
+ + + + +Use the [`stackql/setup-deploy`](https://github.com/marketplace/actions/stackql-deploy) action to install and run `stackql-deploy` in your CI/CD pipelines: + +```yaml +steps: + - uses: actions/checkout@v4 + + - name: Deploy Stack + uses: stackql/setup-deploy@v1.0.1 + with: + command: 'build' + stack_dir: 'examples/aws/aws-vpc-webserver' + stack_env: 'dev' + env_vars: 'AWS_REGION=us-east-1' +``` + +The action automatically downloads the latest binary for the runner's platform. See [__Deploying with GitHub Actions__](/github-actions) for the full reference. + + + + +All platform binaries are available on the [__GitHub Releases__](https://github.com/stackql/stackql-deploy-rs/releases) page. + +## How `stackql-deploy` works + +The core components of `stackql-deploy` are the __stack directory__, the `stackql_manifest.yml` file and resource query (`.iql`) files. These files define your infrastructure and guide the deployment process. + +`stackql-deploy` uses the `stackql_manifest.yml` file in the `stack-dir`, to render query templates (`.iql` files) in the `resources` sub directory of the `stack-dir`, targeting an environment (`stack-env`). `stackql` is used to execute the queries to deploy, test, update or delete resources as directed. This is summarized in the diagram below: + +```mermaid +flowchart LR + subgraph stack-dir + direction LR + B(Manifest File) --> C(Resource Files) + end + + A(stackql-deploy) -->|uses...|stack-dir + stack-dir -->|deploys to...|D(☁️ Your Environment) +``` + +### `stackql_manifest.yml` File + +The `stackql_manifest.yml` file is the basis of your stack configuration. It contains the definitions of the resources you want to manage, the providers you're using (such as AWS, Google Cloud, or Azure), and the environment-specific settings that will guide the deployment. + +This manifest file acts as a blueprint for your infrastructure, describing the resources and how they should be configured. 
An example `stackql_manifest.yml` file is shown here: + + + +```yaml +version: 1 +name: "my-azure-stack" +description: description for "my-azure-stack" +providers: + - azure +globals: + - name: subscription_id + description: azure subscription id + value: "{{ AZURE_SUBSCRIPTION_ID }}" + - name: location + description: default location for resources + value: eastus + - name: global_tags + value: + provisioner: stackql + stackName: "{{ stack_name }}" + stackEnv: "{{ stack_env }}" +resources: + - name: example_res_grp + props: + - name: resource_group_name + value: "{{ stack_name }}-{{ stack_env }}-rg" + exports: + - resource_group_name +``` + + + +The `stackql_manifest.yml` file is detailed [__here__](/manifest-file). + +### Resource Query Files + +Each resource or query defined in the `resources` section of the `stackql_manifest.yml` has an associated StackQL query file (using the `.iql` extension by convention). The query file defines queries to deploy and test a cloud resource. These queries are demarcated by query anchors (or hints). 
Available query anchors include: + +- `exists` : tests for the existence or non-existence of a resource +- `create` : creates the resource in the desired state using a StackQL `INSERT` statement +- `update` : updates the resource to the desired state using a StackQL `UPDATE` statement +- `createorupdate`: for idempotent resources, uses a StackQL `INSERT` statement +- `statecheck`: tests the state of a resource after a DML operation, typically to determine if the resource is in the desired state +- `exports` : variables to export from the resource to be used in subsequent queries +- `delete` : deletes a resource using a StackQL `DELETE` statement + +An example resource query file is shown here: + + + +```sql +/*+ exists */ +SELECT COUNT(*) as count FROM azure.resources.resource_groups +WHERE subscriptionId = '{{ subscription_id }}' +AND resourceGroupName = '{{ resource_group_name }}' + +/*+ create */ +INSERT INTO azure.resources.resource_groups( + resourceGroupName, + subscriptionId, + location +) +SELECT + '{{ resource_group_name }}', + '{{ subscription_id }}', + '{{ location }}' + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM azure.resources.resource_groups +WHERE subscriptionId = '{{ subscription_id }}' +AND resourceGroupName = '{{ resource_group_name }}' +AND location = '{{ location }}' +AND JSON_EXTRACT(properties, '$.provisioningState') = 'Succeeded' + +/*+ exports */ +SELECT '{{ resource_group_name }}' as resource_group_name + +/*+ delete */ +DELETE FROM azure.resources.resource_groups +WHERE resourceGroupName = '{{ resource_group_name }}' AND subscriptionId = '{{ subscription_id }}' +``` + + + +Resource queries are detailed [__here__](/resource-query-files). 
+ +### `stackql-deploy` commands + +Basic `stackql-deploy` commands include: + +- `build` : provisions a stack to the desired state in a specified environment (including `create` and `update` operations if necessary) +- `test` : tests a stack to confirm all resources exist and are in their desired state +- `teardown` : de-provisions a stack + +here are some examples: + +```bash title="deploy my-azure-stack to the prd environment" +stackql-deploy build my-azure-stack prd \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +``` + +```bash title="test my-azure-stack in the sit environment" +stackql-deploy test my-azure-stack sit \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +``` + +```bash title="teardown my-azure-stack in the dev environment" +stackql-deploy teardown my-azure-stack dev \ +-e AZURE_SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000 +``` + +For more detailed information see [`cli-reference/build`](/cli-reference/build), [`cli-reference/test`](/cli-reference/test), [`cli-reference/teardown`](/cli-reference/teardown), or other commands available. + + +### `stackql-deploy` deployment flow + +`stackql-deploy` processes the resources defined in the `stackql_manifest.yml` in top down order (`teardown` operations are processed in reverse order). + + + +## Quick Start + +To get up and running quickly, `stackql-deploy` provides a set of quick start templates for common cloud providers. These templates include predefined configurations and resource queries tailored to AWS, Azure, and Google Cloud, among others. + +- [**AWS Quick Start Template**](/template-library/aws/vpc-and-ec2-instance): A basic setup for deploying a VPC, including subnets and routing configurations. +- [**Azure Quick Start Template**](/template-library/azure/simple-vnet-and-vm): A setup for creating a Resource Group with associated resources. 
+- [**Google Cloud Quick Start Template**](/template-library/google/k8s-the-hard-way): A configuration for deploying a VPC with network and firewall rules. + +These templates are designed to help you kickstart your infrastructure deployment with minimal effort, providing a solid foundation that you can customize to meet your specific needs. diff --git a/website/docs/manifest-file.md b/website/docs/manifest-file.md index 257d046..e446778 100644 --- a/website/docs/manifest-file.md +++ b/website/docs/manifest-file.md @@ -143,6 +143,42 @@ See [Resource Query Files - callback](resource-query-files#callback) for the ful *** +### `resource.return_vals` + +Specifies which fields from a `RETURNING *` response should be captured as resource-scoped variables (`this.*`). This is optional — if omitted, `RETURNING *` results are still logged and stored for callback queries, but no fields are injected into the template context. + +`return_vals` is scoped per operation (`create`, `update`, `delete`). Each operation maps to a list of field specifications: + +- **Rename pattern** — `SourceField: target_name` captures `SourceField` from the response and makes it available as `{{ this.target_name }}` +- **Direct capture** — `FieldName` (string) captures the field as `{{ this.FieldName }}` + +Fields captured by `return_vals` are mutable — a `create` can set a value that a subsequent `update` overwrites. + +If `return_vals` is specified for a resource and operation but the field is not present in the `RETURNING *` response (either because the provider didn't return it or the `RETURNING *` clause was omitted), the build will fail. + +```yaml +resources: + - name: example_vpc + props: + # ... + return_vals: + create: + - Identifier: identifier + - ErrorCode + exports: + - vpc_id +``` + +In this example, when a `create` operation runs: + +1. `Identifier` from the `RETURNING *` response is captured as `{{ this.identifier }}` +2. `ErrorCode` is captured as `{{ this.ErrorCode }}` +3. 
If the `RETURNING *` response doesn't include these fields, the build fails + +When `return_vals` successfully captures an identifier from `RETURNING *`, the framework skips the post-create `exists` re-run (saving an API call), since the identifier is already known. + +*** + ### `resource.props` diff --git a/website/docs/resource-query-files.md b/website/docs/resource-query-files.md index 8d6cf95..3b2a6ff 100644 --- a/website/docs/resource-query-files.md +++ b/website/docs/resource-query-files.md @@ -58,36 +58,51 @@ This pattern is particularly useful when you need to **discover a resource ident ```sql /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_vpc"]}]' -AND ResourceTypeFilters = '["ec2:vpc"]' +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as vpc_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:vpc"]' +), +vpcs AS +( + SELECT vpc_id + FROM awscc.ec2.vpcs_list_only + WHERE region = '{{ region }}' +) +SELECT r.vpc_id +FROM vpcs r +INNER JOIN tagged_resources tr +ON r.vpc_id = tr.vpc_id; ``` -In the example above, when the resource exists the `identifier` field (e.g. `vpc-0abc123def456`) is captured and available as `{{ this.identifier }}` in subsequent queries: +In the example above, when the resource exists the `vpc_id` field (e.g. 
`vpc-0abc123def456`) is captured and available as `{{ this.vpc_id }}` in subsequent queries: ```sql /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT vpc_id, cidr_block +SELECT +AWS_POLICY_EQUAL(tags, '{{ vpc_tags }}') as test_tags FROM awscc.ec2.vpcs -WHERE Identifier = '{{ this.identifier }}' +WHERE Identifier = '{{ this.vpc_id }}' AND region = '{{ region }}' +AND cidr_block = '{{ vpc_cidr_block }}' ) t -WHERE cidr_block = '{{ vpc_cidr_block }}' +WHERE test_tags = 1; ``` :::tip The identifier capture pattern enables a powerful two-step workflow for providers like `awscc` (AWS Cloud Control) where resources are identified by tags rather than names: -1. **`exists`** — find the resource via a tag-based lookup (e.g. `awscc.tagging.tagged_resources`), capturing the cloud-assigned identifier -2. **`statecheck`** — use `{{ this.identifier }}` to query the resource directly and verify its properties match the desired state -3. **`exports`** — use `{{ this.identifier }}` to query the resource and extract values for downstream resources +1. **`exists`** — find the resource via a CTE that cross-references `awscc.tagging.tagged_resources` with the provider's `*_list_only` resource, capturing the cloud-assigned identifier. The `INNER JOIN` ensures the resource both has the expected tags **and** currently exists (eliminating stale tag records for terminated resources). +2. **`statecheck`** — use `{{ this. }}` to query the resource directly and verify its properties match the desired state (including tag comparison via `AWS_POLICY_EQUAL`). +3. **`exports`** — use `{{ this. }}` to query the resource and extract values for downstream resources. -This avoids the need for complex JOINs or subqueries between the tagging service and the resource provider. +The [`to_aws_tag_filters`](template-filters#to_aws_tag_filters) filter converts the `global_tags` manifest variable into the AWS TagFilters format automatically. 
::: @@ -494,28 +509,43 @@ The corresponding manifest entry requires **no** `callback` section — callback ### Tag-based identifier discovery example (`awscc`) -This example demonstrates the **identifier capture** pattern for AWS Cloud Control (`awscc`) resources, where resources are discovered via the `awscc.tagging.tagged_resources` service. The `exists` query returns the resource identifier (extracted from the ARN), which is then used in `statecheck` and `exports` queries via `{{ this.identifier }}`. +This example demonstrates the **identifier capture** pattern for AWS Cloud Control (`awscc`) resources. Resources are discovered using a CTE that cross-references `awscc.tagging.tagged_resources` with the provider's `*_list_only` resource, ensuring the resource actually exists (not just a stale tag record). The captured field is then used via `{{ this. }}` in `statecheck` and `exports` queries. ```sql /*+ exists */ -SELECT split_part(ResourceARN, '/', 2) as identifier -FROM awscc.tagging.tagged_resources -WHERE region = '{{ region }}' -AND TagFilters = '[{"Key":"stackql:stack-name","Values":["{{ stack_name }}"]},{"Key":"stackql:stack-env","Values":["{{ stack_env }}"]},{"Key":"stackql:resource-name","Values":["example_subnet"]}]' -AND ResourceTypeFilters = '["ec2:subnet"]' +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as subnet_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:subnet"]' +), +subnets AS +( + SELECT subnet_id + FROM awscc.ec2.subnets_list_only + WHERE region = '{{ region }}' +) +SELECT r.subnet_id +FROM subnets r +INNER JOIN tagged_resources tr +ON r.subnet_id = tr.subnet_id; /*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( -SELECT subnet_id, vpc_id, cidr_block +SELECT +AWS_POLICY_EQUAL(tags, '{{ subnet_tags }}') as test_tags FROM awscc.ec2.subnets -WHERE Identifier = '{{ this.identifier 
}}' +WHERE Identifier = '{{ this.subnet_id }}' AND region = '{{ region }}' -) t -WHERE cidr_block = '{{ subnet_cidr_block }}' +AND cidr_block = '{{ subnet_cidr_block }}' AND vpc_id = '{{ vpc_id }}' +) t +WHERE test_tags = 1; /*+ create */ INSERT INTO awscc.ec2.subnets ( @@ -524,28 +554,29 @@ INSERT INTO awscc.ec2.subnets ( SELECT '{{ vpc_id }}', '{{ subnet_cidr_block }}', true, '{{ subnet_tags }}', '{{ region }}' +RETURNING *; /*+ exports, retries=5, retry_delay=5 */ SELECT subnet_id, availability_zone FROM awscc.ec2.subnets -WHERE Identifier = '{{ this.identifier }}' -AND region = '{{ region }}' +WHERE Identifier = '{{ this.subnet_id }}' +AND region = '{{ region }}'; /*+ delete */ DELETE FROM awscc.ec2.subnets -WHERE data__Identifier = '{{ subnet_id }}' -AND region = '{{ region }}' +WHERE Identifier = '{{ subnet_id }}' +AND region = '{{ region }}'; ``` In this example: -1. **`exists`** — queries `awscc.tagging.tagged_resources` filtered by stack-level tags and resource type. If a matching resource is found, `identifier` is captured (e.g. `subnet-0abc123...`). -2. **`statecheck`** — uses `{{ this.identifier }}` to query `awscc.ec2.subnets` directly and verify the CIDR block and VPC ID match the desired state. -3. **`create`** — standard `INSERT` with tags that include `stackql:stack-name`, `stackql:stack-env`, and `stackql:resource-name` for future discovery. -4. **`exports`** — uses `{{ this.identifier }}` to query the resource and extract `subnet_id` and `availability_zone` for downstream resources. -5. **`delete`** — uses the exported `subnet_id` (from the `exports` query, not `this.identifier`) with `data__Identifier`. +1. **`exists`** — uses a CTE to cross-reference `awscc.tagging.tagged_resources` (filtered by stack tags via the [`to_aws_tag_filters`](template-filters#to_aws_tag_filters) filter) with `awscc.ec2.subnets_list_only`. The `INNER JOIN` ensures the resource both has the expected tags **and** currently exists in the provider. 
The returned `subnet_id` is captured as `{{ this.subnet_id }}`. +2. **`statecheck`** — uses `{{ this.subnet_id }}` to query `awscc.ec2.subnets` directly and verify properties including tags (via [`AWS_POLICY_EQUAL`](https://stackql.io/docs/language-spec/functions/json/aws_policy_equal)). +3. **`create`** — `INSERT` with `RETURNING *` to capture the Cloud Control API response. Tags include `stackql:stack-name`, `stackql:stack-env`, and `stackql:resource-name` for future discovery. +4. **`exports`** — uses `{{ this.subnet_id }}` to query the resource and extract `subnet_id` and `availability_zone` for downstream resources. +5. **`delete`** — uses the exported `subnet_id` (from the `exports` query) with `Identifier`. ### `query` type example diff --git a/website/docs/template-filters.md b/website/docs/template-filters.md index f3fa810..06ed41f 100644 --- a/website/docs/template-filters.md +++ b/website/docs/template-filters.md @@ -149,6 +149,38 @@ SELECT ; ``` +### `to_aws_tag_filters` + +Converts a list of AWS tag key-value pairs (as used in `global_tags`) into the AWS Resource Groups Tagging API `TagFilters` format. This is an AWS-specific filter designed for use with `awscc.tagging.tagged_resources` queries. 
+ +**Input format:** `[{"Key": "k", "Value": "v"}, ...]` +**Output format:** `[{"Key": "k", "Values": ["v"]}, ...]` + +**Example usage:** + +```sql +/*+ exists */ +SELECT split_part(ResourceARN, '/', 2) as vpc_id +FROM awscc.tagging.tagged_resources +WHERE region = '{{ region }}' +AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' +AND ResourceTypeFilters = '["ec2:vpc"]' +``` + +This filter is typically applied to the `global_tags` variable defined in the manifest: + +```yaml +globals: + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{{ stack_name }}" + - Key: 'stackql:stack-env' + Value: "{{ stack_env }}" + - Key: 'stackql:resource-name' + Value: "{{ resource_name }}" +``` + ## Special Variables StackQL Deploy injects the following built-in variables automatically — no manifest configuration is required. diff --git a/website/docs/template-library/aws/vpc-and-ec2-instance.md b/website/docs/template-library/aws/vpc-and-ec2-instance.md index c5095f0..04b3ed3 100644 --- a/website/docs/template-library/aws/vpc-and-ec2-instance.md +++ b/website/docs/template-library/aws/vpc-and-ec2-instance.md @@ -1,359 +1,342 @@ ---- -id: vpc-and-ec2-instance -title: AWS VPC and EC2 Instance -hide_title: false -hide_table_of_contents: false -description: A quick overview of how to get started with StackQL Deploy, including basic concepts and the essential components of a deployment. -tags: [] -draft: false -unlisted: false ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -In this example, we'll demonstrate how to set up a simple VPC with an EC2 instance in AWS using `stackql-deploy`. This setup is ideal for getting started with basic networking and compute resources on AWS. - -
- Simple AWS VPC EC2 Stack -
-The EC2 instance is bootstrapped with a web server that serves a simple page using the EC2 instance `UserData` property. - -## Deploying the Stack - -> Install `stackql-deploy` (see [__Installing stackql-deploy__](/getting-started#installing-stackql-deploy)), set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, that's it! - -Once you have setup your project directory (your "stack"), you can use the `stackql-deploy` cli application to deploy, test or teardown the stack in any given environment. To deploy the stack to an environment labeled `sit`, run the following: - -```bash -stackql-deploy build aws-stack sit \ --e AWS_REGION=ap-southeast-2 -``` -Use the `--dry-run` flag to view the queries to be run without actually running them, heres an example of a `dry-run` operation for a `prd` environment: - -```bash -stackql-deploy build aws-stack prd \ --e AWS_REGION=ap-southeast-2 \ ---dry-run -``` - -## stackql_manifest.yml - -The `stackql_manifest.yml` defines the resources in yoru stack and their property values (for one or more environments). - -
- Click to expand the stackql_manifest.yml file - -```yaml -version: 1 -name: "aws-stack" -description: description for "aws-stack" -providers: - - aws -globals: - - name: region - description: aws region - value: "{{ AWS_REGION }}" - - name: global_tags - value: - - Key: Provisioner - Value: stackql - - Key: StackName - Value: "{{ stack_name }}" - - Key: StackEnv - Value: "{{ stack_env }}" -resources: - - name: example_vpc - props: - - name: vpc_cidr_block - values: - prd: - value: "10.0.0.0/16" - sit: - value: "10.1.0.0/16" - dev: - value: "10.2.0.0/16" - - name: vpc_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-vpc" - merge: - - global_tags - exports: - - vpc_id - - vpc_cidr_block - - name: example_subnet - props: - - name: subnet_cidr_block - values: - prd: - value: "10.0.1.0/24" - sit: - value: "10.1.1.0/24" - dev: - value: "10.2.1.0/24" - - name: subnet_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-subnet" - merge: ['global_tags'] - exports: - - subnet_id - - availability_zone - - name: example_inet_gateway - props: - - name: inet_gateway_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway" - merge: ['global_tags'] - exports: - - internet_gateway_id - - name: example_inet_gw_attachment - props: [] - - name: example_route_table - props: - - name: route_table_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-route-table" - merge: ['global_tags'] - exports: - - route_table_id - - name: example_subnet_rt_assn - props: [] - exports: - - route_table_assn_id - - name: example_inet_route - props: [] - exports: - - inet_route_indentifer - - name: example_security_group - props: - - name: group_description - value: "web security group for {{ stack_name }} ({{ stack_env }} environment)" - - name: group_name - value: "{{ stack_name }}-{{ stack_env }}-web-sg" - - name: sg_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-web-sg" - merge: 
['global_tags'] - - name: security_group_ingress - value: - - CidrIp: "0.0.0.0/0" - Description: Allow HTTP traffic - FromPort: 80 - ToPort: 80 - IpProtocol: "tcp" - - CidrIp: "{{ vpc_cidr_block }}" - Description: Allow SSH traffic from the internal network - FromPort: 22 - ToPort: 22 - IpProtocol: "tcp" - - name: security_group_egress - value: - - CidrIp: "0.0.0.0/0" - Description: Allow all outbound traffic - FromPort: 0 - ToPort: 0 - IpProtocol: "-1" - exports: - - security_group_id - - name: example_web_server - props: - - name: instance_name - value: "{{ stack_name }}-{{ stack_env }}-instance" - - name: ami_id - value: ami-030a5acd7c996ef60 - - name: instance_type - value: t2.micro - - name: instance_subnet_id - value: "{{ subnet_id }}" - - name: sg_ids - value: - - "{{ security_group_id }}" - - name: user_data - value: | - #!/bin/bash - yum update -y - yum install -y httpd - systemctl start httpd - systemctl enable httpd - echo 'StackQL on AWS' > /var/www/html/index.html - echo '
StackQL Logo

Hello, stackql-deploy on AWS!

' >> /var/www/html/index.html - - name: instance_tags - value: - - Key: Name - Value: "{{ stack_name }}-{{ stack_env }}-instance" - merge: ['global_tags'] - exports: - - instance_id - - public_dns_name -``` - -
- -## Resource Query Files - -Resource query files are templates which are used to create, update, test and delete resources in your stack. Here are some example resource query files in this example: - - - - -```sql -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.vpcs ( - CidrBlock, - Tags, - EnableDnsSupport, - EnableDnsHostnames, - region -) -SELECT - '{{ vpc_cidr_block }}', - '{{ vpc_tags }}', - true, - true, - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT vpc_id, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ vpc_cidr_block - - }}'; - -/*+ exports */ -SELECT vpc_id, vpc_cidr_block FROM -( -SELECT vpc_id, cidr_block as "vpc_cidr_block", -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ vpc_cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.vpcs -WHERE Identifier = '{{ vpc_id }}' -AND region = '{{ region }}'; -``` - - - - -```sql -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT subnet_id, -json_group_object(tag_key, 
tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.subnets ( - VpcId, - CidrBlock, - MapPublicIpOnLaunch, - Tags, - region -) -SELECT - '{{ vpc_id }}', - '{{ subnet_cidr_block }}', - true, - '{{ subnet_tags }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT subnet_id, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ subnet_cidr_block }}'; - -/*+ exports */ -SELECT subnet_id, availability_zone FROM -( -SELECT subnet_id, -availability_zone, -cidr_block, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.subnet_tags -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -GROUP BY subnet_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t -WHERE cidr_block = '{{ subnet_cidr_block }}'; - -/*+ delete */ -DELETE FROM aws.ec2.subnets -WHERE Identifier = '{{ subnet_id }}' -AND region = '{{ region }}'; -``` - - - - -## More Information - -The complete code for this example stack is available [__here__](https://github.com/stackql-labs/stackql-deploy-rs/tree/main/examples/aws/aws-stack). 
For more information on how to use StackQL and StackQL Deploy, visit: - -- [`aws` provider docs](https://stackql.io/providers/aws) -- [`stackql`](https://github.com/stackql) -- [`stackql-deploy` GitHub repo](https://github.com/stackql-labs/stackql-deploy-rs) +--- +id: vpc-and-ec2-instance +title: AWS VPC and EC2 Instance +hide_title: false +hide_table_of_contents: false +description: Deploy a complete AWS VPC networking stack with an EC2 web server using the awscc Cloud Control provider. +tags: [] +draft: false +unlisted: false +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this example, we'll demonstrate how to set up a complete VPC networking stack with an EC2 web server instance in AWS using `stackql-deploy` and the `awscc` (Cloud Control) provider. Resources are identified using the `awscc.tagging.tagged_resources` service with a standard tag taxonomy. + +
+ Simple AWS VPC EC2 Stack +
+The EC2 instance is bootstrapped with a web server that serves a simple page using the EC2 instance `UserData` property. + +## Deploying the Stack + +> Install `stackql-deploy` (see [__Installing stackql-deploy__](/getting-started#installing-stackql-deploy)), set the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION` environment variables, that's it! + +Once you have setup your project directory (your "stack"), you can use the `stackql-deploy` cli application to deploy, test or teardown the stack in any given environment. To deploy the stack to an environment labeled `dev`, run the following: + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev +``` +Use the `--dry-run` flag to view the queries to be run without actually running them: + +```bash +stackql-deploy build examples/aws/aws-vpc-webserver dev --dry-run --show-queries +``` + +## stackql_manifest.yml + +The `stackql_manifest.yml` defines the resources in your stack and their property values (for one or more environments). This stack uses the `awscc` provider with a standard tag taxonomy (`stackql:stack-name`, `stackql:stack-env`, `stackql:resource-name`) for resource identification. + +
+ Click to expand the stackql_manifest.yml file + +```yaml +version: 1 +name: "aws-vpc-webserver" +description: Provisions a complete AWS networking stack (VPC, subnet, internet gateway, route table, security group) with an Apache web server EC2 instance. +providers: + - awscc +globals: + - name: region + description: aws region + value: "{{ AWS_REGION }}" + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{{ stack_name }}" + - Key: 'stackql:stack-env' + Value: "{{ stack_env }}" + - Key: 'stackql:resource-name' + Value: "{{ resource_name }}" +resources: + - name: example_vpc + props: + - name: vpc_cidr_block + values: + prd: + value: "10.0.0.0/16" + sit: + value: "10.1.0.0/16" + dev: + value: "10.2.0.0/16" + - name: vpc_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-vpc" + merge: + - global_tags + exports: + - vpc_id + - vpc_cidr_block + - name: example_subnet + props: + - name: subnet_cidr_block + values: + prd: + value: "10.0.1.0/24" + sit: + value: "10.1.1.0/24" + dev: + value: "10.2.1.0/24" + - name: subnet_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-subnet" + merge: ['global_tags'] + exports: + - subnet_id + - availability_zone + - name: example_inet_gateway + props: + - name: inet_gateway_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway" + merge: ['global_tags'] + exports: + - internet_gateway_id + - name: example_inet_gw_attachment + props: [] + - name: example_route_table + props: + - name: route_table_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-route-table" + merge: ['global_tags'] + exports: + - route_table_id + - name: example_subnet_rt_assn + props: [] + exports: + - subnet_route_table_assn_id + - name: example_inet_route + props: [] + - name: example_security_group + props: + - name: group_description + value: "web security group for {{ stack_name }} ({{ stack_env }} environment)" + - name: group_name + value: "{{ 
stack_name }}-{{ stack_env }}-web-sg" + - name: sg_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-web-sg" + merge: ['global_tags'] + - name: security_group_ingress + value: + - IpProtocol: "tcp" + CidrIp: "0.0.0.0/0" + Description: Allow HTTP traffic + FromPort: 80 + ToPort: 80 + - IpProtocol: "tcp" + CidrIp: "{{ vpc_cidr_block }}" + Description: Allow SSH traffic from the internal network + FromPort: 22 + ToPort: 22 + - name: security_group_egress + value: + - CidrIp: "0.0.0.0/0" + Description: "Allow all outbound traffic" + FromPort: -1 + ToPort: -1 + IpProtocol: "-1" + exports: + - security_group_id + - name: example_web_server + props: + - name: ami_id + value: ami-05024c2628f651b80 + - name: instance_type + value: t2.micro + - name: instance_subnet_id + value: "{{ subnet_id }}" + - name: sg_ids + value: + - "{{ security_group_id }}" + - name: user_data + value: | + #!/bin/bash + yum update -y + yum install -y httpd + systemctl start httpd + systemctl enable httpd + echo '...' > /var/www/html/index.html + - name: instance_tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-instance" + merge: ['global_tags'] + exports: + - instance_id + - name: get_web_server_url + type: query + props: [] + exports: + - public_dns_name +``` + +
+ +## Resource Query Files + +Resource query files are templates which are used to create, update, test and delete resources in your stack. This stack uses the **identifier capture** pattern — the `exists` query discovers the resource via tags and the captured field is used in `statecheck` and `exports` queries via `{{ this. }}`. + + + + +```sql +/*+ exists */ +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as vpc_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:vpc"]' +), +vpcs AS +( + SELECT vpc_id + FROM awscc.ec2.vpcs_list_only + WHERE region = '{{ region }}' +) +SELECT r.vpc_id +FROM vpcs r +INNER JOIN tagged_resources tr +ON r.vpc_id = tr.vpc_id; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +AWS_POLICY_EQUAL(tags, '{{ vpc_tags }}') as test_tags +FROM awscc.ec2.vpcs +WHERE Identifier = '{{ this.vpc_id }}' +AND region = '{{ region }}' +AND cidr_block = '{{ vpc_cidr_block }}' +) t +WHERE test_tags = 1; + +/*+ create */ +INSERT INTO awscc.ec2.vpcs ( + CidrBlock, Tags, EnableDnsSupport, EnableDnsHostnames, region +) +SELECT + '{{ vpc_cidr_block }}', '{{ vpc_tags }}', true, true, '{{ region }}' +RETURNING *; + +/*+ exports */ +SELECT '{{ this.vpc_id }}' as vpc_id, +'{{ vpc_cidr_block }}' as vpc_cidr_block; + +/*+ delete */ +DELETE FROM awscc.ec2.vpcs +WHERE Identifier = '{{ vpc_id }}' +AND region = '{{ region }}'; +``` + + + + +```sql +/*+ exists */ +WITH tagged_resources AS +( + SELECT split_part(ResourceARN, '/', 2) as subnet_id + FROM awscc.tagging.tagged_resources + WHERE region = '{{ region }}' + AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' + AND ResourceTypeFilters = '["ec2:subnet"]' +), +subnets AS +( + SELECT subnet_id + FROM awscc.ec2.subnets_list_only + WHERE region = '{{ region }}' +) +SELECT r.subnet_id +FROM subnets r +INNER JOIN tagged_resources tr +ON 
r.subnet_id = tr.subnet_id; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +AWS_POLICY_EQUAL(tags, '{{ subnet_tags }}') as test_tags +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.subnet_id }}' +AND region = '{{ region }}' +AND cidr_block = '{{ subnet_cidr_block }}' +AND vpc_id = '{{ vpc_id }}' +) t +WHERE test_tags = 1; + +/*+ create */ +INSERT INTO awscc.ec2.subnets ( + VpcId, CidrBlock, MapPublicIpOnLaunch, Tags, region +) +SELECT + '{{ vpc_id }}', '{{ subnet_cidr_block }}', true, + '{{ subnet_tags }}', '{{ region }}' +RETURNING *; + +/*+ exports, retries=5, retry_delay=5 */ +SELECT subnet_id, availability_zone +FROM awscc.ec2.subnets +WHERE Identifier = '{{ this.subnet_id }}' +AND region = '{{ region }}'; + +/*+ delete */ +DELETE FROM awscc.ec2.subnets +WHERE Identifier = '{{ subnet_id }}' +AND region = '{{ region }}'; +``` + + + + +## Key Patterns + +### Tag-Based Resource Discovery + +Resources are identified using `awscc.tagging.tagged_resources` cross-referenced with the provider's `*_list_only` resource via a CTE + `INNER JOIN`. This ensures the resource both has the expected tags **and** currently exists in the provider (eliminating stale tag records). + +### `to_aws_tag_filters` Filter + +The `global_tags` variable is converted to AWS TagFilters format using the [`to_aws_tag_filters`](/template-filters#to_aws_tag_filters) custom filter, keeping queries clean: + +```sql +AND TagFilters = '{{ global_tags | to_aws_tag_filters }}' +``` + +### Property Verification with `AWS_POLICY_EQUAL` + +Statechecks use [`AWS_POLICY_EQUAL`](https://stackql.io/docs/language-spec/functions/json/aws_policy_equal) for order-independent comparison of tags and security group rules. + +## More Information + +The complete code for this example stack is available [__here__](https://github.com/stackql/stackql-deploy-rs/tree/main/examples/aws/aws-vpc-webserver). 
For more information on how to use StackQL and StackQL Deploy, visit: + +- [`awscc` provider docs](https://awscc.stackql.io/providers/awscc/) +- [`stackql`](https://github.com/stackql/stackql) +- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy-rs) From 8824d5908e9df2b5edecd6280a0a1372ea519901 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Wed, 18 Mar 2026 20:46:53 +1100 Subject: [PATCH 5/8] 2.0.4 wip --- .../resources/example_subnet_rt_assn.iql | 2 +- .../databricks_account/account_groups.iql | 73 ++------- .../serverless/stackql_manifest.yml | 19 ++- src/commands/build.rs | 144 +++++++++--------- 4 files changed, 97 insertions(+), 141 deletions(-) diff --git a/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql index f54bc61..6a489e8 100644 --- a/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql +++ b/examples/aws/aws-vpc-webserver/resources/example_subnet_rt_assn.iql @@ -1,4 +1,4 @@ -/*+ exists */ +/*+ exists, retries=10, retry_delay=5 */ SELECT id as subnet_route_table_assn_id FROM awscc.ec2.vw_subnet_route_table_associations diff --git a/examples/databricks/serverless/resources/databricks_account/account_groups.iql b/examples/databricks/serverless/resources/databricks_account/account_groups.iql index 820e506..6d3fcaa 100644 --- a/examples/databricks/serverless/resources/databricks_account/account_groups.iql +++ b/examples/databricks/serverless/resources/databricks_account/account_groups.iql @@ -1,79 +1,26 @@ /*+ exists */ -SELECT count(*) as count +SELECT id AS databricks_group_id FROM databricks_account.iam.account_groups -WHERE account_id = '{{ account_id }}' -AND filter = 'displayName Eq "{{ displayName }}"'; +WHERE account_id = '{{ databricks_account_id }}' +AND filter = 'displayName Eq "{{ display_name }}"'; /*+ create */ INSERT INTO databricks_account.iam.account_groups ( -display_name, -external_id, -id, -members, -meta, 
-roles, +displayName, account_id ) SELECT '{{ display_name }}', -'{{ external_id }}', -'{{ id }}', -'{{ members }}', -'{{ meta }}', -'{{ roles }}', -'{{ account_id }}' +'{{ databricks_account_id }}' RETURNING -id, -account_id, -displayName, -externalId, -members, -meta, -roles +id ; -/*+ update */ -UPDATE databricks_account.iam.account_groups -SET -operations = '{{ operations }}', -schemas = '{{ schemas }}' -WHERE -account_id = '{{ account_id }}' -AND id = '{{ id }}'; - -/*+ statecheck, retries=5, retry_delay=10 */ -SELECT count(*) as count -FROM databricks_account.iam.account_groups -WHERE -id = '{{ id }}' AND -members = '{{ members }}' AND -meta = '{{ meta }}' AND -roles = '{{ roles }}' AND -account_id = '{{ account_id }}' -AND attributes = '{{ attributes }}' -AND count = '{{ count }}' -AND excluded_attributes = '{{ excluded_attributes }}' -AND filter = '{{ filter }}' -AND sort_by = '{{ sort_by }}' -AND sort_order = '{{ sort_order }}' -AND start_index = '{{ start_index }}'; - /*+ exports */ -SELECT id, -members, -meta, -roles -FROM databricks_account.iam.account_groups -WHERE account_id = '{{ account_id }}' -AND attributes = '{{ attributes }}' -AND count = '{{ count }}' -AND excluded_attributes = '{{ excluded_attributes }}' -AND filter = '{{ filter }}' -AND sort_by = '{{ sort_by }}' -AND sort_order = '{{ sort_order }}' -AND start_index = '{{ start_index }}'; +SELECT '{{ this.databricks_group_id }}' as databricks_group_id, +'{{ display_name }}' as display_name; /*+ delete */ DELETE FROM databricks_account.iam.account_groups -WHERE account_id = '{{ account_id }}' -AND id = '{{ id }}'; \ No newline at end of file +WHERE account_id = '{{ databricks_account_id }}' +AND id = '{{ databricks_group_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/stackql_manifest.yml b/examples/databricks/serverless/stackql_manifest.yml index d4b67a2..4625435 100644 --- a/examples/databricks/serverless/stackql_manifest.yml +++ 
b/examples/databricks/serverless/stackql_manifest.yml @@ -276,14 +276,17 @@ resources: - workspace_status - workspace_url - # - name: workspace_admins_group - # file: databricks_account/account_groups.iql - # props: - # - name: display_name - # value: "{{ stack_name }}-{{ stack_env }}-workspace-admins" - # exports: - # - id - # - display_name + - name: workspace_admins_group + file: databricks_account/account_groups.iql + props: + - name: display_name + value: "{{ stack_name }}-{{ stack_env }}-workspace-admins" + return_vals: + create: + - id: databricks_group_id + exports: + - databricks_group_id + - display_name # - name: databricks_account/get_users # type: query diff --git a/src/commands/build.rs b/src/commands/build.rs index 50e6ebb..d271dfa 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -10,7 +10,7 @@ use std::collections::HashMap; use std::time::Instant; use clap::{Arg, ArgMatches, Command}; -use log::{debug, info}; +use log::{debug, info, warn}; use crate::commands::base::CommandRunner; use crate::commands::common_args::{ @@ -182,85 +182,62 @@ fn run_build( (runner.get_queries(resource, &full_context), None) }; - // Provisioning queries for resource/multi types - let mut create_query: Option = None; - let mut create_retries = 1u32; - let mut create_retry_delay = 0u32; - let mut update_query: Option = None; - let mut update_retries = 1u32; - let mut update_retry_delay = 0u32; - let mut has_createorupdate = false; + // Detect anchor presence and extract retry options (no rendering yet). + // All query rendering is deferred to the point of use (JIT) because + // exists may capture this.* fields needed by downstream queries. 
+ let has_createorupdate = resource_queries.contains_key("createorupdate"); + let create_retries; + let create_retry_delay; + let update_retries; + let update_retry_delay; if res_type == "resource" || res_type == "multi" { - if let Some(cou) = resource_queries.get("createorupdate") { - has_createorupdate = true; - let rendered = runner.render_query( - &resource.name, - "createorupdate", - &cou.template, - &full_context, - ); - create_query = Some(rendered.clone()); + if has_createorupdate { + let cou = resource_queries.get("createorupdate").unwrap(); create_retries = cou.options.retries; create_retry_delay = cou.options.retry_delay; - update_query = Some(rendered); update_retries = cou.options.retries; update_retry_delay = cou.options.retry_delay; } else { if let Some(cq) = resource_queries.get("create") { - create_query = Some(runner.render_query( - &resource.name, - "create", - &cq.template, - &full_context, - )); create_retries = cq.options.retries; create_retry_delay = cq.options.retry_delay; + } else { + catch_error_and_exit( + "iql file must include either 'create' or 'createorupdate' anchor.", + ); } if let Some(uq) = resource_queries.get("update") { - update_query = Some(runner.render_query( - &resource.name, - "update", - &uq.template, - &full_context, - )); update_retries = uq.options.retries; update_retry_delay = uq.options.retry_delay; + } else { + update_retries = 1; + update_retry_delay = 0; } } - - if create_query.is_none() { - catch_error_and_exit( - "iql file must include either 'create' or 'createorupdate' anchor.", - ); - } + } else { + create_retries = 1; + create_retry_delay = 0; + update_retries = 1; + update_retry_delay = 0; } - // Render the exists query eagerly (it never depends on this.* fields) + // Render exists eagerly (it never depends on this.* fields) let exists_query = resource_queries.get("exists").map(|q| { let rendered = runner.render_query(&resource.name, "exists", &q.template, &full_context); (rendered, q.options.clone()) }); - 
// Statecheck and exports rendering is deferred until after the exists - // check runs, because the exists query may capture fields (e.g. - // `identifier`) that should be available as {{ this. }} in - // subsequent queries. let mut full_context = full_context; let exports_opts = resource_queries.get("exports"); let exports_retries = exports_opts.map_or(1, |q| q.options.retries); let exports_retry_delay = exports_opts.map_or(0, |q| q.options.retry_delay); - // Only eagerly render exports if there's no exists query; otherwise - // defer until after exists has captured this.* fields. - let mut exports_query_str: Option = if resource_queries.contains_key("exists") { - None // will be rendered after exists check injects this.* fields - } else { - resource_queries - .get("exports") - .map(|q| runner.render_query(&resource.name, "exports", &q.template, &full_context)) - }; + // All other queries (create, update, statecheck, exports) are rendered + // JIT at the point of use, after exists has had a chance to capture + // this.* fields into full_context. 
+ let mut exports_query_str: Option = None; // Handle query type with no exports if res_type == "query" && exports_query_str.is_none() { @@ -302,12 +279,12 @@ fn run_build( } else if resource_queries.contains_key("statecheck") { // Flow 1: Traditional flow when statecheck exists if let Some(ref eq) = exists_query { - let eq_opts = resource_queries.get("exists").unwrap(); + // Pre-create: fast fail (1 attempt, no delay) let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, - eq_opts.options.retries, - eq_opts.options.retry_delay, + 1, + 0, dry_run, show_queries, false, @@ -398,12 +375,12 @@ fn run_build( exports_result_from_proxy = None; if let Some(ref eq) = exists_query { - let eq_opts = resource_queries.get("exists").unwrap(); + // Pre-create: fast fail (1 attempt, no delay) let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, - eq_opts.options.retries, - eq_opts.options.retry_delay, + 1, + 0, dry_run, show_queries, false, @@ -424,12 +401,12 @@ fn run_build( } } else if let Some(ref eq) = exists_query { // Flow 3: exists query only (no statecheck rendered yet) - let eq_opts = resource_queries.get("exists").unwrap(); + // Pre-create: fast fail (1 attempt, no delay) let (exists, fields) = runner.check_if_resource_exists( resource, &eq.0, - eq_opts.options.retries, - eq_opts.options.retry_delay, + 1, + 0, dry_run, show_queries, false, @@ -479,9 +456,23 @@ fn run_build( let mut is_created_or_updated = false; if !resource_exists { + // JIT render create/createorupdate query + let create_query = if has_createorupdate { + let cou = resource_queries.get("createorupdate").unwrap(); + runner.render_query( + &resource.name, + "createorupdate", + &cou.template, + &full_context, + ) + } else { + let cq = resource_queries.get("create").unwrap(); + runner.render_query(&resource.name, "create", &cq.template, &full_context) + }; + let (created, returning_row) = runner.create_resource( resource, - create_query.as_ref().unwrap(), + 
&create_query, create_retries, create_retry_delay, dry_run, @@ -529,11 +520,11 @@ fn run_build( render_exports!(runner, resource_queries, resource, &full_context); } } else if !resource.get_return_val_mappings("create").is_empty() { - catch_error_and_exit(&format!( + warn!( "return_vals specified for [{}] create but no RETURNING data received. \ - Ensure the create query includes 'RETURNING *'.", + Will fall back to post-create exists query.", resource.name - )); + ); } // Run callback:create block if present. @@ -573,6 +564,21 @@ fn run_build( } if resource_exists && !is_correct_state { + // JIT render update/createorupdate query + let update_query: Option = if has_createorupdate { + let cou = resource_queries.get("createorupdate").unwrap(); + Some(runner.render_query( + &resource.name, + "createorupdate", + &cou.template, + &full_context, + )) + } else { + resource_queries.get("update").map(|uq| { + runner.render_query(&resource.name, "update", &uq.template, &full_context) + }) + }; + let (updated, returning_row) = runner.update_resource( resource, update_query.as_deref(), @@ -627,11 +633,11 @@ fn run_build( } else if !resource.get_return_val_mappings("update").is_empty() && is_created_or_updated { - catch_error_and_exit(&format!( + warn!( "return_vals specified for [{}] update but no RETURNING data received. \ - Ensure the update query includes 'RETURNING *'.", + Will fall back to post-update exists query.", resource.name - )); + ); } // Run callback:update block if present. 
@@ -686,8 +692,8 @@ fn run_build( let (post_exists, fields) = runner.check_if_resource_exists( resource, &eq.0, - eq_opts.options.retries.max(3), - eq_opts.options.retry_delay.max(5), + eq_opts.options.retries, + eq_opts.options.retry_delay, dry_run, show_queries, false, From 4d7982968cac283b951b972426e0000c5b926a48 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Thu, 19 Mar 2026 05:40:08 +1100 Subject: [PATCH 6/8] 2.0.4 wip --- .../update_group_membership.iql | 2 +- .../databricks_account/get_users.iql | 6 ++++ .../update_group_membership.iql | 6 ++++ .../workspace_assignment.iql | 30 +++++++++++++++++++ .../serverless/stackql_manifest.yml | 29 +++++++++--------- src/commands/build.rs | 19 +++++++----- src/commands/teardown.rs | 6 +++- src/commands/test.rs | 6 +++- src/core/templating.rs | 8 +++-- src/core/utils.rs | 5 +++- 10 files changed, 89 insertions(+), 28 deletions(-) create mode 100644 examples/databricks/serverless/resources/databricks_account/get_users.iql create mode 100644 examples/databricks/serverless/resources/databricks_account/update_group_membership.iql create mode 100644 examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql b/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql index 7f28c52..10f36fa 100644 --- a/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql +++ b/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql @@ -3,4 +3,4 @@ update databricks_account.iam.groups set schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', Operations = '[{"op": "replace", "path": "members", "value": {{ databricks_workspace_group_members }} }]' WHERE account_id = '{{ databricks_account_id }}' -AND id = '{{ databricks_group_id }}'; +AND id = '{{ databricks_group_id }}'; \ No newline at end of 
file diff --git a/examples/databricks/serverless/resources/databricks_account/get_users.iql b/examples/databricks/serverless/resources/databricks_account/get_users.iql new file mode 100644 index 0000000..5bb510f --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_account/get_users.iql @@ -0,0 +1,6 @@ +/*+ exports, retries=3, retry_delay=5 */ +SELECT +JSON_GROUP_ARRAY(JSON_OBJECT('value', id)) as databricks_workspace_group_members +FROM databricks_account.iam.account_users +WHERE account_id = '{{ databricks_account_id }}' +AND userName in {{ users | sql_list }}; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql b/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql new file mode 100644 index 0000000..1b4ecb9 --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql @@ -0,0 +1,6 @@ +/*+ command */ +update databricks_account.iam.account_groups +set schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', +Operations = '[{"op": "replace", "path": "members", "value": {{ databricks_workspace_group_members }} }]' +WHERE account_id = '{{ databricks_account_id }}' +AND id = '{{ databricks_group_id }}'; diff --git a/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql b/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql new file mode 100644 index 0000000..f75f2e5 --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql @@ -0,0 +1,30 @@ +/*+ createorupdate */ +REPLACE databricks_account.iam.workspace_assignment +SET +permissions = '["ADMIN"]' +WHERE +account_id = '{{ databricks_account_id }}' +AND workspace_id = '{{ workspace_id }}' +AND principal_id = '{{ databricks_group_id }}' +RETURNING +error, +permissions, +principal; + + +/*+ statecheck, retries=5, retry_delay=10 */ 
+SELECT COUNT(*) as count +FROM databricks_account.iam.workspace_assignment +WHERE +account_id = '{{ databricks_account_id }}' +AND workspace_id = '{{ workspace_id }}' +AND JSON_EXTRACT(principal, '$.principal_id') = {{ databricks_group_id }} +AND permissions LIKE '%ADMIN%'; + + +/*+ delete */ +DELETE FROM databricks_account.iam.workspace_assignment +WHERE account_id = '{{ databricks_account_id }}' +AND workspace_id = '{{ workspace_id }}' +AND principal_id = '{{ databricks_group_id }}' +; \ No newline at end of file diff --git a/examples/databricks/serverless/stackql_manifest.yml b/examples/databricks/serverless/stackql_manifest.yml index 4625435..872d473 100644 --- a/examples/databricks/serverless/stackql_manifest.yml +++ b/examples/databricks/serverless/stackql_manifest.yml @@ -288,22 +288,23 @@ resources: - databricks_group_id - display_name - # - name: databricks_account/get_users - # type: query - # props: - # - name: users - # value: - # - "javen@stackql.io" - # - "krimmer@stackql.io" - # exports: - # - databricks_workspace_group_members + - name: get_databricks_users + file: databricks_account/get_users.iql + type: query + props: + - name: users + value: + - "javen@stackql.io" + - "krimmer@stackql.io" + exports: + - databricks_workspace_group_members - # - name: databricks_account/update_group_membership - # type: command - # props: [] + - name: databricks_account/update_group_membership + type: command + props: [] - # - name: databricks_account/workspace_permission_assignments - # props: [] + - name: databricks_account/workspace_assignment + props: [] # - name: databricks_workspace/storage_credential # props: diff --git a/src/commands/build.rs b/src/commands/build.rs index d271dfa..fc40caa 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -90,9 +90,9 @@ pub fn execute(matches: &ArgMatches) { ); if is_dry_run { - println!("dry-run build complete"); + print_unicode_box("dry-run build complete", BorderColor::Green); } else { - println!("build 
complete"); + print_unicode_box("build complete", BorderColor::Green); } stop_local_server(); @@ -239,14 +239,19 @@ fn run_build( // this.* fields into full_context. let mut exports_query_str: Option = None; - // Handle query type with no exports - if res_type == "query" && exports_query_str.is_none() { + // Handle query type: render exports eagerly (query types don't + // have exists/statecheck so there's no this.* deferral needed). + if res_type == "query" { if let Some(ref iq) = inline_query { exports_query_str = Some(iq.clone()); } else { - catch_error_and_exit( - "Inline sql must be supplied or an iql file must be present with an 'exports' anchor for query type resources.", - ); + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + if exports_query_str.is_none() { + catch_error_and_exit( + "Inline sql must be supplied or an iql file must be present with an 'exports' anchor for query type resources.", + ); + } } } diff --git a/src/commands/teardown.rs b/src/commands/teardown.rs index 561ae1a..849b8ca 100644 --- a/src/commands/teardown.rs +++ b/src/commands/teardown.rs @@ -78,7 +78,11 @@ pub fn execute(matches: &ArgMatches) { &format!("{:?}", on_failure_val), ); - println!("teardown complete (dry run: {})", is_dry_run); + if is_dry_run { + print_unicode_box("dry-run teardown complete", BorderColor::Green); + } else { + print_unicode_box("teardown complete", BorderColor::Green); + } stop_local_server(); } diff --git a/src/commands/test.rs b/src/commands/test.rs index 51a8942..094f838 100644 --- a/src/commands/test.rs +++ b/src/commands/test.rs @@ -89,7 +89,11 @@ pub fn execute(matches: &ArgMatches) { output_file.map(|s| s.as_str()), ); - println!("tests complete (dry run: {})", is_dry_run); + if is_dry_run { + print_unicode_box("dry-run tests complete", BorderColor::Green); + } else { + print_unicode_box("tests complete", BorderColor::Green); + } stop_local_server(); } diff --git a/src/core/templating.rs 
b/src/core/templating.rs index 74cb24b..0165478 100644 --- a/src/core/templating.rs +++ b/src/core/templating.rs @@ -294,8 +294,7 @@ pub fn render_query( let expanded = match preprocess_this_prefix(template, res_name) { Ok(t) => t, Err(e) => { - error!("[{}] [{}] {}", res_name, anchor, e); - process::exit(1); + crate::core::utils::catch_error_and_exit(&format!("[{}] [{}] {}", res_name, anchor, e)); } }; @@ -348,7 +347,10 @@ pub fn render_query( ctx.keys().collect::>() ); - process::exit(1); + crate::core::utils::catch_error_and_exit(&format!( + "Failed to render query for [{}] [{}]", + res_name, anchor + )); } } } diff --git a/src/core/utils.rs b/src/core/utils.rs index 227bf50..e5d4f20 100644 --- a/src/core/utils.rs +++ b/src/core/utils.rs @@ -21,7 +21,10 @@ pub fn catch_error_and_exit(msg: &str) -> ! { error!("{}", msg); // Stop the local server before exiting to avoid stale sessions crate::utils::server::stop_local_server(); - eprintln!("stackql-deploy operation failed"); + crate::utils::display::print_unicode_box( + "stackql-deploy operation failed", + crate::utils::display::BorderColor::Red, + ); process::exit(1); } From 0929dc6ec2c81cbb57d41264062be11a0e7548d6 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Thu, 19 Mar 2026 21:40:22 +1100 Subject: [PATCH 7/8] added get app --- .github/release-footer.md | 35 ++ .github/workflows/deploy-get-app.yml | 22 ++ .github/workflows/release.yml | 108 +++---- .../workspace_assignment.iql | 4 +- get-app/deno.json | 5 + get-app/main.ts | 22 ++ src/commands/base.rs | 24 +- src/core/templating.rs | 9 +- src/core/utils.rs | 131 ++++---- src/utils/pgwire.rs | 15 +- src/utils/query.rs | 18 +- tests/pgwire_replace_test.rs | 303 ++++++++++++++++++ 12 files changed, 531 insertions(+), 165 deletions(-) create mode 100644 .github/release-footer.md create mode 100644 .github/workflows/deploy-get-app.yml create mode 100644 get-app/deno.json create mode 100644 get-app/main.ts create mode 100644 tests/pgwire_replace_test.rs diff --git 
a/.github/release-footer.md b/.github/release-footer.md new file mode 100644 index 0000000..03cd813 --- /dev/null +++ b/.github/release-footer.md @@ -0,0 +1,35 @@ +### Download + +| Platform | Architecture | Asset | +|----------|--------------|-------| +| Linux | x86_64 | `stackql-deploy-linux-x86_64.tar.gz` | +| Linux | arm64 | `stackql-deploy-linux-arm64.tar.gz` | +| macOS | Universal (Apple Silicon + Intel) | `stackql-deploy-macos-universal.tar.gz` | +| Windows | x86_64 | `stackql-deploy-windows-x86_64.zip` | + +Each archive contains a single binary named `stackql-deploy` (or `stackql-deploy.exe` on Windows). Verify your download with `SHA256SUMS`. + +### Install (quick) + +**Linux / macOS:** + +```sh +curl -L https://get-stackql-deploy.rs -o stackql-deploy.tar.gz && tar xz stackql-deploy.tar.gz +``` + +**Windows (PowerShell):** + +```powershell +Invoke-WebRequest -Uri https://get-stackql-deploy.rs -OutFile stackql-deploy.zip +Expand-Archive stackql-deploy.zip -DestinationPath . +``` + +**cargo:** + +```sh +cargo install stackql-deploy +``` + +--- + +Full documentation: [stackql-deploy.io](https://stackql-deploy.io) - Source: [github.com/stackql/stackql-deploy](https://github.com/stackql/stackql-deploy) diff --git a/.github/workflows/deploy-get-app.yml b/.github/workflows/deploy-get-app.yml new file mode 100644 index 0000000..2dc31ce --- /dev/null +++ b/.github/workflows/deploy-get-app.yml @@ -0,0 +1,22 @@ +name: Deploy get-app + +on: + push: + branches: + - main + paths: + - 'get-app/**' + +jobs: + deploy: + name: Deploy to Deno Deploy + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v6 + - uses: denoland/deployctl@v1 + with: + project: stackql-deploy-get + entrypoint: get-app/main.ts diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 60e8ea4..bad7d7a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -132,10 +132,38 @@ jobs: path: ${{ 
matrix.artifact-name }}.* if-no-files-found: error + universal-macos: + name: Universal macOS binary + needs: build + runs-on: macos-latest + steps: + - uses: actions/download-artifact@v8 + with: + name: stackql-deploy-macos-arm64 + path: arm64 + - uses: actions/download-artifact@v8 + with: + name: stackql-deploy-macos-x86_64 + path: x86_64 + - name: Extract binaries + run: | + tar -xzf arm64/stackql-deploy-macos-arm64.tar.gz -C arm64 + tar -xzf x86_64/stackql-deploy-macos-x86_64.tar.gz -C x86_64 + - name: Create universal binary + run: | + lipo -create arm64/stackql-deploy x86_64/stackql-deploy \ + -output stackql-deploy + tar -czf stackql-deploy-macos-universal.tar.gz stackql-deploy + - uses: actions/upload-artifact@v8 + with: + name: stackql-deploy-macos-universal + path: stackql-deploy-macos-universal.tar.gz + if-no-files-found: error + # Runtime smoke test on each platform before releasing runtime-test: name: Runtime Test (${{ matrix.os }}) - needs: build + needs: [build, universal-macos] strategy: fail-fast: false matrix: @@ -192,16 +220,17 @@ jobs: release: name: Create GitHub Release - needs: [build, runtime-test] + needs: [build, runtime-test, universal-macos] runs-on: ubuntu-latest permissions: contents: write steps: - - name: Download all build artifacts + - uses: actions/checkout@v6 + - name: Download selected build artifacts uses: actions/download-artifact@v8 with: path: artifacts/ - pattern: stackql-deploy-* + pattern: stackql-deploy-@(linux-*|windows-*|macos-universal) - name: Collect archives and generate SHA256SUMS run: | mkdir -p dist @@ -209,6 +238,14 @@ jobs: -exec mv {} dist/ \; cd dist sha256sum * | tee SHA256SUMS + - name: Build release body + run: | + VERSION=${GITHUB_REF_NAME#v} + echo "## stackql-deploy ${GITHUB_REF_NAME}" > release-body.md + echo "" >> release-body.md + awk "/^## $VERSION/{found=1; next} found && /^## /{exit} found{print}" CHANGELOG.md >> release-body.md + echo "" >> release-body.md + cat .github/release-footer.md >> 
release-body.md - name: GH Release uses: softprops/action-gh-release@v2.6.0 with: @@ -219,68 +256,7 @@ jobs: dist/*.tar.gz dist/*.zip dist/SHA256SUMS - body: | - ## stackql-deploy ${{ github.ref_name }} - - ### What's new - - This release ships the **Rust rewrite** of `stackql-deploy` — a complete - ground-up reimplementation that replaces the original Python package. - - Key improvements over the Python version: - - - **Single self-contained binary** — no Python runtime, pip, or virtualenv required. - Drop the binary on any supported platform and run. - - **Faster startup and execution** — Rust compile-time optimisations mean commands - that previously took seconds to initialise now start instantly. - - **Smaller install footprint** — the stripped Linux x86_64 binary is under 10 MB; - no transitive Python dependencies to manage. - - **Statically linked on Linux** — works on any glibc >= 2.17 distro without - installing extra system libraries. - - **Native Windows and macOS ARM64 support** — pre-built for all five major targets - (see assets below). - - ### Download - - | Platform | Architecture | Asset | - |----------|--------------|-------| - | Linux | x86_64 | `stackql-deploy-linux-x86_64.tar.gz` | - | Linux | arm64 | `stackql-deploy-linux-arm64.tar.gz` | - | macOS | Apple Silicon (arm64) | `stackql-deploy-macos-arm64.tar.gz` | - | macOS | Intel (x86_64) | `stackql-deploy-macos-x86_64.tar.gz` | - | Windows | x86_64 | `stackql-deploy-windows-x86_64.zip` | - - Each archive contains a single binary named `stackql-deploy` (or - `stackql-deploy.exe` on Windows). Verify your download with `SHA256SUMS`. - - ### Migrating from the Python package - - If you are currently using the Python package on PyPI, please migrate to this - release. 
The Python package is now deprecated and will no longer receive updates: - https://crates.io/crates/stackql-deploy - - The CLI interface is fully compatible — existing `stackql_manifest.yml` files and - project layouts work without modification. - - ### Install (quick) - - **Linux / macOS:** - ```sh - curl -sSL https://github.com/stackql/stackql-deploy/releases/download/${{ github.ref_name }}/stackql-deploy-linux-x86_64.tar.gz \ - | tar -xz -C /usr/local/bin - ``` - - **Windows (PowerShell):** - ```powershell - Invoke-WebRequest -Uri https://github.com/stackql/stackql-deploy/releases/download/${{ github.ref_name }}/stackql-deploy-windows-x86_64.zip ` - -OutFile stackql-deploy.zip - Expand-Archive stackql-deploy.zip -DestinationPath $env:LOCALAPPDATA\stackql-deploy - ``` - - Or install via `cargo`: - ```sh - cargo install stackql-deploy - ``` + body_path: release-body.md # --no-verify skips the verification build that cargo publish runs by default. # That build fails because build.rs writes contributors.csv into the package diff --git a/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql b/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql index f75f2e5..1a3b5b1 100644 --- a/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql +++ b/examples/databricks/serverless/resources/databricks_account/workspace_assignment.iql @@ -5,7 +5,7 @@ permissions = '["ADMIN"]' WHERE account_id = '{{ databricks_account_id }}' AND workspace_id = '{{ workspace_id }}' -AND principal_id = '{{ databricks_group_id }}' +AND principal_id = {{ databricks_group_id }} RETURNING error, permissions, @@ -26,5 +26,5 @@ AND permissions LIKE '%ADMIN%'; DELETE FROM databricks_account.iam.workspace_assignment WHERE account_id = '{{ databricks_account_id }}' AND workspace_id = '{{ workspace_id }}' -AND principal_id = '{{ databricks_group_id }}' +AND principal_id = {{ databricks_group_id }} ; \ No newline at 
end of file diff --git a/get-app/deno.json b/get-app/deno.json new file mode 100644 index 0000000..a6267e6 --- /dev/null +++ b/get-app/deno.json @@ -0,0 +1,5 @@ +{ + "tasks": { + "dev": "deno run --allow-net main.ts" + } +} diff --git a/get-app/main.ts b/get-app/main.ts new file mode 100644 index 0000000..51358e6 --- /dev/null +++ b/get-app/main.ts @@ -0,0 +1,22 @@ +const GITHUB_REPO = "stackql/stackql-deploy"; + +function getAssetName(ua: string): string { + if (/windows/i.test(ua)) return "stackql-deploy-windows-x86_64.zip"; + if (/darwin|macintosh|mac os/i.test(ua)) return "stackql-deploy-macos-universal.tar.gz"; + return "stackql-deploy-linux-x86_64.tar.gz"; +} + +Deno.serve((req: Request) => { + const url = new URL(req.url); + + if (url.pathname !== "/") { + return Response.redirect("https://stackql-deploy.io", 301); + } + + const ua = req.headers.get("user-agent") ?? ""; + const asset = getAssetName(ua); + return Response.redirect( + `https://github.com/${GITHUB_REPO}/releases/latest/download/${asset}`, + 302 + ); +}); \ No newline at end of file diff --git a/src/commands/base.rs b/src/commands/base.rs index a469435..10b7cd2 100644 --- a/src/commands/base.rs +++ b/src/commands/base.rs @@ -383,7 +383,11 @@ impl CommandRunner { retries, retry_delay, ); - debug!("Create response: {}", msg); + if msg.is_empty() && returning_row.is_none() { + debug!("Create response: no response"); + } else { + debug!("Create response: {}", msg); + } (true, returning_row) } else { let msg = run_stackql_command( @@ -393,7 +397,11 @@ impl CommandRunner { retries, retry_delay, ); - debug!("Create response: {}", msg); + if msg.is_empty() { + debug!("Create response: no response"); + } else { + debug!("Create response: {}", msg); + } (true, None) } } @@ -442,7 +450,11 @@ impl CommandRunner { retries, retry_delay, ); - debug!("Update response: {}", msg); + if msg.is_empty() && returning_row.is_none() { + debug!("Update response: no response"); + } else { + debug!("Update response: {}", 
msg); + } (true, returning_row) } else { let msg = run_stackql_command( @@ -452,7 +464,11 @@ impl CommandRunner { retries, retry_delay, ); - debug!("Update response: {}", msg); + if msg.is_empty() { + debug!("Update response: no response"); + } else { + debug!("Update response: {}", msg); + } (true, None) } } diff --git a/src/core/templating.rs b/src/core/templating.rs index 0165478..0dfb743 100644 --- a/src/core/templating.rs +++ b/src/core/templating.rs @@ -286,11 +286,6 @@ pub fn render_query( ) -> String { let temp_context = prepare_query_context(context); - debug!( - "[{}] [{}] query template:\n\n{}\n", - res_name, anchor, template - ); - let expanded = match preprocess_this_prefix(template, res_name) { Ok(t) => t, Err(e) => { @@ -306,7 +301,7 @@ pub fn render_query( match engine.render_with_filters(&template_name, &processed_query, &ctx) { Ok(rendered) => { debug!( - "[{}] [{}] rendered query:\n\n{}\n", + "Rendered [{}] [{}] query:\n\n{}\n", res_name, anchor, rendered ); rendered @@ -379,7 +374,7 @@ pub fn try_render_query( match engine.render_with_filters(&template_name, &processed_query, &ctx) { Ok(rendered) => { debug!( - "[{}] [{}] rendered query:\n\n{}\n", + "Rendered [{}] [{}] query:\n\n{}\n", res_name, anchor, rendered ); Some(rendered) diff --git a/src/core/utils.rs b/src/core/utils.rs index e5d4f20..d17c840 100644 --- a/src/core/utils.rs +++ b/src/core/utils.rs @@ -11,7 +11,7 @@ use std::process; use std::thread; use std::time::{Duration, Instant}; -use log::{debug, error, info, warn}; +use log::{debug, error, info}; use crate::utils::pgwire::PgwireLite; use crate::utils::query::{execute_query, QueryResult}; @@ -42,12 +42,6 @@ pub fn run_stackql_query( let mut last_error: Option = None; while attempt <= retries { - debug!( - "Executing stackql query on attempt {}:\n\n{}\n", - attempt + 1, - query - ); - match execute_query(query, client) { Ok(result) => match result { QueryResult::Data { @@ -69,7 +63,7 @@ pub fn run_stackql_query( } if 
rows.is_empty() { - debug!("Stackql query executed successfully, retrieved 0 items.\n\nresults:\n\n[]\n"); + debug!("Query returned no results"); if attempt < retries { thread::sleep(Duration::from_secs(delay as u64)); attempt += 1; @@ -149,7 +143,7 @@ pub fn run_stackql_query( return Vec::new(); } QueryResult::Empty => { - debug!("Empty result from query"); + debug!("Query returned no results"); if attempt < retries { thread::sleep(Duration::from_secs(delay as u64)); attempt += 1; @@ -160,15 +154,12 @@ pub fn run_stackql_query( }, Err(e) => { last_error = Some(e.clone()); - if attempt == retries { - if !suppress_errors { - catch_error_and_exit(&format!( - "Exception during stackql query execution:\n\n{}\n", - e - )); - } - } else { - error!("Exception on attempt {}:\n\n{}\n", attempt + 1, e); + debug!("Query error on attempt {}: {}", attempt + 1, e); + if attempt == retries && !suppress_errors { + catch_error_and_exit(&format!( + "Exception during stackql query execution:\n\n{}\n", + e + )); } } } @@ -177,11 +168,6 @@ pub fn run_stackql_query( attempt += 1; } - debug!( - "All attempts ({}) to execute the query completed.", - retries + 1 - ); - // If suppress_errors and we have an error, return error marker if suppress_errors { if let Some(err) = last_error { @@ -223,57 +209,54 @@ pub fn run_stackql_command( }; while attempt <= retries { - debug!( - "Executing stackql command (attempt {}):\n\n{}\n", - attempt + 1, - processed_command - ); - match execute_query(&processed_command, client) { - Ok(result) => match result { - QueryResult::Data { notices, .. } => { - // Check for errors in notices - for notice in ¬ices { - if error_detected_in_notice(notice) && !ignore_errors { - if attempt < retries { - warn!( - "Dependent resource(s) may not be ready, retrying in {} seconds (attempt {} of {})...", - retry_delay, attempt + 1, retries + 1 + Ok(result) => { + match result { + QueryResult::Data { notices, .. 
} => { + // Check for errors in notices + for notice in ¬ices { + if error_detected_in_notice(notice) && !ignore_errors { + if attempt < retries { + debug!( + "Command notice on attempt {}/{}, retrying in {} seconds: {}", + attempt + 1, retries + 1, retry_delay, notice ); - thread::sleep(Duration::from_secs(retry_delay as u64)); - attempt += 1; - continue; - } else { - catch_error_and_exit(&format!( - "Error during stackql command execution:\n\n{}\n", - notice - )); + thread::sleep(Duration::from_secs(retry_delay as u64)); + attempt += 1; + continue; + } else { + catch_error_and_exit(&format!( + "Error during stackql command execution:\n\n{}\n", + notice + )); + } } } + let msg = notices.join("\n"); + if !msg.is_empty() { + debug!("Stackql command executed successfully:\n\n{}\n", msg); + } + return msg; } - let msg = notices.join("\n"); - if !msg.is_empty() { + QueryResult::Command(msg) => { debug!("Stackql command executed successfully:\n\n{}\n", msg); + return msg; + } + QueryResult::Empty => { + debug!("Command executed with empty result"); + return String::new(); } - return msg; - } - QueryResult::Command(msg) => { - debug!("Stackql command executed successfully:\n\n{}\n", msg); - return msg; - } - QueryResult::Empty => { - debug!("Command executed with empty result"); - return String::new(); } - }, + } Err(e) => { if !ignore_errors { if attempt < retries { - warn!( - "Command failed, retrying in {} seconds (attempt {} of {})...", - retry_delay, + debug!( + "Command returned error on attempt {}/{}, retrying in {} seconds: {}", attempt + 1, - retries + 1 + retries + 1, + retry_delay, + e ); thread::sleep(Duration::from_secs(retry_delay as u64)); attempt += 1; @@ -715,12 +698,6 @@ pub fn run_stackql_dml_returning( let mut attempt = 0u32; while attempt <= retries { - debug!( - "Executing stackql DML (attempt {}):\n\n{}\n", - attempt + 1, - command - ); - match execute_query(command, client) { Ok(result) => match result { QueryResult::Data { @@ -733,9 +710,12 @@ 
pub fn run_stackql_dml_returning( for notice in ¬ices { if error_detected_in_notice(notice) && !ignore_errors { if attempt < retries { - warn!( - "DML error in notice, retrying in {} seconds (attempt {} of {})...", - retry_delay, attempt + 1, retries + 1 + debug!( + "DML notice on attempt {}/{}, retrying in {} seconds: {}", + attempt + 1, + retries + 1, + retry_delay, + notice ); thread::sleep(Duration::from_secs(retry_delay as u64)); attempt += 1; @@ -781,11 +761,12 @@ pub fn run_stackql_dml_returning( Err(e) => { if !ignore_errors { if attempt < retries { - warn!( - "DML failed, retrying in {} seconds (attempt {} of {})...", - retry_delay, + debug!( + "DML error on attempt {}/{}, retrying in {} seconds: {}", attempt + 1, - retries + 1 + retries + 1, + retry_delay, + e ); thread::sleep(Duration::from_secs(retry_delay as u64)); attempt += 1; diff --git a/src/utils/pgwire.rs b/src/utils/pgwire.rs index b9453c8..5064212 100644 --- a/src/utils/pgwire.rs +++ b/src/utils/pgwire.rs @@ -166,7 +166,20 @@ impl PgwireLite { notices.push(parse_notice_fields(&data)); } b'E' => { - return Err(parse_error_fields(&data)); + // Capture the error but DON'T return yet — we must + // drain the stream until ReadyForQuery ('Z') so the + // connection is left in a clean state for the next query. + let err_msg = parse_error_fields(&data); + // Continue reading until ReadyForQuery + loop { + let drain_type = self.read_byte()?; + let drain_len = self.read_i32()? 
as usize; + let _drain_data = self.read_bytes(drain_len.saturating_sub(4))?; + if drain_type == b'Z' { + break; + } + } + return Err(err_msg); } b'I' => {} // EmptyQueryResponse b'Z' => break, // ReadyForQuery — done diff --git a/src/utils/query.rs b/src/utils/query.rs index ccc6766..b75bf6d 100644 --- a/src/utils/query.rs +++ b/src/utils/query.rs @@ -67,16 +67,14 @@ pub fn execute_query(query: &str, client: &mut PgwireLite) -> Result = columns .iter() - .map(|col| { - match row_map.get(&col.name) { - Some(Value::String(s)) => s.clone(), - Some(Value::Null) => "NULL".to_string(), - Some(Value::Bool(b)) => b.to_string(), - Some(Value::Integer(i)) => i.to_string(), - Some(Value::Float(f)) => f.to_string(), - Some(_) => "UNKNOWN_TYPE".to_string(), // For any future value types - None => "NULL".to_string(), - } + .map(|col| match row_map.get(&col.name) { + Some(Value::String(s)) => s.clone(), + Some(Value::Null) => "NULL".to_string(), + Some(Value::Bool(b)) => b.to_string(), + Some(Value::Integer(i)) => i.to_string(), + Some(Value::Float(f)) => f.to_string(), + Some(_) => "UNKNOWN_TYPE".to_string(), + None => "NULL".to_string(), }) .collect(); diff --git a/tests/pgwire_replace_test.rs b/tests/pgwire_replace_test.rs new file mode 100644 index 0000000..798aa0d --- /dev/null +++ b/tests/pgwire_replace_test.rs @@ -0,0 +1,303 @@ +//! Manual test: REPLACE ... RETURNING over pgwire +//! +//! Requires a running stackql server on localhost:5444 with +//! databricks_account provider configured. +//! +//! Run with: +//! 
//! Repro test for `REPLACE ... RETURNING` over the pgwire protocol.
//!
//! Requires a live stackql server listening on localhost:5444; run with:
//!
//!     cargo test --test pgwire_replace_test -- --nocapture --ignored

use std::collections::HashMap;
use std::io::{Read, Write};
use std::net::TcpStream;

/// Read exactly one byte (a pgwire message-type tag) from the stream.
/// Panics on I/O error — acceptable in a test harness.
fn read_byte(stream: &mut TcpStream) -> u8 {
    let mut buf = [0u8; 1];
    stream.read_exact(&mut buf).unwrap();
    buf[0]
}

/// Read a big-endian `i32` (pgwire length prefix / integer field).
fn read_i32(stream: &mut TcpStream) -> i32 {
    let mut buf = [0u8; 4];
    stream.read_exact(&mut buf).unwrap();
    i32::from_be_bytes(buf)
}

/// Read exactly `n` bytes from the stream.
fn read_bytes(stream: &mut TcpStream, n: usize) -> Vec<u8> {
    let mut buf = vec![0u8; n];
    stream.read_exact(&mut buf).unwrap();
    buf
}

/// Parse the payload of an ErrorResponse / NoticeResponse message:
/// a sequence of (1-byte field code, NUL-terminated string) pairs,
/// terminated by a single 0 byte. Field codes are mapped to friendly
/// key names; unrecognized codes are collected under "unknown".
fn parse_error_fields(data: &[u8]) -> HashMap<String, String> {
    let mut fields = HashMap::new();
    let mut pos = 0;
    while pos < data.len() {
        let field_type = data[pos];
        if field_type == 0 {
            // Terminator byte: end of field list.
            break;
        }
        pos += 1;
        // Locate the NUL ending this field's value; tolerate a missing
        // terminator by consuming to the end of the payload.
        let end = data[pos..]
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(data.len() - pos);
        let value = String::from_utf8_lossy(&data[pos..pos + end]).to_string();
        let key = match field_type {
            b'S' => "severity",
            b'V' => "severity_v",
            b'C' => "code",
            b'M' => "message",
            b'D' => "detail",
            b'H' => "hint",
            b'P' => "position",
            b'W' => "where",
            _ => "unknown",
        };
        fields.insert(key.to_string(), value);
        pos += end + 1; // skip value and its NUL terminator
    }
    fields
}

/// Perform the pgwire v3 startup handshake (no authentication) and
/// consume backend messages until ReadyForQuery ('Z') arrives.
/// Panics if the backend responds with an ErrorResponse.
fn startup(stream: &mut TcpStream) {
    // 196608 == 3 << 16: protocol version 3.0.
    const PROTOCOL_V3: i32 = 196608;
    // key\0value\0 pairs, closed by a final NUL.
    let params = b"user\0stackql\0database\0stackql\0\0";
    let total_len = 4 + 4 + params.len();
    let mut msg = Vec::with_capacity(total_len);
    msg.extend_from_slice(&(total_len as i32).to_be_bytes());
    msg.extend_from_slice(&PROTOCOL_V3.to_be_bytes());
    msg.extend_from_slice(params);
    stream.write_all(&msg).unwrap();

    loop {
        let msg_type = read_byte(stream);
        let payload_len = read_i32(stream) as usize;
        // Length prefix includes itself, so the body is len - 4 bytes.
        let data = read_bytes(stream, payload_len.saturating_sub(4));
        match msg_type {
            b'Z' => break, // ReadyForQuery: handshake complete
            b'E' => {
                let fields = parse_error_fields(&data);
                panic!("Startup error: {:?}", fields);
            }
            _ => {} // AuthenticationOk, ParameterStatus, BackendKeyData, ...
        }
    }
    println!("  [startup] Connected and ready");
}

/// Send a simple-protocol Query ('Q') message carrying `sql`.
fn send_query(stream: &mut TcpStream, sql: &str) {
    let sql_bytes = sql.as_bytes();
    // Payload: 4-byte length (self-inclusive) + SQL + NUL terminator.
    let payload_len = 4 + sql_bytes.len() + 1;
    let mut msg = Vec::with_capacity(1 + payload_len);
    msg.push(b'Q');
    msg.extend_from_slice(&(payload_len as i32).to_be_bytes());
    msg.extend_from_slice(sql_bytes);
    msg.push(0u8);
    stream.write_all(&msg).unwrap();
}

/// Everything observed in one simple-query round trip.
struct QueryResponse {
    /// Column names from the RowDescription ('T') message, if any.
    columns: Vec<String>,
    /// Decoded DataRow ('D') values; NULL columns become the string "NULL".
    rows: Vec<Vec<String>>,
    /// Messages from NoticeResponse ('N') frames.
    notices: Vec<String>,
    /// Parsed field maps from ErrorResponse ('E') frames.
    errors: Vec<HashMap<String, String>>,
    /// Tag from CommandComplete ('C'), e.g. "SELECT 1".
    command_tag: Option<String>,
}

/// Drain backend messages until ReadyForQuery ('Z'), accumulating rows,
/// notices, errors and the command tag into a `QueryResponse`.
fn read_response(stream: &mut TcpStream) -> QueryResponse {
    let mut columns = Vec::new();
    let mut rows = Vec::new();
    let mut notices = Vec::new();
    let mut errors = Vec::new();
    let mut command_tag = None;

    loop {
        let msg_type = read_byte(stream);
        let payload_len = read_i32(stream) as usize;
        let data = read_bytes(stream, payload_len.saturating_sub(4));

        match msg_type {
            b'T' => {
                // RowDescription
                let num_fields = u16::from_be_bytes([data[0], data[1]]) as usize;
                let mut pos = 2;
                columns.clear();
                for _ in 0..num_fields {
                    let null_off = data[pos..].iter().position(|&b| b == 0).unwrap();
                    let name = String::from_utf8_lossy(&data[pos..pos + null_off]).to_string();
                    columns.push(name);
                    pos += null_off + 1 + 18; // skip field metadata
                }
                println!("  [T] RowDescription: {:?}", columns);
            }
            b'D' => {
                // DataRow
                let num_cols = u16::from_be_bytes([data[0], data[1]]) as usize;
                let mut pos = 2;
                let mut row = Vec::new();
                for _ in 0..num_cols {
                    // Per-column: 4-byte length (-1 signals SQL NULL), then bytes.
                    let col_len = i32::from_be_bytes(data[pos..pos + 4].try_into().unwrap());
                    pos += 4;
                    if col_len < 0 {
                        row.push("NULL".to_string());
                    } else {
                        let val =
                            String::from_utf8_lossy(&data[pos..pos + col_len as usize]).to_string();
                        row.push(val);
                        pos += col_len as usize;
                    }
                }
                println!("  [D] DataRow: {:?}", row);
                rows.push(row);
            }
            b'C' => {
                // CommandComplete
                let tag =
                    String::from_utf8_lossy(data.strip_suffix(b"\0").unwrap_or(&data)).to_string();
                println!("  [C] CommandComplete: {}", tag);
                command_tag = Some(tag);
            }
            b'N' => {
                // NoticeResponse
                let fields = parse_error_fields(&data);
                let msg = fields.get("message").cloned().unwrap_or_default();
                println!("  [N] Notice: {}", msg);
                notices.push(msg);
            }
            b'E' => {
                // ErrorResponse
                let fields = parse_error_fields(&data);
                let msg = fields.get("message").cloned().unwrap_or_default();
                println!("  [E] ERROR: {}", msg);
                errors.push(fields);
            }
            b'I' => {
                println!("  [I] EmptyQueryResponse");
            }
            b'Z' => {
                // ReadyForQuery carries a 1-byte transaction status.
                let status = if data.is_empty() {
                    '?'
                } else {
                    data[0] as char
                };
                println!("  [Z] ReadyForQuery (status={})", status);
                break;
            }
            _ => {
                println!(
                    "  [{}] Unknown message ({} bytes)",
                    msg_type as char,
                    data.len()
                );
            }
        }
    }

    QueryResponse {
        columns,
        rows,
        notices,
        errors,
        command_tag,
    }
}

/// End-to-end repro: the first `REPLACE ... RETURNING` over pgwire is
/// suspected to fail while an immediate retry succeeds. Exercises the
/// same statement twice on one connection and reports the pattern.
/// Marked `#[ignore]` because it needs a live server on localhost:5444.
#[test]
#[ignore]
fn test_replace_returning_over_pgwire() {
    println!("\n=== REPLACE ... RETURNING over pgwire test ===\n");

    let mut stream = TcpStream::connect("localhost:5444")
        .expect("Failed to connect to stackql server on localhost:5444");

    startup(&mut stream);

    // Test 1: Simple SELECT to confirm connection works
    println!("\n--- Test 1: Simple SELECT ---");
    send_query(&mut stream, "SELECT 1 as test_val;");
    let resp = read_response(&mut stream);
    assert!(resp.errors.is_empty(), "Simple SELECT should not error");
    assert_eq!(resp.rows.len(), 1, "Should return 1 row");
    println!("  PASS: Simple SELECT works\n");

    // Test 2: REPLACE ... RETURNING (first attempt)
    let replace_sql = r#"REPLACE databricks_account.iam.workspace_assignment
SET
permissions = '["ADMIN"]'
WHERE
account_id = 'ebfcc5a9-9d49-4c93-b651-b3ee6cf1c9ce'
AND workspace_id = '7474653260057820'
AND principal_id = 82893155042608
RETURNING
error,
permissions,
principal;"#;

    println!("--- Test 2: REPLACE ... RETURNING (attempt 1) ---");
    send_query(&mut stream, replace_sql);
    let resp1 = read_response(&mut stream);
    println!("  Errors: {}", resp1.errors.len());
    println!("  Rows: {}", resp1.rows.len());
    println!("  Notices: {}", resp1.notices.len());
    println!("  Command tag: {:?}", resp1.command_tag);

    if !resp1.errors.is_empty() {
        println!("  ** FIRST ATTEMPT FAILED (reproduces the bug) **");
        for (i, err) in resp1.errors.iter().enumerate() {
            println!("    Error {}: {:?}", i, err);
        }
    } else {
        println!("  ** FIRST ATTEMPT SUCCEEDED **");
    }

    // Test 3: Same REPLACE ... RETURNING (second attempt)
    println!("\n--- Test 3: REPLACE ... RETURNING (attempt 2) ---");
    send_query(&mut stream, replace_sql);
    let resp2 = read_response(&mut stream);
    println!("  Errors: {}", resp2.errors.len());
    println!("  Rows: {}", resp2.rows.len());
    println!("  Notices: {}", resp2.notices.len());
    println!("  Command tag: {:?}", resp2.command_tag);

    if !resp2.errors.is_empty() {
        println!("  ** SECOND ATTEMPT ALSO FAILED **");
        for (i, err) in resp2.errors.iter().enumerate() {
            println!("    Error {}: {:?}", i, err);
        }
    } else {
        println!("  ** SECOND ATTEMPT SUCCEEDED **");
    }

    // Test 4: Simple SELECT again to verify the connection survived
    println!("\n--- Test 4: Simple SELECT for sanity ---");
    send_query(&mut stream, "SELECT 1 as still_alive;");
    let resp3 = read_response(&mut stream);
    assert!(resp3.errors.is_empty(), "Connection should still be alive");
    println!("  PASS: Connection still alive after REPLACE tests\n");

    // Summary
    println!("=== SUMMARY ===");
    println!(
        "  Attempt 1: {}",
        if resp1.errors.is_empty() {
            "SUCCESS"
        } else {
            "FAILED"
        }
    );
    println!(
        "  Attempt 2: {}",
        if resp2.errors.is_empty() {
            "SUCCESS"
        } else {
            "FAILED"
        }
    );
    if !resp1.errors.is_empty() && resp2.errors.is_empty() {
        println!("  CONCLUSION: Bug reproduced - first attempt fails, second succeeds");
    } else if resp1.errors.is_empty() && resp2.errors.is_empty() {
        println!("  CONCLUSION: Both succeeded - bug not reproduced this time");
    } else {
        println!("  CONCLUSION: Unexpected pattern");
    }
}