diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6ff6a8c..eb547bd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -83,7 +83,7 @@ jobs: query_file_path: ci-scripts/get-contributors.iql query_output: csv - name: Save contributors CSV - run: echo "${{ steps.get-contributors.outputs.stackql-query-results }}" > contributors.csv + run: echo "${{ steps.get-contributors.outputs.stackql-query-results }}" | tail -n +2 > contributors.csv - name: Upload contributors artifact uses: actions/upload-artifact@v7 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3eb9b34..d52bcf7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,7 +46,7 @@ jobs: query_file_path: ci-scripts/get-contributors.iql query_output: csv - name: Save contributors CSV - run: echo "${{ steps.get-contributors.outputs.stackql-query-results }}" > contributors.csv + run: echo "${{ steps.get-contributors.outputs.stackql-query-results }}" | tail -n +2 > contributors.csv - name: Upload contributors artifact uses: actions/upload-artifact@v7 with: diff --git a/.gitignore b/.gitignore index 75cebe2..b0f2d9f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,8 @@ stackql*.pkg stackql_history.txt stackql.log stackql-zip +stackql-deploy +.stackql-deploy-exports .env nohup.out contributors.csv diff --git a/Cargo.lock b/Cargo.lock index a12cdc4..cea14c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1809,7 +1809,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "stackql-deploy" -version = "2.0.4" +version = "2.0.5" dependencies = [ "base64", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 708251b..ada6f91 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stackql-deploy" -version = "2.0.4" +version = "2.0.5" edition = "2021" rust-version = "1.75" description = "Infrastructure-as-code framework for declarative cloud resource 
management using StackQL" diff --git a/examples/aws/aws-vpc-webserver/README.md b/examples/aws/aws-vpc-webserver/README.md index e01164d..44d4c59 100644 --- a/examples/aws/aws-vpc-webserver/README.md +++ b/examples/aws/aws-vpc-webserver/README.md @@ -5,22 +5,17 @@ This example provisions a complete AWS networking stack with an Apache web serve ## Architecture ```mermaid -architecture-beta - group vpc(logos:aws-vpc)[VPC 10.x.0.0/16] - - service subnet(logos:aws-vpc)[Subnet 10.x.1.0/24] in vpc - service rt(logos:aws-route-53)[Route Table] in vpc - service sg(logos:aws-shield)[Security Group] in vpc - service ec2(logos:aws-ec2)[Web Server t2.micro] in vpc - - group edge(logos:aws-cloudfront)[Edge] - - service igw(logos:aws-api-gateway)[Internet Gateway] in edge - - igw:R --> L:rt - rt:B -- T:subnet - sg:R -- L:ec2 - subnet:T -- B:ec2 +flowchart LR + subgraph VPC["VPC 10.x.0.0/16"] + Subnet["Subnet\n10.x.1.0/24"] + RT["Route Table"] + SG["Security Group\nHTTP:80, SSH:22"] + EC2["Web Server\nt2.micro"] + Subnet --> EC2 + SG --> EC2 + end + IGW["Internet\nGateway"] --> RT --> Subnet + Internet(("Internet")) --> IGW ``` ## Resources @@ -62,37 +57,46 @@ architecture-beta ### Deploy ```bash -stackql-deploy build examples/aws/aws-vpc-webserver dev +target/release/stackql-deploy build examples/aws/aws-vpc-webserver dev \ +-e AWS_REGION=${AWS_REGION} ``` With query visibility: ```bash -stackql-deploy build examples/aws/aws-vpc-webserver dev --show-queries +target/release/stackql-deploy build examples/aws/aws-vpc-webserver dev \ +-e AWS_REGION=${AWS_REGION} \ +--show-queries ``` Dry run (no changes): ```bash -stackql-deploy build examples/aws/aws-vpc-webserver dev --dry-run --show-queries +target/release/stackql-deploy build examples/aws/aws-vpc-webserver dev \ +-e AWS_REGION=${AWS_REGION} \ +--dry-run --show-queries ``` ### Test ```bash -stackql-deploy test examples/aws/aws-vpc-webserver dev +target/release/stackql-deploy test examples/aws/aws-vpc-webserver dev \ +-e 
AWS_REGION=${AWS_REGION} ``` ### Teardown ```bash -stackql-deploy teardown examples/aws/aws-vpc-webserver dev +target/release/stackql-deploy teardown examples/aws/aws-vpc-webserver dev \ +-e AWS_REGION=${AWS_REGION} ``` ### Debug mode ```bash -stackql-deploy build examples/aws/aws-vpc-webserver dev --log-level debug +target/release/stackql-deploy build examples/aws/aws-vpc-webserver dev \ +-e AWS_REGION=${AWS_REGION} \ +--log-level debug ``` ## How It Works diff --git a/examples/aws/aws-vpc-webserver/stackql_manifest.yml b/examples/aws/aws-vpc-webserver/stackql_manifest.yml index e7724ee..81e17eb 100644 --- a/examples/aws/aws-vpc-webserver/stackql_manifest.yml +++ b/examples/aws/aws-vpc-webserver/stackql_manifest.yml @@ -2,7 +2,7 @@ version: 1 name: "aws-vpc-webserver" description: Provisions a complete AWS networking stack (VPC, subnet, internet gateway, route table, security group) with an Apache web server EC2 instance. providers: - - awscc + - awscc::v26.03.00379 globals: - name: region description: aws region @@ -143,3 +143,6 @@ resources: props: [] exports: - public_dns_name + +exports: + - public_dns_name \ No newline at end of file diff --git a/examples/aws/patch-doc-test/stackql_manifest.yml b/examples/aws/patch-doc-test/stackql_manifest.yml index c5a52b9..cd3c91c 100644 --- a/examples/aws/patch-doc-test/stackql_manifest.yml +++ b/examples/aws/patch-doc-test/stackql_manifest.yml @@ -2,7 +2,7 @@ version: 1 name: "patch-doc-test" description: Demonstrates the Cloud Control API update (PatchDocument) workflow using an S3 bucket - deploy, modify versioning config, re-deploy to apply the update. 
providers: - - awscc + - awscc::v26.03.00379 globals: - name: region description: aws region diff --git a/examples/azure/azure-stack/README.md b/examples/azure/azure-stack/README.md deleted file mode 100644 index f59d2fc..0000000 --- a/examples/azure/azure-stack/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# `stackql-deploy` starter project for `azure` - -> for starter projects using other providers, try `stackql-deploy my_stack --provider=aws` or `stackql-deploy my_stack --provider=google` - -see the following links for more information on `stackql`, `stackql-deploy` and the `azure` provider: - -- [`azure` provider docs](https://stackql.io/registry/azure) -- [`stackql`](https://github.com/stackql/stackql) -- [`stackql-deploy` on crates.io](https://crates.io/crates/stackql-deploy) -- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) - -## Overview - -__`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-Code (IaC) framework. There is no state file required as the current state is assessed for each resource at runtime. __`stackql-deploy`__ is capable of provisioning, deprovisioning and testing a stack which can include resources across different providers, like a stack spanning `azure` and `azure` for example. - -## Prerequisites - -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `azure` provider, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and optionally `AWS_SESSION_TOKEN` must be set, for more information on authentication to `azure` see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). 
- -> __Note for macOS users__ -> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: -> ```bash -> python3 -m venv myenv -> source myenv/bin/activate -> pip install stackql-deploy -> ``` - -## Usage - -Adjust the values in the [__`stackql_manifest.yml`__](stackql_manifest.yml) file if desired. The [__`stackql_manifest.yml`__](stackql_manifest.yml) file contains resource configuration variables to support multiple deployment environments, these will be used for `stackql` queries in the `resources` and `resources` folders. - -The syntax for the `stackql-deploy` command is as follows: - -```bash -stackql-deploy { build | test | teardown } { stack-directory } { deployment environment} [ optional flags ] -``` - -### Deploying a stack - -For example, to deploy the stack to an environment labeled `sit`, run the following: - -```bash -export AZURE_VM_ADMIN_PASSWORD="Your_password_here1" -stackql-deploy build \ -examples/azure/azure-stack sit \ --e AZURE_SUBSCRIPTION_ID=631d1c6d-2a65-43e7-93c2-688bfe4e1468 \ --e AZURE_VM_ADMIN_PASSWORD=$AZURE_VM_ADMIN_PASSWORD -``` - -Use the `--dry-run` flag to view the queries to be run without actually running them, for example: - -```bash -stackql-deploy build \ -examples/azure/azure-stack sit \ --e AZURE_SUBSCRIPTION_ID=631d1c6d-2a65-43e7-93c2-688bfe4e1468 \ ---dry-run -``` - -### Testing a stack - -To test a stack to ensure that all resources are present and in the desired state, run the following (in our `sit` deployment example): - -```bash -stackql-deploy test \ -examples/azure/azure-stack sit \ --e AZURE_SUBSCRIPTION_ID=631d1c6d-2a65-43e7-93c2-688bfe4e1468 \ --e AZURE_VM_ADMIN_PASSWORD=$AZURE_VM_ADMIN_PASSWORD -``` - -### Tearing down a stack - -To destroy or deprovision all resources in a stack for our `sit` deployment example, run the following: - -```bash -stackql-deploy teardown \ -examples/azure/azure-stack sit \ --e 
AZURE_SUBSCRIPTION_ID=631d1c6d-2a65-43e7-93c2-688bfe4e1468 \ --e AZURE_VM_ADMIN_PASSWORD=$AZURE_VM_ADMIN_PASSWORD -``` \ No newline at end of file diff --git a/examples/azure/azure-web-server/README.md b/examples/azure/azure-web-server/README.md new file mode 100644 index 0000000..1b6df0e --- /dev/null +++ b/examples/azure/azure-web-server/README.md @@ -0,0 +1,94 @@ +# Azure Web Server Example + +This example provisions an Azure networking stack with a web server VM using the `azure` provider. + +## Architecture + +```mermaid +flowchart LR + subgraph RG["Resource Group"] + subgraph VNet["VNet 10.x.0.0/16"] + Subnet["Subnet\n10.x.1.0/24"] + NIC["Network\nInterface"] + Subnet --> NIC + end + NSG["NSG\nHTTP:8080\nSSH:22"] --> NIC + PIP["Public IP"] --> NIC + NIC --> VM["Web Server\nStandard_F1als_v7\nUbuntu 24.04"] + VM --> EXT["Custom Script\nExtension"] + end + Internet(("Internet")) --> PIP +``` + +## Resources + +| # | Resource | Provider Resource | Description | +|---|----------|-------------------|-------------| +| 1 | `example_resource_group` | `azure.resources.resource_groups` | Resource group for all stack resources | +| 2 | `example_vnet` | `azure.network.virtual_networks` | Virtual network with environment-specific CIDR | +| 3 | `example_subnet` | `azure.network.subnets` | Subnet within the VNet | +| 4 | `example_public_ip` | `azure.network.public_ip_addresses` | Static public IP for the VM | +| 5 | `example_nsg` | `azure.network.network_security_groups` | NSG allowing HTTP (8080) and SSH (22 from VNet) | +| 6 | `example_nic` | `azure.network.network_interfaces` | NIC with subnet, public IP, and NSG | +| 7 | `example_web_server` | `azure.compute.virtual_machines` | Ubuntu 24.04 VM (Standard_F1als_v7) | +| 8 | `example_vm_ext` | `azure.compute.virtual_machine_extensions` | Custom script extension to start a web server | + +## Environment-Specific CIDR Blocks + +| Environment | VNet CIDR | Subnet CIDR | +|-------------|-----------|-------------| +| `prd` | 
10.0.0.0/16 | 10.0.1.0/24 | +| `sit` | 10.1.0.0/16 | 10.1.1.0/24 | +| `dev` | 10.2.0.0/16 | 10.2.1.0/24 | + +## Prerequisites + +- `stackql-deploy` installed ([releases](https://github.com/stackql/stackql-deploy-rs/releases)) +- Azure service principal credentials set as environment variables (used for provider authentication): + + ```bash + export AZURE_TENANT_ID=your_tenant_id + export AZURE_CLIENT_ID=your_client_id + export AZURE_CLIENT_SECRET=your_client_secret + ``` + +- Stack-specific variables passed via `-e` flags (mapped to manifest globals): + - `AZURE_SUBSCRIPTION_ID` - your Azure subscription ID + - `AZURE_VM_ADMIN_PASSWORD` - password for the VM admin user + + For more information on authentication, see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). + +## Usage + +### Deploy + +```bash +target/release/stackql-deploy build examples/azure/azure-web-server dev \ + -e AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \ + -e AZURE_VM_ADMIN_PASSWORD=${AZURE_VM_ADMIN_PASSWORD} +``` + +### Test + +```bash +target/release/stackql-deploy test examples/azure/azure-web-server dev \ + -e AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \ + -e AZURE_VM_ADMIN_PASSWORD=${AZURE_VM_ADMIN_PASSWORD} +``` + +### Teardown + +```bash +target/release/stackql-deploy teardown examples/azure/azure-web-server dev \ + -e AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \ + -e AZURE_VM_ADMIN_PASSWORD=${AZURE_VM_ADMIN_PASSWORD} +``` + +### Debug mode + +```bash +target/release/stackql-deploy build examples/azure/azure-web-server dev \ + -e AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \ + -e AZURE_VM_ADMIN_PASSWORD=${AZURE_VM_ADMIN_PASSWORD} \ + --log-level debug +``` diff --git a/examples/azure/azure-stack/resources/example_nic.iql b/examples/azure/azure-web-server/resources/example_nic.iql similarity index 92% rename from examples/azure/azure-stack/resources/example_nic.iql rename to examples/azure/azure-web-server/resources/example_nic.iql index 6b3f1fa..27be6fc 100644 --- 
a/examples/azure/azure-stack/resources/example_nic.iql +++ b/examples/azure/azure-web-server/resources/example_nic.iql @@ -3,9 +3,9 @@ INSERT INTO azure.network.interfaces( networkInterfaceName, resourceGroupName, subscriptionId, - location, - properties, - tags + data__location, + data__properties, + data__tags ) SELECT '{{ nic_name }}', diff --git a/examples/azure/azure-stack/resources/example_nsg.iql b/examples/azure/azure-web-server/resources/example_nsg.iql similarity index 92% rename from examples/azure/azure-stack/resources/example_nsg.iql rename to examples/azure/azure-web-server/resources/example_nsg.iql index 48a24ce..5d37386 100644 --- a/examples/azure/azure-stack/resources/example_nsg.iql +++ b/examples/azure/azure-web-server/resources/example_nsg.iql @@ -3,9 +3,9 @@ INSERT INTO azure.network.security_groups( networkSecurityGroupName, resourceGroupName, subscriptionId, - location, - properties, - tags + data__location, + data__properties, + data__tags ) SELECT '{{ nsg_name }}', diff --git a/examples/azure/azure-stack/resources/example_public_ip.iql b/examples/azure/azure-web-server/resources/example_public_ip.iql similarity index 87% rename from examples/azure/azure-stack/resources/example_public_ip.iql rename to examples/azure/azure-web-server/resources/example_public_ip.iql index b244378..9c5c53e 100644 --- a/examples/azure/azure-stack/resources/example_public_ip.iql +++ b/examples/azure/azure-web-server/resources/example_public_ip.iql @@ -3,15 +3,17 @@ INSERT INTO azure.network.public_ip_addresses( publicIpAddressName, resourceGroupName, subscriptionId, - location, - properties, - tags + data__location, + data__sku, + data__properties, + data__tags ) SELECT '{{ public_ip_name }}', '{{ resource_group_name }}', '{{ subscription_id }}', '{{ location }}', + '{"name":"Standard","tier":"Regional"}', '{"publicIPAllocationMethod":"Static"}', '{{ global_tags }}' diff --git a/examples/azure/azure-stack/resources/example_resource_group.iql 
b/examples/azure/azure-web-server/resources/example_resource_group.iql similarity index 93% rename from examples/azure/azure-stack/resources/example_resource_group.iql rename to examples/azure/azure-web-server/resources/example_resource_group.iql index 265e452..dc9c4b6 100644 --- a/examples/azure/azure-stack/resources/example_resource_group.iql +++ b/examples/azure/azure-web-server/resources/example_resource_group.iql @@ -7,8 +7,8 @@ AND resourceGroupName = '{{ resource_group_name }}' INSERT INTO azure.resources.resource_groups( resourceGroupName, subscriptionId, - location, - tags + data__location, + data__tags ) SELECT '{{ resource_group_name }}', diff --git a/examples/azure/azure-stack/resources/example_subnet.iql b/examples/azure/azure-web-server/resources/example_subnet.iql similarity index 95% rename from examples/azure/azure-stack/resources/example_subnet.iql rename to examples/azure/azure-web-server/resources/example_subnet.iql index 1be82b3..fffb317 100644 --- a/examples/azure/azure-stack/resources/example_subnet.iql +++ b/examples/azure/azure-web-server/resources/example_subnet.iql @@ -4,7 +4,7 @@ INSERT INTO azure.network.subnets( virtualNetworkName, resourceGroupName, subscriptionId, - properties + data__properties ) SELECT '{{ subnet_name }}', diff --git a/examples/azure/azure-stack/resources/example_vm_ext.iql b/examples/azure/azure-web-server/resources/example_vm_ext.iql similarity index 92% rename from examples/azure/azure-stack/resources/example_vm_ext.iql rename to examples/azure/azure-web-server/resources/example_vm_ext.iql index 42b9942..6291d15 100644 --- a/examples/azure/azure-stack/resources/example_vm_ext.iql +++ b/examples/azure/azure-web-server/resources/example_vm_ext.iql @@ -4,9 +4,9 @@ INSERT INTO azure.compute.virtual_machine_extensions( subscriptionId, vmExtensionName, vmName, - location, - properties, - tags + data__location, + data__properties, + data__tags ) SELECT '{{ resource_group_name }}', diff --git 
a/examples/azure/azure-stack/resources/example_vnet.iql b/examples/azure/azure-web-server/resources/example_vnet.iql similarity index 91% rename from examples/azure/azure-stack/resources/example_vnet.iql rename to examples/azure/azure-web-server/resources/example_vnet.iql index 152414b..55fc558 100644 --- a/examples/azure/azure-stack/resources/example_vnet.iql +++ b/examples/azure/azure-web-server/resources/example_vnet.iql @@ -3,9 +3,9 @@ INSERT INTO azure.network.virtual_networks( virtualNetworkName, resourceGroupName, subscriptionId, - location, - properties, - tags + data__location, + data__properties, + data__tags ) SELECT '{{ vnet_name }}', diff --git a/examples/azure/azure-stack/resources/example_web_server.iql b/examples/azure/azure-web-server/resources/example_web_server.iql similarity index 92% rename from examples/azure/azure-stack/resources/example_web_server.iql rename to examples/azure/azure-web-server/resources/example_web_server.iql index f53bdfe..a069441 100644 --- a/examples/azure/azure-stack/resources/example_web_server.iql +++ b/examples/azure/azure-web-server/resources/example_web_server.iql @@ -3,9 +3,9 @@ INSERT INTO azure.compute.virtual_machines( resourceGroupName, subscriptionId, vmName, - location, - properties, - tags + data__location, + data__properties, + data__tags ) SELECT '{{ resource_group_name }}', diff --git a/examples/azure/azure-stack/resources/hello-stackql.html b/examples/azure/azure-web-server/resources/hello-stackql.html similarity index 100% rename from examples/azure/azure-stack/resources/hello-stackql.html rename to examples/azure/azure-web-server/resources/hello-stackql.html diff --git a/examples/azure/azure-stack/stackql_manifest.yml b/examples/azure/azure-web-server/stackql_manifest.yml similarity index 88% rename from examples/azure/azure-stack/stackql_manifest.yml rename to examples/azure/azure-web-server/stackql_manifest.yml index acba86c..831bbd2 100644 --- a/examples/azure/azure-stack/stackql_manifest.yml +++ 
b/examples/azure/azure-web-server/stackql_manifest.yml @@ -1,18 +1,15 @@ -# -# azure starter project manifest file, add and update values as needed -# version: 1 -name: "azure-stack" -description: description for "azure-stack" +name: "azure-web-server" +description: Provisions an Azure networking stack (resource group, VNet, subnet, NSG, public IP, NIC) with an Ubuntu VM running a web server. providers: - - azure + - azure::v24.10.00267 globals: - name: subscription_id description: azure subscription id value: "{{ AZURE_SUBSCRIPTION_ID }}" - name: location description: default location for resources - value: eastus + value: eastus2 - name: admin_password description: vm admin password value: "{{ AZURE_VM_ADMIN_PASSWORD }}" @@ -115,13 +112,13 @@ resources: value: "{{ stack_name }}-{{ stack_env }}-vm" - name: hardwareProfile value: - vmSize: Standard_DS1_v2 + vmSize: Standard_F1als_v7 - name: storageProfile value: imageReference: publisher: Canonical - offer: UbuntuServer - sku: 18.04-LTS + offer: ubuntu-24_04-lts + sku: server version: latest osDisk: name: "{{ stack_name }}-{{ stack_env }}-vm-disk1" @@ -148,7 +145,6 @@ resources: - name: vm_ext_name value: "{{ stack_name }}-{{ stack_env }}-microsoft.custom-script-linux" - name: command_to_execute - value: | - wget -O index.html https://raw.githubusercontent.com/stackql/stackql-deploy/main/examples/azure/azure-stack/resources/hello-stackql.html && nohup busybox httpd -f -p 8080 & + value: "wget -O index.html https://raw.githubusercontent.com/stackql/stackql-deploy/main/examples/azure/azure-stack/resources/hello-stackql.html && nohup busybox httpd -f -p 8080 &" exports: - web_url \ No newline at end of file diff --git a/examples/databricks/classic/README.md b/examples/databricks/classic/README.md index 404f7bc..b293956 100644 --- a/examples/databricks/classic/README.md +++ b/examples/databricks/classic/README.md @@ -69,10 +69,9 @@ For extra credit, you can (asynchronously) delete the unnecessary workspace with Time to 
get down to business. From the root of this repository: ```bash -python3 -m venv myenv source examples/databricks/all-purpose-cluster/convenience.sh source venv/bin/activate -pip install stackql-deploy +# install stackql-deploy from https://github.com/stackql/stackql-deploy-rs/releases ``` > alternatively set the `AWS_REGION`, `AWS_ACCOUNT_ID`, `DATABRICKS_ACCOUNT_ID`, `DATABRICKS_AWS_ACCOUNT_ID` along with provider credentials `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DATABRICKS_CLIENT_ID`, `DATABRICKS_CLIENT_SECRET` diff --git a/examples/databricks/serverless/README.md b/examples/databricks/serverless/README.md index 409894d..3a8aa7f 100644 --- a/examples/databricks/serverless/README.md +++ b/examples/databricks/serverless/README.md @@ -69,10 +69,9 @@ For extra credit, you can (asynchronously) delete the unnecessary workspace with Time to get down to business. From the root of this repository: ```bash -python3 -m venv myenv source examples/databricks/serverless/convenience.sh source venv/bin/activate -pip install stackql-deploy +# install stackql-deploy from https://github.com/stackql/stackql-deploy-rs/releases ``` > alternatively set the `AWS_REGION`, `AWS_ACCOUNT_ID`, `DATABRICKS_ACCOUNT_ID`, `DATABRICKS_AWS_ACCOUNT_ID` along with provider credentials `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DATABRICKS_CLIENT_ID`, `DATABRICKS_CLIENT_SECRET` diff --git a/examples/databricks/serverless/resources/OLD/aws/iam/iam_role.iql b/examples/databricks/serverless/resources/OLD/aws/iam/iam_role.iql deleted file mode 100644 index ff01932..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/iam/iam_role.iql +++ /dev/null @@ -1,69 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM awscc.iam.roles -WHERE Identifier = '{{ RoleName }}'; -- snake case in {{ }} - -/*+ create */ -INSERT INTO awscc.iam.roles ( -AssumeRolePolicyDocument, -Description, -ManagedPolicyArns, -MaxSessionDuration, -Path, -PermissionsBoundary, -Policies, -RoleName, -Tags, -region ) 
-SELECT -'{{ AssumeRolePolicyDocument }}', -- snake case values -'{{ Description }}', -'{{ ManagedPolicyArns }}', -'{{ MaxSessionDuration }}', -'{{ Path }}', -'{{ PermissionsBoundary }}', -'{{ Policies }}', -'{{ RoleName }}', -'{{ Tags }}', -'us-east-1'; --always us-east-1 for iam - -/*+ update */ -UPDATE awscc.iam.roles -SET PatchDocument = string('{{ { -"AssumeRolePolicyDocument": assume_role_policy_document, -"Description": description, -"ManagedPolicyArns": managed_policy_arns, -"MaxSessionDuration": max_session_duration, -"PermissionsBoundary": permissions_boundary, -"Policies": policies, -"Tags": tags -} | generate_patch_document }}') -WHERE region = 'us-east-1' --always us-east-1 for iam -AND Identifier = '{{ RoleName }}'; -- snake case in {{ }} - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM ( - SELECT - max_session_duration, - path, - AWS_POLICY_EQUAL(assume_role_policy_document, '{{ assume_role_policy_document }}') as test_assume_role_policy_doc, - AWS_POLICY_EQUAL(policies, '{{ policies }}') as test_policies - FROM aws.iam.roles - WHERE Identifier = '{{ role_name }}')t -WHERE test_assume_role_policy_doc = 1 -AND test_policies = 1 -AND path = '{{ path }}'; - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -'{{ role_name }}' as aws_iam_role_name, -arn as aws_iam_role_arn -FROM aws.iam.roles -WHERE Identifier = '{{ role_name }}' - -/*+ delete */ -DELETE FROM awscc.iam.roles -WHERE Identifier = '' -- snake case in {{ }} -AND region = 'us-east-1'; - diff --git a/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/ec2_permissions.json b/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/ec2_permissions.json deleted file mode 100644 index d626ee1..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/ec2_permissions.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "Sid": "Stmt1403287045000", - "Effect": "Allow", - "Action": [ - "ec2:AllocateAddress", - 
"ec2:AssociateDhcpOptions", - "ec2:AssociateIamInstanceProfile", - "ec2:AssociateRouteTable", - "ec2:AttachInternetGateway", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupEgress", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CancelSpotInstanceRequests", - "ec2:CreateDhcpOptions", - "ec2:CreateInternetGateway", - "ec2:CreateKeyPair", - "ec2:CreateNatGateway", - "ec2:CreatePlacementGroup", - "ec2:CreateRoute", - "ec2:CreateRouteTable", - "ec2:CreateSecurityGroup", - "ec2:CreateSubnet", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:CreateVpc", - "ec2:CreateVpcEndpoint", - "ec2:DeleteDhcpOptions", - "ec2:DeleteInternetGateway", - "ec2:DeleteKeyPair", - "ec2:DeleteNatGateway", - "ec2:DeletePlacementGroup", - "ec2:DeleteRoute", - "ec2:DeleteRouteTable", - "ec2:DeleteSecurityGroup", - "ec2:DeleteSubnet", - "ec2:DeleteTags", - "ec2:DeleteVolume", - "ec2:DeleteVpc", - "ec2:DeleteVpcEndpoints", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeIamInstanceProfileAssociations", - "ec2:DescribeInstanceStatus", - "ec2:DescribeInstances", - "ec2:DescribeInternetGateways", - "ec2:DescribeNatGateways", - "ec2:DescribePlacementGroups", - "ec2:DescribePrefixLists", - "ec2:DescribeReservedInstancesOfferings", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSpotInstanceRequests", - "ec2:DescribeSpotPriceHistory", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:DescribeVpcs", - "ec2:DescribeVpcAttribute", - "ec2:DescribeNetworkAcls", - "ec2:DetachInternetGateway", - "ec2:DisassociateIamInstanceProfile", - "ec2:DisassociateRouteTable", - "ec2:ModifyVpcAttribute", - "ec2:ReleaseAddress", - "ec2:ReplaceIamInstanceProfileAssociation", - "ec2:ReplaceRoute", - "ec2:RequestSpotInstances", - "ec2:RevokeSecurityGroupEgress", - "ec2:RevokeSecurityGroupIngress", - "ec2:RunInstances", - "ec2:TerminateInstances" - ], - "Resource": ["*"] -} \ No newline at end of file diff --git 
a/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/iam_service_linked_role.json b/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/iam_service_linked_role.json deleted file mode 100644 index 3c099aa..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/iam/policy_statements/iam_service_linked_role.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Effect": "Allow", - "Action": [ - "iam:CreateServiceLinkedRole", - "iam:PutRolePolicy" - ], - "Resource": [ - "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot" - ], - "Condition": { - "StringLike": { - "iam:AWSServiceName": "spot.amazonaws.com" - } - } -} \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/aws/iam/roles.iql b/examples/databricks/serverless/resources/OLD/aws/iam/roles.iql deleted file mode 100644 index 4e78a9d..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/iam/roles.iql +++ /dev/null @@ -1,72 +0,0 @@ -/*+ exists */ -SELECT count(*) as count -FROM awscc.iam.roles -WHERE region = 'us-east-1' AND -Identifier = '{{ role_name }}' -; - -/*+ create */ -INSERT INTO awscc.iam.roles ( - AssumeRolePolicyDocument, - Description, - ManagedPolicyArns, - MaxSessionDuration, - Path, - PermissionsBoundary, - Policies, - RoleName, - Tags, - region -) -SELECT - '{{ assume_role_policy_document }}', - '{{ description }}', - '{{ managed_policy_arns }}', - '{{ max_session_duration }}', - '{{ path }}', - '{{ permissions_boundary }}', - '{{ policies }}', - '{{ role_name }}', - '{{ tags }}', - 'us-east-1'; - -/*+ update */ -UPDATE awscc.iam.roles -SET PatchDocument = string('{{ { - "AssumeRolePolicyDocument": assume_role_policy_document, - "Description": description, - "ManagedPolicyArns": managed_policy_arns, - "MaxSessionDuration": max_session_duration, - "PermissionsBoundary": permissions_boundary, - "Path": path, - "Policies": policies, - "Tags": tags -} | generate_patch_document }}') -WHERE region = 
'us-east-1' -AND Identifier = '{{ role_name }}'; - -/*+ statecheck, retries=5, retry_delay=10 */ -SELECT COUNT(*) as count FROM ( - SELECT - max_session_duration, - path, - AWS_POLICY_EQUAL(assume_role_policy_document, '{{ assume_role_policy_document }}') as test_assume_role_policy_doc, - AWS_POLICY_EQUAL(policies, '{{ policies }}') as test_policies - FROM awscc.iam.roles - WHERE Identifier = '{{ role_name }}' AND region = 'us-east-1')t -WHERE test_assume_role_policy_doc = 1 -AND test_policies = 1 -AND path = '{{ path }}'; - -/*+ exports */ -SELECT -arn, -role_name -FROM awscc.iam.roles -WHERE region = 'us-east-1' AND -Identifier = '{{ role_name }}'; - -/*+ delete */ -DELETE FROM awscc.iam.roles -WHERE Identifier = '{{ role_name }}' -AND region = 'us-east-1'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket.iql b/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket.iql deleted file mode 100644 index 48baabe..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket.iql +++ /dev/null @@ -1,58 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM aws.s3.buckets -WHERE region = '{{ region }}' -AND Identifier = '{{ bucket_name }}' - -/*+ create */ -INSERT INTO aws.s3.buckets ( - BucketName, - OwnershipControls, - BucketEncryption, - PublicAccessBlockConfiguration, - VersioningConfiguration, - Tags, - region -) -SELECT - '{{ bucket_name }}', - '{{ ownership_controls }}', - '{{ bucket_encryption }}', - '{{ public_access_block_configuration }}', - '{{ versioning_configuration }}', - '{{ global_tags }}', - '{{ region }}' - -/*+ update */ -update aws.s3.buckets -set PatchDocument = string('{{ { - "OwnershipControls": ownership_controls, - "BucketEncryption": bucket_encryption, - "PublicAccessBlockConfiguration": public_access_block_configuration, - "VersioningConfiguration": versioning_configuration, - "Tags": global_tags - } | generate_patch_document }}') -WHERE -region = '{{ region }}' -AND 
Identifier = '{{ bucket_name }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -arn, -bucket_name -FROM ( - SELECT - arn, - bucket_name, - JSON_EQUAL(ownership_controls, '{{ ownership_controls }}') as test_ownership_controls, - JSON_EQUAL(bucket_encryption, '{{ bucket_encryption }}') as test_encryption, - JSON_EQUAL(public_access_block_configuration, '{{ public_access_block_configuration }}') as test_public_access_block_configuration, - JSON_EQUAL(versioning_configuration, '{{ versioning_configuration }}') as test_versioning_configuration - FROM aws.s3.buckets - WHERE region = '{{ region }}' - AND Identifier = '{{ bucket_name }}' -)t -WHERE test_ownership_controls = 1 -AND test_encryption = 1 -AND test_public_access_block_configuration = 1 -AND test_versioning_configuration = 1 diff --git a/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket_policy.iql b/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket_policy.iql deleted file mode 100644 index dc6a724..0000000 --- a/examples/databricks/serverless/resources/OLD/aws/s3/s3_bucket_policy.iql +++ /dev/null @@ -1,36 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM aws.s3.bucket_policies -WHERE region = '{{ region }}' -AND bucket = '{{ aws_s3_workspace_bucket_name }}'; - -/*+ create */ -INSERT INTO aws.s3.bucket_policies ( - Bucket, - PolicyDocument, - ClientToken, - region -) -SELECT - '{{ aws_s3_workspace_bucket_name }}', - '{{ policy_document }}', - '{{ uuid() }}', - '{{ region }}' - -/*+ update */ -update aws.s3.bucket_policies -set PatchDocument = string('{{ { - "PolicyDocument": policy_document - } | generate_patch_document }}') -WHERE -region = '{{ region }}' -AND Identifier = '{{ aws_s3_workspace_bucket_name }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM ( - SELECT - AWS_POLICY_EQUAL(policy_document, '{{ policy_document }}') as test_policy_document - FROM aws.s3.bucket_policies - WHERE region = '{{ region }}' - AND Identifier = '{{ 
aws_s3_workspace_bucket_name }}')t -WHERE test_policy_document = 1; diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/credentials.iql b/examples/databricks/serverless/resources/OLD/databricks_account/credentials.iql deleted file mode 100644 index 8c841c1..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/credentials.iql +++ /dev/null @@ -1,31 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' -AND credentials_name = '{{ credentials_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.credentials ( -account_id, -credentials_name, -aws_credentials -) -SELECT -'{{ databricks_account_id }}', -'{{ credentials_name }}', -'{{ aws_credentials }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -'{{ credentials_name }}' as databricks_credentials_name, -credentials_id as databricks_credentials_id, -JSON_EXTRACT(aws_credentials, '$.sts_role.external_id') as databricks_role_external_id -FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' -AND credentials_name = '{{ credentials_name }}' -AND JSON_EXTRACT(aws_credentials, '$.sts_role.role_arn') = '{{ aws_iam_cross_account_role_arn }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' AND -credentials_id = '{{ databricks_credentials_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/get_users.iql b/examples/databricks/serverless/resources/OLD/databricks_account/get_users.iql deleted file mode 100644 index e94c2d7..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/get_users.iql +++ /dev/null @@ -1,6 +0,0 @@ -/*+ exports, retries=3, retry_delay=5 */ -SELECT -JSON_GROUP_ARRAY(JSON_OBJECT('value', id)) as databricks_workspace_group_members -FROM 
databricks_account.iam.users -WHERE account_id = '{{ databricks_account_id }}' -AND userName in {{ users | sql_list }}; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/network.iql b/examples/databricks/serverless/resources/OLD/databricks_account/network.iql deleted file mode 100644 index 541e3ed..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/network.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' -AND network_name = '{{ databricks_network_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.networks ( -account_id, -network_name, -vpc_id, -subnet_ids, -security_group_ids -) -SELECT -'{{ databricks_account_id }}', -'{{ databricks_network_name }}', -'{{ vpc_id }}', -'{{ subnet_ids }}', -'{{ security_group_ids }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -network_id as databricks_network_id -FROM ( -SELECT -network_id, -JSON_EQUAL(subnet_ids, '{{ subnet_ids }}') as subnet_test, -JSON_EQUAL(security_group_ids, '{{ security_group_ids }}') as sg_test -FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' -AND network_name = '{{ databricks_network_name }}' -AND vpc_id = '{{ vpc_id }}' -)t -WHERE subnet_test = 1 -AND sg_test = 1 - -/*+ delete */ -DELETE FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' AND -network_id = '{{ databricks_network_id }}' \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/storage_configuration.iql b/examples/databricks/serverless/resources/OLD/databricks_account/storage_configuration.iql deleted file mode 100644 index c118b4e..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/storage_configuration.iql +++ /dev/null @@ -1,29 +0,0 @@ -/*+ 
exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.storage ( -account_id, -storage_configuration_name, -root_bucket_info -) -SELECT -'{{ databricks_account_id }}', -'{{ storage_configuration_name }}', -'{{ root_bucket_info }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -storage_configuration_id as databricks_storage_configuration_id -FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' -AND JSON_EXTRACT(root_bucket_info, '$.bucket_name') = '{{ aws_s3_workspace_bucket_name }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' AND -storage_configuration_id = '{{ databricks_storage_configuration_id }}' \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql b/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql deleted file mode 100644 index 10f36fa..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/update_group_membership.iql +++ /dev/null @@ -1,6 +0,0 @@ -/*+ command */ -update databricks_account.iam.groups -set schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', -Operations = '[{"op": "replace", "path": "members", "value": {{ databricks_workspace_group_members }} }]' -WHERE account_id = '{{ databricks_account_id }}' -AND id = '{{ databricks_group_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/workspace.iql b/examples/databricks/serverless/resources/OLD/databricks_account/workspace.iql deleted file mode 100644 index 1094860..0000000 --- 
a/examples/databricks/serverless/resources/OLD/databricks_account/workspace.iql +++ /dev/null @@ -1,42 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' -AND workspace_name = '{{ workspace_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.workspaces ( -account_id, -workspace_name, -aws_region, -credentials_id, -storage_configuration_id, -pricing_tier -) -SELECT -'{{ databricks_account_id }}', -'{{ workspace_name }}', -'{{ aws_region }}', -'{{ credentials_id }}', -'{{ storage_configuration_id }}', -'{{ pricing_tier }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -'{{ workspace_name }}' AS databricks_workspace_name, -workspace_id AS databricks_workspace_id, -deployment_name AS databricks_deployment_name, -workspace_status AS databricks_workspace_status, -'https://' || deployment_name || '.cloud.databricks.com' AS databricks_workspace_url -FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' -AND workspace_name = '{{ workspace_name }}' -AND aws_region = '{{ aws_region }}' -AND credentials_id = '{{ credentials_id }}' -AND storage_configuration_id = '{{ storage_configuration_id }}' -AND pricing_tier = '{{ pricing_tier }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/workspace_group.iql b/examples/databricks/serverless/resources/OLD/databricks_account/workspace_group.iql deleted file mode 100644 index 4a2cfcc..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/workspace_group.iql +++ /dev/null @@ -1,26 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' -AND 
displayName = '{{ display_name }}' - -/*+ create */ -INSERT INTO databricks_account.iam.groups ( -account_id, -displayName -) -SELECT -'{{ databricks_account_id }}', -'{{ display_name }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT id AS databricks_group_id, -displayName AS databricks_group_name -FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' -AND displayName = '{{ display_name }}' - -/*+ delete */ -DELETE FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' AND -id = '{{ databricks_group_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_account/workspace_permission_assignments.iql b/examples/databricks/serverless/resources/OLD/databricks_account/workspace_permission_assignments.iql deleted file mode 100644 index 6051aba..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_account/workspace_permission_assignments.iql +++ /dev/null @@ -1,32 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.iam.workspace_permission_assignments -WHERE account_id = '{{ databricks_account_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' -AND JSON_EXTRACT(principal, '$.principal_id') = {{ databricks_group_id }} - -/*+ createorupdate */ -INSERT INTO databricks_account.iam.workspace_permission_assignments ( -account_id, -principal_id, -workspace_id, -permissions -) -SELECT -'{{ databricks_account_id }}', -'{{ databricks_group_id }}', -'{{ databricks_workspace_id }}', -'["ADMIN"]' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM databricks_account.iam.workspace_permission_assignments -WHERE account_id = '{{ databricks_account_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' -AND JSON_EXTRACT(principal, '$.principal_id') = {{ databricks_group_id }} - -/*+ delete */ -DELETE FROM databricks_account.iam.workspace_permission_assignments -WHERE account_id = '{{ 
databricks_account_id }}' AND -principal_id = '{{ databricks_group_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_workspace/external_location.iql b/examples/databricks/serverless/resources/OLD/databricks_workspace/external_location.iql deleted file mode 100644 index 0df1518..0000000 --- a/examples/databricks/serverless/resources/OLD/databricks_workspace/external_location.iql +++ /dev/null @@ -1,40 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_workspace.unitycatalog.external_locations -WHERE name = '{{ name | replace('-', '_') }}' AND -deployment_name = '{{ databricks_deployment_name }}'; - -/*+ create */ -INSERT INTO databricks_workspace.unitycatalog.external_locations ( -deployment_name, -name, -url, -credential_name, -read_only, -comment, -skip_validation -) -SELECT -'{{ databricks_deployment_name }}', -'{{ name | replace('-', '_') }}', -'{{ url }}', -'{{ credential_name | replace('-', '_') }}', -{{ read_only }}, -'{{ comment }}', -{{ skip_validation }} -; - -/*+ exports, retries=3, retry_delay=5 */ -SELECT name as external_location_name -FROM databricks_workspace.unitycatalog.external_locations -WHERE name = '{{ name | replace('-', '_') }}' AND -deployment_name = '{{ databricks_deployment_name }}' -AND url = '{{ url }}' AND -credential_name = '{{ credential_name | replace('-', '_') }}' AND -read_only = {{ read_only }} AND -comment = '{{ comment }}'; - -/*+ delete */ -DELETE FROM databricks_workspace.unitycatalog.external_locations -WHERE name = '{{ name | replace('-', '_') }}' AND -deployment_name = '{{ databricks_deployment_name }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/databricks_workspace/storage_credential.iql b/examples/databricks/serverless/resources/OLD/databricks_workspace/storage_credential.iql deleted file mode 100644 index 2ade455..0000000 --- 
a/examples/databricks/serverless/resources/OLD/databricks_workspace/storage_credential.iql +++ /dev/null @@ -1,37 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}'; - -/*+ create */ -INSERT INTO databricks_workspace.unitycatalog.storage_credentials ( -deployment_name, -name, -comment, -read_only, -aws_iam_role, -skip_validation -) -SELECT -'{{ databricks_deployment_name }}', -'{{ name | replace('-', '_') | upper }}', -'{{ comment }}', -'{{ read_only }}', -'{{ aws_iam_role }}', -'{{ skip_validation }}' -; - -/*+ exports, retries=3, retry_delay=5 */ -SELECT -name as storage_credential_name, -JSON_EXTRACT(aws_iam_role, '$.external_id') as storage_credential_external_id -FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}' AND -JSON_EXTRACT(aws_iam_role, '$.role_arn') = '{{ metastore_access_role_arn }}'; - -/*+ delete */ -DELETE FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/OLD/aws/iam/update_metastore_access_role.iql b/examples/databricks/serverless/resources/aws/iam/update_metastore_access_role.iql similarity index 87% rename from examples/databricks/serverless/resources/OLD/aws/iam/update_metastore_access_role.iql rename to examples/databricks/serverless/resources/aws/iam/update_metastore_access_role.iql index 0bc750a..c7941b5 100644 --- a/examples/databricks/serverless/resources/OLD/aws/iam/update_metastore_access_role.iql +++ b/examples/databricks/serverless/resources/aws/iam/update_metastore_access_role.iql @@ -1,5 +1,5 @@ /*+ command */ -update aws.iam.roles +update 
awscc.iam.roles set PatchDocument = string('{{ { "AssumeRolePolicyDocument": assume_role_policy_document } | generate_patch_document }}') diff --git a/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql b/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql index 1b4ecb9..3169508 100644 --- a/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql +++ b/examples/databricks/serverless/resources/databricks_account/update_group_membership.iql @@ -1,6 +1,6 @@ /*+ command */ -update databricks_account.iam.account_groups -set schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', +UPDATE databricks_account.iam.account_groups +SET schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', Operations = '[{"op": "replace", "path": "members", "value": {{ databricks_workspace_group_members }} }]' WHERE account_id = '{{ databricks_account_id }}' -AND id = '{{ databricks_group_id }}'; +AND id = '{{ databricks_group_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_workspace/external_locations.iql b/examples/databricks/serverless/resources/databricks_workspace/external_locations.iql new file mode 100644 index 0000000..11d1ab5 --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_workspace/external_locations.iql @@ -0,0 +1,53 @@ +/*+ exists */ +SELECT +count(*) as count +FROM databricks_workspace.catalog.external_locations +WHERE name = '{{ name }}' +AND deployment_name = '{{ deployment_name }}' +; + +/*+ create */ +INSERT INTO databricks_workspace.catalog.external_locations ( +name, +url, +credential_name, +comment, +read_only, +skip_validation, +deployment_name +) +SELECT +'{{ name }}', +'{{ url }}', +'{{ credential_name }}', +'{{ comment }}', +{{ read_only }}, +{{ skip_validation }}, +'{{ deployment_name }}' +RETURNING * +; + +/*+ update */ +UPDATE databricks_workspace.catalog.external_locations 
+SET +comment = '{{ comment }}', +credential_name = '{{ credential_name }}', +read_only = {{ read_only }}, +skip_validation = {{ skip_validation }}, +url = '{{ url }}' +WHERE name = '{{ name }}' +AND deployment_name = '{{ deployment_name }}' +RETURNING *; + +/*+ exports, retries=5, retry_delay=10 */ +SELECT name as external_location_name +FROM databricks_workspace.catalog.external_locations +WHERE deployment_name = '{{ deployment_name }}' +AND name = '{{ name }}' +; + +/*+ delete */ +DELETE FROM databricks_workspace.catalog.external_locations +WHERE name = '{{ name }}' +AND deployment_name = '{{ deployment_name }}' +RETURNING *; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/databricks_workspace/storage_credentials.iql b/examples/databricks/serverless/resources/databricks_workspace/storage_credentials.iql new file mode 100644 index 0000000..aaf061a --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_workspace/storage_credentials.iql @@ -0,0 +1,62 @@ +/*+ exists */ +SELECT id as storage_credential_id +FROM databricks_workspace.catalog.storage_credentials +WHERE deployment_name = '{{ deployment_name }}' +AND name = '{{ name }}' +; + +/*+ create */ +INSERT INTO databricks_workspace.catalog.storage_credentials ( +name, +aws_iam_role, +comment, +read_only, +skip_validation, +deployment_name +) +SELECT +'{{ name }}', +'{{ aws_iam_role }}', +'{{ comment }}', +{{ read_only }}, +{{ skip_validation }}, +'{{ deployment_name }}' +RETURNING +* +; + +/*+ update */ +UPDATE databricks_workspace.catalog.storage_credentials +SET +aws_iam_role = '{{ aws_iam_role }}', +comment = '{{ comment }}', +-- force = {{ force }}, +-- isolation_mode = '{{ isolation_mode }}', +-- owner = '{{ owner }}', +read_only = {{ read_only }}, +skip_validation = {{ skip_validation }} +WHERE +name = '{{ name }}' +AND deployment_name = '{{ deployment_name }}' +RETURNING +*; + +/*+ exports, retries=5, retry_delay=10 */ +SELECT +id as storage_credential_id, 
+name as storage_credential_name, +JSON_EXTRACT(aws_iam_role, '$.external_id') as storage_credential_external_id +FROM databricks_workspace.catalog.storage_credentials +WHERE deployment_name = '{{ deployment_name }}' +AND name = '{{ name }}' +AND comment = '{{ comment }}' +AND read_only = {{ read_only }} +AND JSON_EXTRACT(aws_iam_role, '$.role_arn') = '{{ aws_metastore_access_role.arn }}' +; + +/*+ delete */ +DELETE FROM databricks_workspace.catalog.storage_credentials +WHERE name = '{{ name }}' +AND deployment_name = '{{ deployment_name }}' +AND force = '{{ force }}' +; \ No newline at end of file diff --git a/examples/databricks/serverless/stackql_manifest.yml b/examples/databricks/serverless/stackql_manifest.yml index 872d473..dccc7e9 100644 --- a/examples/databricks/serverless/stackql_manifest.yml +++ b/examples/databricks/serverless/stackql_manifest.yml @@ -2,9 +2,9 @@ version: 1 name: "stackql-serverless" description: creates a serverless databricks workspace providers: - - awscc - - databricks_account - - databricks_workspace + - awscc::v26.03.00379 + - databricks_account::v26.03.00381 + - databricks_workspace::v26.03.00381 globals: - name: databricks_account_id description: databricks account id @@ -306,90 +306,97 @@ resources: - name: databricks_account/workspace_assignment props: [] - # - name: databricks_workspace/storage_credential - # props: - # - name: name - # value: "{{ stack_name }}_{{ stack_env }}_storage_credential" - # - name: comment - # value: "Storage credential for {{ stack_name }} {{ stack_env }} metastore S3 access" - # - name: read_only - # value: false - # - name: aws_iam_role - # value: - # role_arn: "{{ metastore_access_role_arn }}" - # - name: skip_validation - # value: false - # exports: - # - storage_credential_name - # - storage_credential_external_id + - name: databricks_workspace/storage_credentials + props: + - name: name + value: "{{ stack_name }}_{{ stack_env }}_storage_credential" + - name: comment + value: "Storage 
credential for {{ stack_name }} {{ stack_env }} metastore S3 access" + - name: read_only + value: false + - name: aws_iam_role + value: + role_arn: "{{ aws_metastore_access_role.arn }}" + - name: skip_validation + value: false + exports: + - storage_credential_id + - storage_credential_name + - storage_credential_external_id - # - name: aws/iam/update_metastore_access_role - # type: command - # props: - # - name: role_name - # value: "{{ stack_name }}-{{ stack_env }}-metastore-role" - # - name: assume_role_policy_document - # value: - # Version: "2012-10-17" - # Statement: - # - Effect: "Allow" - # Principal: - # AWS: - # - "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" - # - "{{ metastore_access_role_arn }}" - # Action: "sts:AssumeRole" - # Condition: - # StringEquals: - # sts:ExternalId: "{{ storage_credential_external_id }}" + - name: aws/iam/update_metastore_access_role + type: command + props: + - name: role_name + value: "{{ aws_metastore_access_role.role_name }}" + - name: assume_role_policy_document + value: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Principal: + AWS: + - "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + - "{{ aws_metastore_access_role.arn }}" + Action: "sts:AssumeRole" + Condition: + StringEquals: + sts:ExternalId: "{{ storage_credential_external_id }}" - # - name: databricks_workspace/unitycatalog/credential_grants - # type: command - # props: - # - name: privileges - # value: - # - "ALL_PRIVILEGES" - # - "MANAGE" - # sql: | - # UPDATE databricks_workspace.unitycatalog.grants - # SET changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' - # WHERE full_name = '{{ storage_credential_name }}' AND - # securable_type = 'storage_credential' AND - # deployment_name = '{{ databricks_deployment_name }}'; + - name: databricks_credential_grants + type: command + props: + - name: privileges + value: + - "ALL_PRIVILEGES" + - "MANAGE" + sql: | + 
UPDATE databricks_workspace.catalog.grants + SET changes = '[{"add": {{ privileges }},"principal": "{{ workspace_admins_group.display_name }}"}]' + WHERE full_name = '{{ storage_credential_name }}' AND + securable_type = 'storage_credential' AND + deployment_name = '{{ deployment_name }}' + RETURNING + privilege_assignments; - # - name: databricks_workspace/external_location - # props: - # - name: name - # value: "{{ stack_name }}_{{ stack_env }}_external_location" - # - name: comment - # value: "External location for {{ stack_name }} {{ stack_env }} metastore S3 access" - # - name: url - # value: "s3://{{ aws_s3_metastore_bucket_name }}/unitycatalog/demo" - # - name: credential_name - # value: "{{ storage_credential_name }}" - # - name: read_only - # value: false - # - name: skip_validation - # value: false - # exports: - # - external_location_name + - name: external_location + file: databricks_workspace/external_locations.iql + props: + - name: name + value: "{{ stack_name }}_{{ stack_env }}_external_location" + - name: comment + value: "External location for {{ stack_name }} {{ stack_env }} metastore S3 access" + - name: url + value: "s3://{{ aws_s3_metastore_bucket.bucket_name }}/unitycatalog/demo" + - name: credential_name + value: "{{ storage_credential_name }}" + - name: read_only + value: false + - name: skip_validation + value: false + exports: + - external_location_name - # - name: databricks_workspace/unitycatalog/location_grants - # type: command - # props: - # - name: privileges - # value: - # - "ALL_PRIVILEGES" - # - "MANAGE" - # sql: | - # UPDATE databricks_workspace.unitycatalog.grants - # SET changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' - # WHERE full_name = '{{ external_location_name }}' AND - # securable_type = 'external_location' AND - # deployment_name = '{{ databricks_deployment_name }}'; + - name: databricks_workspace/unitycatalog/location_grants + type: command + props: + - name: privileges + value: + - 
"ALL_PRIVILEGES" + - "MANAGE" + sql: | + UPDATE databricks_workspace.catalog.grants + SET changes = '[{"add": {{ privileges }},"principal": "{{ workspace_admins_group.display_name }}"}]' + WHERE + securable_type = 'external_location' + AND full_name = '{{ external_location_name }}' + AND deployment_name = '{{ deployment_name }}' + RETURNING + privilege_assignments; -# exports: -# - databricks_workspace_name -# - databricks_workspace_id -# - databricks_deployment_name -# - databricks_workspace_status -# - databricks_workspace_url \ No newline at end of file +exports: + - workspace_name + - workspace_id + - deployment_name + - workspace_status + - workspace_url \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/README.md b/examples/databricks/snowflake-interoperability/README.md index 409894d..3a8aa7f 100644 --- a/examples/databricks/snowflake-interoperability/README.md +++ b/examples/databricks/snowflake-interoperability/README.md @@ -69,10 +69,9 @@ For extra credit, you can (asynchronously) delete the unnecessary workspace with Time to get down to business. From the root of this repository: ```bash -python3 -m venv myenv source examples/databricks/serverless/convenience.sh source venv/bin/activate -pip install stackql-deploy +install stackql-deploy from https://github.com/stackql/stackql-deploy-rs/releases ``` > alternatively set the `AWS_REGION`, `AWS_ACCOUNT_ID`, `DATABRICKS_ACCOUNT_ID`, `DATABRICKS_AWS_ACCOUNT_ID` along with provider credentials `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DATABRICKS_CLIENT_ID`, `DATABRICKS_CLIENT_SECRET` diff --git a/examples/google/google-web-server/README.md b/examples/google/google-web-server/README.md new file mode 100644 index 0000000..8c0a90e --- /dev/null +++ b/examples/google/google-web-server/README.md @@ -0,0 +1,80 @@ +# Google Cloud Web Server Example + +This example provisions a Google Cloud networking stack with an Apache web server using the `google` provider. 
+ +## Architecture + +```mermaid +flowchart LR + subgraph VPC["VPC Network"] + Subnet["Subnet\n10.x.0.0/24"] + VM["Web Server\ne2-micro\nDebian 12"] + Subnet --> VM + end + FW_HTTP["Firewall\nHTTP :80"] --> VM + FW_SSH["Firewall\nSSH :22"] --> VM + IP["Static\nPublic IP"] --> VM + Internet(("Internet")) --> FW_HTTP + Internet --> IP +``` + +## Resources + +| # | Resource | Provider Resource | Description | +|---|----------|-------------------|-------------| +| 1 | `example_network` | `google.compute.networks` | Custom-mode VPC network | +| 2 | `example_subnetwork` | `google.compute.subnetworks` | Regional subnet with private Google access | +| 3 | `example_firewall_http` | `google.compute.firewalls` | Allow HTTP (port 80) from anywhere | +| 4 | `example_firewall_ssh` | `google.compute.firewalls` | Allow SSH (port 22, restricted by environment) | +| 5 | `example_public_ip` | `google.compute.addresses` | Static external IP address | +| 6 | `example_web_server` | `google.compute.instances` | e2-micro VM running Apache on Debian 12 | +| 7 | `get_web_server_url` | *(query)* | Constructs the web server URL from the static IP | + +## Environment-Specific CIDR Blocks + +| Environment | Subnet CIDR | SSH Source | +|-------------|-------------|-----------| +| `prd` | 10.0.0.0/24 | VPC only (10.0.0.0/24) | +| `sit` | 10.1.0.0/24 | VPC only (10.1.0.0/24) | +| `dev` | 10.2.0.0/24 | Anywhere (0.0.0.0/0) | + +## Prerequisites + +- `stackql-deploy` installed ([releases](https://github.com/stackql/stackql-deploy-rs/releases)) +- Google Cloud credentials: + +```bash +export GOOGLE_CREDENTIALS=$(cat path/to/sa-key.json) +export GOOGLE_PROJECT=stackql-demo +export GOOGLE_REGION=us-central1 +export GOOGLE_ZONE=us-central1-a + ``` + +## Usage + +### Deploy + +```bash +target/release/stackql-deploy build \ +examples/google/google-web-server dev \ +-e GOOGLE_PROJECT=${GOOGLE_PROJECT} \ +-e GOOGLE_REGION=${GOOGLE_REGION} \ +-e GOOGLE_ZONE=${GOOGLE_ZONE} +``` +### Test + +```bash 
+stackql-deploy test examples/google/google-web-server dev +``` + +### Teardown + +```bash +stackql-deploy teardown examples/google/google-web-server dev +``` + +### Debug mode + +```bash +stackql-deploy build examples/google/google-web-server dev --log-level debug +``` diff --git a/examples/google/google-web-server/resources/example_network.iql b/examples/google/google-web-server/resources/example_network.iql new file mode 100644 index 0000000..973d1b3 --- /dev/null +++ b/examples/google/google-web-server/resources/example_network.iql @@ -0,0 +1,36 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM google.compute.networks +WHERE name = '{{ vpc_name }}' +AND project = '{{ project }}' + +/*+ create */ +INSERT INTO google.compute.networks +( + project, + name, + autoCreateSubnetworks, + routingConfig +) +SELECT + '{{ project }}', + '{{ vpc_name }}', + false, + '{"routingMode": "REGIONAL"}' + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM google.compute.networks +WHERE name = '{{ vpc_name }}' +AND project = '{{ project }}' +AND autoCreateSubnetworks = false + +/*+ exports */ +SELECT + '{{ vpc_name }}' as vpc_name, + selfLink as vpc_link +FROM google.compute.networks +WHERE name = '{{ vpc_name }}' +AND project = '{{ project }}' + +/*+ delete, retries=20, retry_delay=10 */ +DELETE FROM google.compute.networks +WHERE network = '{{ vpc_name }}' AND project = '{{ project }}' diff --git a/examples/google/google-web-server/resources/example_public_ip.iql b/examples/google/google-web-server/resources/example_public_ip.iql new file mode 100644 index 0000000..bb04593 --- /dev/null +++ b/examples/google/google-web-server/resources/example_public_ip.iql @@ -0,0 +1,36 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM google.compute.addresses +WHERE name = '{{ address_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' + +/*+ create */ +INSERT INTO google.compute.addresses +( + project, + region, + name +) +SELECT + '{{ project }}', + '{{ 
region }}', + '{{ address_name }}' + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM google.compute.addresses +WHERE name = '{{ address_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' + +/*+ exports */ +SELECT address +FROM google.compute.addresses +WHERE name = '{{ address_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' + +/*+ delete */ +DELETE FROM google.compute.addresses +WHERE address = '{{ address_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' diff --git a/examples/google/google-web-server/resources/example_subnetwork.iql b/examples/google/google-web-server/resources/example_subnetwork.iql new file mode 100644 index 0000000..353687a --- /dev/null +++ b/examples/google/google-web-server/resources/example_subnetwork.iql @@ -0,0 +1,46 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM google.compute.subnetworks +WHERE subnetwork = '{{ subnet_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' +AND network = '{{ vpc_link }}' + +/*+ create, retries=5, retry_delay=10 */ +INSERT INTO google.compute.subnetworks +( + project, + region, + name, + network, + ipCidrRange, + privateIpGoogleAccess +) +SELECT + '{{ project }}', + '{{ region }}', + '{{ subnet_name }}', + '{{ vpc_link }}', + '{{ ip_cidr_range }}', + true + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM google.compute.subnetworks +WHERE project = '{{ project }}' +AND region = '{{ region }}' +AND subnetwork = '{{ subnet_name }}' +AND network = '{{ vpc_link }}' + +/*+ exports */ +SELECT + name as subnet_name, + selfLink as subnet_link +FROM google.compute.subnetworks +WHERE subnetwork = '{{ subnet_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' + +/*+ delete */ +DELETE FROM google.compute.subnetworks +WHERE subnetwork = '{{ subnet_name }}' +AND project = '{{ project }}' +AND region = '{{ region }}' diff --git 
a/examples/google/google-web-server/resources/example_web_server.iql b/examples/google/google-web-server/resources/example_web_server.iql new file mode 100644 index 0000000..c93d3fb --- /dev/null +++ b/examples/google/google-web-server/resources/example_web_server.iql @@ -0,0 +1,53 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM google.compute.instances +WHERE project = '{{ project }}' +AND zone = '{{ zone }}' +AND instance = '{{ instance_name }}' + +/*+ create */ +INSERT INTO google.compute.instances +( + zone, + project, + name, + machineType, + canIpForward, + deletionProtection, + scheduling, + networkInterfaces, + disks, + tags, + metadata +) +SELECT + '{{ zone }}', + '{{ project }}', + '{{ instance_name }}', + '{{ machine_type }}', + false, + false, + '{"automaticRestart": true}', + '{{ network_interfaces }}', + '{{ disks }}', + '{{ tags }}', + '{{ metadata }}' + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM google.compute.instances +WHERE project = '{{ project }}' +AND zone = '{{ zone }}' +AND instance = '{{ instance_name }}' +AND status = 'RUNNING' + +/*+ exports */ +SELECT name as instance_name +FROM google.compute.instances +WHERE project = '{{ project }}' +AND zone = '{{ zone }}' +AND instance = '{{ instance_name }}' + +/*+ delete, retries=10, retry_delay=10 */ +DELETE FROM google.compute.instances +WHERE project = '{{ project }}' +AND zone = '{{ zone }}' +AND instance = '{{ instance_name }}' diff --git a/examples/google/google-web-server/resources/firewalls.iql b/examples/google/google-web-server/resources/firewalls.iql new file mode 100644 index 0000000..8844513 --- /dev/null +++ b/examples/google/google-web-server/resources/firewalls.iql @@ -0,0 +1,54 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM google.compute.firewalls +WHERE project = '{{ project }}' +AND name = '{{ fw_name }}' + +/*+ create */ +INSERT INTO google.compute.firewalls +( + project, + name, + network, + direction, + sourceRanges, + allowed +) +SELECT 
+ '{{ project }}', + '{{ fw_name }}', + '{{ vpc_link }}', + '{{ fw_direction }}', + '{{ fw_source_ranges }}', + '{{ fw_allowed }}' + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM +( +SELECT + network = '{{ vpc_link }}' as test_network, + direction = '{{ fw_direction }}' as test_direction, + JSON_EQUAL(allowed, '{{ fw_allowed }}') as test_allowed, + JSON_EQUAL(sourceRanges, '{{ fw_source_ranges }}') as test_source_ranges +FROM google.compute.firewalls +WHERE project = '{{ project }}' +AND name = '{{ fw_name }}' +) t +WHERE test_network = 1 +AND test_direction = 1 +AND test_allowed = 1 +AND test_source_ranges = 1 + +/*+ update */ +UPDATE google.compute.firewalls +SET + network = '{{ vpc_link }}', + direction = '{{ fw_direction }}', + sourceRanges = '{{ fw_source_ranges }}', + allowed = '{{ fw_allowed }}' +WHERE firewall = '{{ fw_name }}' +AND project = '{{ project }}' + +/*+ delete, retries=20, retry_delay=10 */ +DELETE FROM google.compute.firewalls +WHERE project = '{{ project }}' +AND firewall = '{{ fw_name }}' diff --git a/examples/google/google-web-server/resources/get_web_server_code.iql b/examples/google/google-web-server/resources/get_web_server_code.iql new file mode 100644 index 0000000..ac72ab9 --- /dev/null +++ b/examples/google/google-web-server/resources/get_web_server_code.iql @@ -0,0 +1,2 @@ +/*+ exports */ +SELECT '{{ web_server_code | base64 }}' as web_server_code_base64 \ No newline at end of file diff --git a/examples/google/google-web-server/resources/get_web_server_url.iql b/examples/google/google-web-server/resources/get_web_server_url.iql new file mode 100644 index 0000000..4dfdcd0 --- /dev/null +++ b/examples/google/google-web-server/resources/get_web_server_url.iql @@ -0,0 +1,2 @@ +/*+ exports */ +SELECT 'http://' || '{{ address }}' as web_server_url diff --git a/examples/google/google-web-server/stackql_manifest.yml b/examples/google/google-web-server/stackql_manifest.yml new file mode 100644 index 
0000000..1895078 --- /dev/null +++ b/examples/google/google-web-server/stackql_manifest.yml @@ -0,0 +1,118 @@ +version: 1 +name: "google-web-server" +description: Provisions a Google Cloud VPC networking stack (network, subnet, firewall rules, static IP) with an e2-micro VM running an Apache web server. +providers: + - google +globals: + - name: project + description: google cloud project id + value: "{{ GOOGLE_PROJECT }}" + - name: region + value: "{{ GOOGLE_REGION }}" + - name: zone + value: "{{ GOOGLE_ZONE }}" +resources: + - name: example_network + props: + - name: vpc_name + value: "{{ stack_name }}-{{ stack_env }}-vpc" + exports: + - vpc_name + - vpc_link + - name: example_subnetwork + props: + - name: subnet_name + value: "{{ stack_name }}-{{ stack_env }}-{{ region }}-subnet" + - name: ip_cidr_range + values: + prd: + value: "10.0.0.0/24" + sit: + value: "10.1.0.0/24" + dev: + value: "10.2.0.0/24" + exports: + - subnet_name + - subnet_link + - name: example_firewall_http + file: firewalls.iql + props: + - name: fw_name + value: "{{ stack_name }}-{{ stack_env }}-allow-http" + - name: fw_direction + value: INGRESS + - name: fw_source_ranges + value: ["0.0.0.0/0"] + - name: fw_allowed + value: [{IPProtocol: tcp, ports: ["80"]}] + - name: example_firewall_ssh + file: firewalls.iql + props: + - name: fw_name + value: "{{ stack_name }}-{{ stack_env }}-allow-ssh" + - name: fw_direction + value: INGRESS + - name: fw_source_ranges + values: + prd: + value: ["10.0.0.0/24"] + sit: + value: ["10.1.0.0/24"] + dev: + value: ["0.0.0.0/0"] + - name: fw_allowed + value: [{IPProtocol: tcp, ports: ["22"]}] + - name: example_public_ip + props: + - name: address_name + value: "{{ stack_name }}-{{ stack_env }}-ip" + exports: + - address + - name: get_web_server_code + type: query + props: + - name: web_server_code + value: 'Hello StackQL

Hello, StackQL!

' + sql: | + SELECT '{{ web_server_code | base64_encode }}' as web_server_code_base64 + exports: + - web_server_code_base64 + - name: example_web_server + props: + - name: instance_name + value: "{{ stack_name }}-{{ stack_env }}-vm" + - name: machine_type + value: "https://compute.googleapis.com/compute/v1/projects/{{ project }}/zones/{{ zone }}/machineTypes/e2-micro" + - name: disks + value: + - autoDelete: true + boot: true + initializeParams: + diskSizeGb: 10 + sourceImage: https://compute.googleapis.com/compute/v1/projects/debian-cloud/global/images/family/debian-12 + mode: READ_WRITE + type: PERSISTENT + - name: network_interfaces + value: + - subnetwork: "{{ subnet_link }}" + accessConfigs: + - name: external-nat + type: ONE_TO_ONE_NAT + natIP: "{{ address }}" + - name: tags + value: + items: + - "{{ stack_name }}" + - http-server + - name: metadata + value: + items: + - key: startup-script + value: "#!/bin/bash\\napt-get update\\napt-get install -y apache2\\nsystemctl start apache2\\nsystemctl enable apache2\\necho {{ web_server_code_base64 }} | base64 -d > /var/www/html/index.html\\n" + exports: + - instance_name + - name: get_web_server_url + type: query + props: [] + exports: + - web_server_url diff --git a/examples/google/k8s-the-hard-way/README.md b/examples/google/k8s-the-hard-way/README.md index 2d61772..8df6f6f 100644 --- a/examples/google/k8s-the-hard-way/README.md +++ b/examples/google/k8s-the-hard-way/README.md @@ -16,51 +16,7 @@ Based upon the [Kubernetes the Hard Way](https://github.com/kelseyhightower/kube `stackql-deploy` is installed as a python based CLI using... 
```bash -pip install stackql-deploy +install stackql-deploy from https://github.com/stackql/stackql-deploy-rs/releases # or pip3 install stackql-deploy ``` -> __Note for macOS users__ -> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: -> ```bash -> python3 -m venv myenv -> source myenv/bin/activate -> pip install stackql-deploy -> ``` - -## getting started with `stackql-deploy` - -Once installed, use the `init` command to scaffold a sample project directory to get started: - -```bash -stackql-deploy init k8s-the-hard-way -``` - -this will create a directory named `k8s-the-hard-way` which can be updated for your stack, as you can see in this project. - -## deploying using `stackql-deploy` - -```bash -export GOOGLE_CREDENTIALS=$(cat ./testcreds/k8s-the-hard-way-project-demo-service-account.json) -# deploy a stack -stackql-deploy build \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run \ ---log-level DEBUG - -# test a stack -stackql-deploy test \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run - -# teardown a stack -stackql-deploy teardown \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run -``` diff --git a/examples/google/load-balanced-vms/README.md b/examples/google/load-balanced-vms/README.md index b7174e7..095eff4 100644 --- a/examples/google/load-balanced-vms/README.md +++ b/examples/google/load-balanced-vms/README.md @@ -18,55 +18,7 @@ Based upon the [__terraform-google-load-balanced-vms__](https://github.com/Googl `stackql-deploy` is installed as a python based CLI using... 
```bash -pip install stackql-deploy +install stackql-deploy from https://github.com/stackql/stackql-deploy-rs/releases # or pip3 install stackql-deploy ``` -> __Note for macOS users__ -> to install `stackql-deploy` in a virtual environment (which may be necessary on __macOS__), use the following: -> ```bash -> python3 -m venv myenv -> source myenv/bin/activate -> pip install stackql-deploy -> ``` - -## getting started with `stackql-deploy` - -Once installed, use the `init` command to scaffold a sample project directory to get started: - -```bash -stackql-deploy init load-balanced-vms -``` - -this will create a directory named `load-balanced-vms` which can be updated for your stack, as you can see in this project. - -## deploying using `stackql-deploy` - -```bash -export GOOGLE_CREDENTIALS=$(cat ./testcreds/stackql-deploy-project-demo-service-account.json) -# deploy a stack -stackql-deploy build \ -examples\google\load-balanced-vms \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run \ ---log-level DEBUG - -# test a stack -stackql-deploy test \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run - -# teardown a stack -stackql-deploy teardown \ -examples/google/k8s-the-hard-way \ -dev \ --e GOOGLE_PROJECT=stackql-k8s-the-hard-way-demo \ ---dry-run -``` - - - -stackql-deploy-project \ No newline at end of file diff --git a/examples/snowflake/entitlements/README.md b/examples/snowflake/entitlements/README.md index 0c01cf6..829b567 100644 --- a/examples/snowflake/entitlements/README.md +++ b/examples/snowflake/entitlements/README.md @@ -15,7 +15,7 @@ __`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-C ## Prerequisites -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. 
The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `snowflake` provider, `SNOWFLAKE_PAT` must be set, for more information on authentication to `snowflake` see the [`snowflake` provider documentation](https://snowflake.stackql.io/providers/snowflake). +This example requires `stackql-deploy` to be installed. Pre-built binaries are available from the [releases page](https://github.com/stackql/stackql-deploy-rs/releases). The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `snowflake` provider, `SNOWFLAKE_PAT` must be set, for more information on authentication to `snowflake` see the [`snowflake` provider documentation](https://snowflake.stackql.io/providers/snowflake). ## Usage diff --git a/src/commands/base.rs b/src/commands/base.rs index 10b7cd2..a7abd02 100644 --- a/src/commands/base.rs +++ b/src/commands/base.rs @@ -24,6 +24,7 @@ use crate::core::utils::{ use crate::resource::manifest::{Manifest, Resource}; use crate::resource::validation::validate_manifest; use crate::template::engine::TemplateEngine; +use crate::utils::display::{print_unicode_box, BorderColor}; use crate::utils::pgwire::PgwireLite; /// Core state for all command operations, equivalent to Python's StackQLBase. @@ -237,9 +238,9 @@ impl CommandRunner { if delete_test { if exists { - info!("[{}] still exists", resource.name); - } else { info!("[{}] confirmed deleted", resource.name); + } else { + info!("[{}] still exists after post-delete check", resource.name); } } else if exists { info!("[{}] exists", resource.name); @@ -482,21 +483,32 @@ impl CommandRunner { } } - /// Delete a resource. + /// Delete a resource and confirm deletion with an interleaved + /// delete-check-retry loop. 
+ /// + /// When `delete_retries > 0` the loop is: + /// 1. Execute DELETE + /// 2. Run exists query — count==0 → done, count==1 → continue, else → error + /// 3. Wait `delete_retry_delay` seconds + /// 4. Run exists query again — count==0 → done, count==1 → re-delete + /// ... repeat up to `delete_retries` times + /// + /// When `delete_retries == 0`: single delete + single check, no retry. /// - /// Returns `Some(first_row)` when the delete query included `RETURNING *` - /// and the provider returned data; `None` otherwise. + /// Returns the RETURNING * row (if any) from the first successful delete. #[allow(clippy::too_many_arguments)] - pub fn delete_resource( + pub fn delete_and_confirm( &mut self, resource: &Resource, delete_query: &str, - retries: u32, - retry_delay: u32, + exists_query: &str, + delete_retries: u32, + delete_retry_delay: u32, dry_run: bool, show_queries: bool, ignore_errors: bool, - ) -> Option> { + ) -> (Option>, bool) { + // --- dry run path --- if dry_run { if has_returning_clause(delete_query) { info!( @@ -509,33 +521,190 @@ impl CommandRunner { resource.name, delete_query ); } - return None; + return (None, true); } - info!("deleting [{}]...", resource.name); - show_query(show_queries, delete_query); + let mut returning_row: Option> = None; + + // Helper closure: execute the DELETE statement once (no retries on the + // DML itself — retries are handled by the outer loop). 
+ let execute_delete = |client: &mut crate::utils::pgwire::PgwireLite, + query: &str, + res_name: &str, + sq: bool, + ignore: bool| { + info!("deleting [{}]...", res_name); + show_query(sq, query); + if has_returning_clause(query) { + let (msg, row) = run_stackql_dml_returning(query, client, ignore, 0, 0); + debug!("Delete response: {}", msg); + row + } else { + let msg = run_stackql_command(query, client, ignore, 0, 0); + debug!("Delete response: {}", msg); + None + } + }; - if has_returning_clause(delete_query) { - let (msg, returning_row) = run_stackql_dml_returning( - delete_query, + // Helper closure: run the exists query and return the count. + // Returns Ok(count) or Err(msg) for unexpected results. + let run_exists_count = |client: &mut crate::utils::pgwire::PgwireLite, + query: &str, + res_name: &str, + sq: bool| + -> Result { + info!("running post-delete check for [{}]...", res_name); + show_query(sq, query); + let result = run_stackql_query(query, client, true, 0, 5); + if result.is_empty() { + return Ok(0); // no rows → resource gone + } + if result[0].contains_key("_stackql_deploy_error") || result[0].contains_key("error") { + return Ok(0); // error querying → treat as gone + } + if let Some(count_str) = result[0].get("count") { + if let Ok(count) = count_str.parse::() { + return Ok(count); + } + } + // No count field — check if all field values are null/empty + // (resource gone) or any non-null value (resource still exists). 
+ let row = &result[0]; + let all_null = row.values().all(|v| v == "null" || v.is_empty()); + if all_null { + Ok(0) // all null/empty → resource gone + } else { + Ok(1) // non-null value → resource still exists + } + }; + + // --- no-retry path: single delete + single check --- + if delete_retries == 0 { + let row = execute_delete( &mut self.client, + delete_query, + &resource.name, + show_queries, ignore_errors, - retries, - retry_delay, ); - debug!("Delete response: {}", msg); - returning_row - } else { - let msg = run_stackql_command( - delete_query, + if returning_row.is_none() { + returning_row = row; + } + match run_exists_count(&mut self.client, exists_query, &resource.name, show_queries) { + Ok(0) => { + info!("[{}] confirmed deleted", resource.name); + return (returning_row, true); + } + Ok(1) => { + info!( + "[{}] delete dispatched (resource may still be deleting asynchronously)", + resource.name + ); + return (returning_row, false); + } + Ok(n) => { + catch_error_and_exit(&format!( + "Post-delete exists query for [{}] returned count={} (expected 0 or 1). 
\ + This indicates a query or logic error.", + resource.name, n + )); + } + Err(msg) => { + catch_error_and_exit(&msg); + } + } + } + + // --- retry path: interleaved delete + check loop --- + let start = std::time::Instant::now(); + + for attempt in 0..delete_retries { + // Step 1: execute DELETE + let row = execute_delete( &mut self.client, + delete_query, + &resource.name, + show_queries, ignore_errors, - retries, - retry_delay, ); - debug!("Delete response: {}", msg); - None + if returning_row.is_none() { + returning_row = row; + } + + // Step 2: immediate post-delete check + match run_exists_count(&mut self.client, exists_query, &resource.name, show_queries) { + Ok(0) => { + info!("[{}] confirmed deleted", resource.name); + return (returning_row, true); + } + Ok(1) => { + let elapsed = start.elapsed().as_secs(); + info!( + "[{}] still exists after delete, attempt {}/{} ({} seconds elapsed)", + resource.name, + attempt + 1, + delete_retries, + elapsed + ); + } + Ok(n) => { + catch_error_and_exit(&format!( + "Post-delete exists query for [{}] returned count={} (expected 0 or 1). 
\ + This indicates a query or logic error.", + resource.name, n + )); + } + Err(msg) => { + catch_error_and_exit(&msg); + } + } + + // Step 3: wait retry_delay + if delete_retry_delay > 0 { + info!( + "[{}] waiting {} seconds before next attempt...", + resource.name, delete_retry_delay + ); + std::thread::sleep(std::time::Duration::from_secs(delete_retry_delay as u64)); + } + + // Step 4: check again after the delay (maybe it cleaned up) + match run_exists_count(&mut self.client, exists_query, &resource.name, show_queries) { + Ok(0) => { + info!("[{}] confirmed deleted", resource.name); + return (returning_row, true); + } + Ok(1) => { + let elapsed = start.elapsed().as_secs(); + info!( + "[{}] still exists after delay, attempt {}/{} ({} seconds elapsed), re-issuing delete...", + resource.name, + attempt + 1, + delete_retries, + elapsed + ); + // Loop continues → next iteration will re-issue DELETE + } + Ok(n) => { + catch_error_and_exit(&format!( + "Post-delete exists query for [{}] returned count={} (expected 0 or 1). \ + This indicates a query or logic error.", + resource.name, n + )); + } + Err(msg) => { + catch_error_and_exit(&msg); + } + } } + + // Exhausted all retries + let elapsed = start.elapsed().as_secs(); + info!( + "[{}] delete could not be confirmed after {} attempts ({} seconds elapsed)", + resource.name, delete_retries, elapsed + ); + (returning_row, false) } // ----------------------------------------------------------------------- @@ -644,7 +813,13 @@ impl CommandRunner { info!("running command..."); show_query(show_queries, command_query); - run_stackql_command(command_query, &mut self.client, false, retries, retry_delay); + let result = + run_stackql_command(command_query, &mut self.client, false, retries, retry_delay); + if result.is_empty() { + debug!("Command response: no response"); + } else { + debug!("Command response:\n\n{}\n", result); + } } /// Process exports for a resource. 
@@ -909,20 +1084,17 @@ impl CommandRunner { output_file: Option<&str>, elapsed_time: &str, ) { - let output_file = match output_file { - Some(f) => f, - None => return, - }; - - info!("Processing stack exports..."); - let manifest_exports = &self.manifest.exports; + if manifest_exports.is_empty() { + return; + } + if dry_run { - let total_vars = manifest_exports.len() + 3; // +3 for stack_name, stack_env, elapsed_time + let total_vars = manifest_exports.len() + 3; info!( - "dry run: would export {} variables to {} (including automatic stack_name, stack_env, and elapsed_time)", - total_vars, output_file + "dry run: would export {} variables (including automatic stack_name, stack_env, and elapsed_time)", + total_vars ); return; } @@ -946,7 +1118,6 @@ impl CommandRunner { } if let Some(value) = self.global_context.get(var_name) { - // Try to parse as JSON if value.starts_with('[') || value.starts_with('{') { if let Ok(parsed) = serde_json::from_str::(value) { export_data.insert(var_name.clone(), parsed); @@ -972,30 +1143,115 @@ impl CommandRunner { serde_json::Value::String(elapsed_time.to_string()), ); - // Ensure directory exists - if let Some(parent) = Path::new(output_file).parent() { - if !parent.as_os_str().is_empty() && !parent.exists() { - if let Err(e) = fs::create_dir_all(parent) { - catch_error_and_exit(&format!( - "Failed to create directory for output file: {}", - e - )); + // Display stack exports table + print_unicode_box("stack exports", BorderColor::Cyan); + + // Build ASCII table + // Env var names: STACKQL_DEPLOY______ (hyphens -> underscores) + let sanitize = |s: &str| s.replace('-', "_"); + let prefix = format!( + "STACKQL_DEPLOY__{}__{}__", + sanitize(&self.stack_name), + sanitize(&self.stack_env) + ); + let mut rows: Vec<(String, String)> = Vec::new(); + let mut max_name_len = 8usize; // "variable" header + for (key, val) in &export_data { + let fq_name = format!("{}{}", prefix, sanitize(key)); + let val_str = match val { + 
serde_json::Value::String(s) => s.clone(), + other => other.to_string(), + }; + max_name_len = max_name_len.max(fq_name.len()); + rows.push((fq_name, val_str)); + } + let max_val_len = rows + .iter() + .map(|(_, v)| v.len()) + .max() + .unwrap_or(5) + .clamp(5, 80); // cap value display width + + let sep = format!( + "+-{}-+-{}-+", + "-".repeat(max_name_len), + "-".repeat(max_val_len) + ); + println!("{}", sep); + println!( + "| {: max_val_len { + format!("{}...", &val[..max_val_len - 3]) + } else { + val.clone() + }; + println!( + "| {: { + info!("{} variables written to {}", rows.len(), exports_file); + println!(); + println!("To load these variables into your shell:"); + if cfg!(target_os = "windows") { + println!( + " PowerShell: Get-Content {} | ForEach-Object {{ Invoke-Expression $_ }}", + exports_file + ); + println!(" Git Bash: source {}", exports_file); + } else { + println!(" source {}", exports_file); } + println!(); + } + Err(e) => { + error!("Failed to write exports file {}: {}", exports_file, e); } } - // Write JSON file - let json = serde_json::Value::Object(export_data.clone()); - match fs::write(output_file, serde_json::to_string_pretty(&json).unwrap()) { - Ok(_) => info!( - "Exported {} variables to {}", - export_data.len(), - output_file - ), - Err(e) => catch_error_and_exit(&format!( - "Failed to write exports file {}: {}", - output_file, e - )), + // Write JSON file if --output-file was specified + if let Some(output_file) = output_file { + if let Some(parent) = Path::new(output_file).parent() { + if !parent.as_os_str().is_empty() && !parent.exists() { + if let Err(e) = fs::create_dir_all(parent) { + catch_error_and_exit(&format!( + "Failed to create directory for output file: {}", + e + )); + } + } + } + + let json = serde_json::Value::Object(export_data); + match fs::write(output_file, serde_json::to_string_pretty(&json).unwrap()) { + Ok(_) => info!("Exports also written to {}", output_file), + Err(e) => catch_error_and_exit(&format!( + 
"Failed to write exports file {}: {}", + output_file, e + )), + } } } } diff --git a/src/commands/build.rs b/src/commands/build.rs index fc40caa..14867b3 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -722,6 +722,14 @@ fn run_build( } } + // If exports wasn't rendered yet (e.g. no exists query to + // trigger it), try now — the context may already contain all + // the variables the exports template needs. + if exports_query_str.is_none() { + exports_query_str = + render_exports!(runner, resource_queries, resource, &full_context); + } + debug!( "post-deploy for [{}]: is_correct_state={}, has_statecheck={}, exports_query_str={}", resource.name, @@ -824,6 +832,21 @@ fn run_build( } } + // If the resource has an exports anchor but we never resolved the query, + // that's a fatal error - variables that can't be resolved at this point + // indicate a missing dependency or misconfigured template. + if exports_query_str.is_none() + && resource_queries.contains_key("exports") + && !resource.exports.is_empty() + && !dry_run + { + catch_error_and_exit(&format!( + "exports query for [{}] could not be rendered - unresolved template variables. 
\ + Check that all referenced variables are defined in the manifest or exported by prior resources.", + resource.name + )); + } + if !dry_run { if res_type == "resource" { info!("successfully deployed {}", resource.name); diff --git a/src/commands/teardown.rs b/src/commands/teardown.rs index 849b8ca..f9c3a1e 100644 --- a/src/commands/teardown.rs +++ b/src/commands/teardown.rs @@ -224,43 +224,26 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ let resource_queries = runner.get_queries(resource, &full_context); // Get exists query (fallback to statecheck) - render JIT - let ( - exists_query_str, - exists_retries, - exists_retry_delay, - _postdelete_retries, - _postdelete_retry_delay, - ) = if let Some(eq) = resource_queries.get("exists") { - let rendered = - runner.render_query(&resource.name, "exists", &eq.template, &full_context); - ( - rendered, - eq.options.retries, - eq.options.retry_delay, - eq.options.postdelete_retries, - eq.options.postdelete_retry_delay, - ) - } else if let Some(sq) = resource_queries.get("statecheck") { - info!( - "exists query not defined for [{}], trying statecheck query as exists query.", - resource.name - ); - let rendered = - runner.render_query(&resource.name, "statecheck", &sq.template, &full_context); - ( - rendered, - sq.options.retries, - sq.options.retry_delay, - sq.options.postdelete_retries, - sq.options.postdelete_retry_delay, - ) - } else { - info!( - "No exists or statecheck query for [{}], skipping...", - resource.name - ); - continue; - }; + let (exists_query_str, exists_retries, exists_retry_delay) = + if let Some(eq) = resource_queries.get("exists") { + let rendered = + runner.render_query(&resource.name, "exists", &eq.template, &full_context); + (rendered, eq.options.retries, eq.options.retry_delay) + } else if let Some(sq) = resource_queries.get("statecheck") { + info!( + "exists query not defined for [{}], trying statecheck query as exists query.", + resource.name + ); + let 
rendered = + runner.render_query(&resource.name, "statecheck", &sq.template, &full_context); + (rendered, sq.options.retries, sq.options.retry_delay) + } else { + info!( + "No exists or statecheck query for [{}], skipping...", + resource.name + ); + continue; + }; // Check if delete query template exists (don't render yet — may need // this.* fields from the exists check). @@ -307,9 +290,10 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ // Delete if resource_exists { - let returning_row = runner.delete_resource( + let (returning_row, delete_confirmed) = runner.delete_and_confirm( resource, &delete_query, + &exists_query_str, delete_retries, delete_retry_delay, dry_run, @@ -353,6 +337,12 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ ); } } + + if delete_confirmed { + info!("successfully deleted {}", resource.name); + } else { + info!("[{}] delete could not be confirmed", resource.name); + } } else { info!( "resource [{}] does not exist, skipping delete", @@ -360,28 +350,6 @@ fn run_teardown(runner: &mut CommandRunner, dry_run: bool, show_queries: bool, _ ); continue; } - - // Confirm deletion - single check, don't poll excessively. - // Cloud Control deletes are async; if the resource is still - // visible on the first check that's expected, move on. 
- let (still_exists, _) = runner.check_if_resource_exists( - resource, - &exists_query_str, - 1, - 0, - dry_run, - show_queries, - true, // delete_test - ); - - if !still_exists { - info!("successfully deleted {}", resource.name); - } else { - info!( - "[{}] delete dispatched (resource may still be deleting asynchronously)", - resource.name - ); - } } let elapsed = start_time.elapsed(); diff --git a/src/commands/test.rs b/src/commands/test.rs index 094f838..aef7450 100644 --- a/src/commands/test.rs +++ b/src/commands/test.rs @@ -297,7 +297,5 @@ fn run_test( let elapsed_str = format!("{:.2?}", elapsed); info!("test completed in {}", elapsed_str); - if let Some(of) = output_file { - runner.process_stack_exports(dry_run, Some(of), &elapsed_str); - } + runner.process_stack_exports(dry_run, output_file, &elapsed_str); } diff --git a/src/core/errors.rs b/src/core/errors.rs new file mode 100644 index 0000000..03a9370 --- /dev/null +++ b/src/core/errors.rs @@ -0,0 +1,122 @@ +//! Fatal error detection for StackQL query execution. +//! +//! Maintains a list of error patterns that indicate unrecoverable failures +//! (network issues, auth failures, etc.) vs normal operational errors +//! (404 not found) that the retry/statecheck logic can handle. + +/// Error patterns that indicate a fatal, non-retryable failure. +/// +/// These are checked against the error message string returned by the +/// StackQL engine. If any pattern matches, the operation is aborted +/// immediately rather than retried. +/// +/// Two categories: +/// +/// 1. **Network errors** - The request never reached the API. Any result +/// from a query in this state is untrustworthy (e.g., an exists check +/// returning empty could cause a duplicate resource to be created). +/// +/// 2. **HTTP status errors** - The request reached the API but the response +/// indicates an unrecoverable problem (auth failure, forbidden, etc.). +/// 404 is explicitly excluded as it's normal for exists checks. 
+const FATAL_ERROR_PATTERNS: &[&str] = &[ + // Network-layer errors (Go net/http) + "dial tcp:", + "Client.Timeout exceeded", + "connection refused", + "no such host", + "request canceled while waiting for connection", + "request canceled (Client.Timeout", + "tls: handshake", + "certificate", + "network is unreachable", + "connection reset by peer", + "broken pipe", + "EOF", + // HTTP status codes that are never retryable + "http response status code: 401", + "http response status code: 403", +]; + +/// Patterns that indicate a non-fatal error, even if a fatal pattern +/// also matches. These take precedence over `FATAL_ERROR_PATTERNS`. +/// +/// For example, a 404 is normal for exists checks on resources that +/// don't exist yet. +const NON_FATAL_OVERRIDES: &[&str] = &[ + "http response status code: 404", + "ResourceNotFoundException", + "was not found", +]; + +/// Check if an error message indicates a fatal, non-retryable failure. +/// +/// Returns `Some(reason)` if the error is fatal, `None` if it's +/// a normal operational error that can be retried or handled. 
+pub fn check_fatal_error(error_msg: &str) -> Option<&'static str> { + // First check if any non-fatal override matches + for pattern in NON_FATAL_OVERRIDES { + if error_msg.contains(pattern) { + return None; + } + } + + // Then check for fatal patterns + FATAL_ERROR_PATTERNS + .iter() + .find(|&&pattern| error_msg.contains(pattern)) + .copied() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_network_timeout_is_fatal() { + let msg = r#"Query execution failed: query returns error: Post "https://cloudcontrolapi.us-east-1.amazonaws.com/?Action=GetResource&Version=2021-09-30": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"#; + assert!(check_fatal_error(msg).is_some()); + } + + #[test] + fn test_dns_failure_is_fatal() { + let msg = r#"Query execution failed: query returns error: Post "https://cloudcontrolapi.us-east-1.amazonaws.com/": dial tcp: lookup cloudcontrolapi.us-east-1.amazonaws.com on 8.8.4.4:53: i/o timeout"#; + assert!(check_fatal_error(msg).is_some()); + } + + #[test] + fn test_403_is_fatal() { + let msg = r#"http response status code: 403, response body: {"message":"Access Denied"}"#; + assert!(check_fatal_error(msg).is_some()); + } + + #[test] + fn test_401_is_fatal() { + let msg = r#"http response status code: 401, response body: {"message":"Unauthorized"}"#; + assert!(check_fatal_error(msg).is_some()); + } + + #[test] + fn test_404_is_not_fatal() { + let msg = r#"http response status code: 404, response body: {"__type":"ResourceNotFoundException","Message":"Resource not found"}"#; + assert!(check_fatal_error(msg).is_none()); + } + + #[test] + fn test_resource_not_found_is_not_fatal() { + let msg = r#"Resource of type 'AWS::EC2::VPC' with identifier 'vpc-xxx' was not found"#; + assert!(check_fatal_error(msg).is_none()); + } + + #[test] + fn test_400_bad_request_is_not_fatal() { + let msg = r#"insert over HTTP error: 400 Bad Request"#; + 
assert!(check_fatal_error(msg).is_none()); + } + + #[test] + fn test_normal_query_error_is_not_fatal() { + let msg = r#"query returns error: no such column: foo"#; + assert!(check_fatal_error(msg).is_none()); + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 7ae50d1..a80ccbc 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -7,5 +7,6 @@ pub mod config; pub mod env; +pub mod errors; pub mod templating; pub mod utils; diff --git a/src/core/templating.rs b/src/core/templating.rs index 0dfb743..4a397db 100644 --- a/src/core/templating.rs +++ b/src/core/templating.rs @@ -300,6 +300,17 @@ pub fn render_query( let template_name = format!("{}__{}", res_name, anchor); match engine.render_with_filters(&template_name, &processed_query, &ctx) { Ok(rendered) => { + // Check for unresolved template variables in the final rendered output + let unresolved_re = Regex::new(r"\{\{[^}]+\}\}").unwrap(); + if let Some(m) = unresolved_re.find(&rendered) { + crate::core::utils::catch_error_and_exit(&format!( + "Unresolved template variable in [{}] [{}]: '{}'\n\nRendered query:\n{}\n", + res_name, + anchor, + m.as_str(), + rendered + )); + } debug!( "Rendered [{}] [{}] query:\n\n{}\n", res_name, anchor, rendered @@ -373,6 +384,15 @@ pub fn try_render_query( let template_name = format!("{}__{}", res_name, anchor); match engine.render_with_filters(&template_name, &processed_query, &ctx) { Ok(rendered) => { + // Check for unresolved template variables + let unresolved_re = Regex::new(r"\{\{[^}]+\}\}").unwrap(); + if unresolved_re.is_match(&rendered) { + debug!( + "Unresolved variables in [{}] [{}], deferring render", + res_name, anchor + ); + return None; + } debug!( "Rendered [{}] [{}] query:\n\n{}\n", res_name, anchor, rendered diff --git a/src/core/utils.rs b/src/core/utils.rs index d17c840..8f20085 100644 --- a/src/core/utils.rs +++ b/src/core/utils.rs @@ -13,6 +13,7 @@ use std::time::{Duration, Instant}; use log::{debug, error, info}; +use 
crate::core::errors::check_fatal_error; use crate::utils::pgwire::PgwireLite; use crate::utils::query::{execute_query, QueryResult}; @@ -95,6 +96,13 @@ pub fn run_stackql_query( if !result_maps.is_empty() { if let Some(err) = result_maps[0].get("error") { last_error = Some(err.clone()); + // Check for fatal errors even when suppressing + if let Some(pattern) = check_fatal_error(err) { + catch_error_and_exit(&format!( + "Fatal error (matched '{}'):\n\n{}\n", + pattern, err + )); + } if !suppress_errors { if attempt == retries { catch_error_and_exit(&format!( @@ -155,6 +163,13 @@ pub fn run_stackql_query( Err(e) => { last_error = Some(e.clone()); debug!("Query error on attempt {}: {}", attempt + 1, e); + // Check for fatal errors (network, auth) that should not be retried + if let Some(pattern) = check_fatal_error(&e) { + catch_error_and_exit(&format!( + "Fatal error (matched '{}'):\n\n{}\n", + pattern, e + )); + } if attempt == retries && !suppress_errors { catch_error_and_exit(&format!( "Exception during stackql query execution:\n\n{}\n", @@ -212,7 +227,11 @@ pub fn run_stackql_command( match execute_query(&processed_command, client) { Ok(result) => { match result { - QueryResult::Data { notices, .. } => { + QueryResult::Data { + notices, + columns, + rows, + } => { // Check for errors in notices for notice in ¬ices { if error_detected_in_notice(notice) && !ignore_errors { @@ -232,9 +251,31 @@ pub fn run_stackql_command( } } } + // Log returned data (e.g. 
from RETURNING clause) at debug level + if !rows.is_empty() { + let col_names: Vec<&str> = + columns.iter().map(|c| c.name.as_str()).collect(); + let result_maps: Vec<HashMap<String, String>> = rows + .iter() + .map(|row| { + col_names + .iter() + .enumerate() + .map(|(i, &name)| { + let val = + row.values.get(i).cloned().unwrap_or_default(); + (name.to_string(), val) + }) + .collect() + }) + .collect(); + if let Ok(json) = serde_json::to_string_pretty(&result_maps) { + debug!("Command returned data:\n\n{}\n", json); + } + } let msg = notices.join("\n"); if !msg.is_empty() { - debug!("Stackql command executed successfully:\n\n{}\n", msg); + debug!("Command notices:\n\n{}\n", msg); } return msg; } @@ -249,6 +290,13 @@ } } Err(e) => { + // Check for fatal errors (network, auth) before retrying + if let Some(pattern) = check_fatal_error(&e) { + catch_error_and_exit(&format!( + "Fatal error (matched '{}'):\n\n{}\n", + pattern, e + )); + } if !ignore_errors { if attempt < retries { debug!( @@ -759,6 +807,13 @@ pub fn run_stackql_dml_returning( } }, Err(e) => { + // Check for fatal errors (network, auth) before retrying + if let Some(pattern) = check_fatal_error(&e) { + catch_error_and_exit(&format!( + "Fatal error (matched '{}'):\n\n{}\n", + pattern, e + )); + } if !ignore_errors { if attempt < retries { debug!( diff --git a/template-hub/azure/starter/README.md.template b/template-hub/azure/starter/README.md.template index 070996a..3f59296 100644 --- a/template-hub/azure/starter/README.md.template +++ b/template-hub/azure/starter/README.md.template @@ -15,7 +15,7 @@ __`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-C ## Prerequisites -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__.
The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `azure` provider, `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET` must be set (or their must be an authenticated session on the host using `az login`), for more information on authentication to `azure` see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). +This example requires `stackql-deploy` to be installed. Pre-built binaries are available from the [releases page](https://github.com/stackql/stackql-deploy-rs/releases). The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `azure` provider, `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET` must be set (or there must be an authenticated session on the host using `az login`), for more information on authentication to `azure` see the [`azure` provider documentation](https://azure.stackql.io/providers/azure). ## Usage diff --git a/template-hub/google/starter/README.md.template b/template-hub/google/starter/README.md.template index 162d1f1..96fe42b 100644 --- a/template-hub/google/starter/README.md.template +++ b/template-hub/google/starter/README.md.template @@ -15,7 +15,7 @@ __`stackql-deploy`__ is a stateless, declarative, SQL driven Infrastructure-as-C ## Prerequisites -This example requires `stackql-deploy` to be installed using __`pip install stackql-deploy`__. The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `google` provider, `GOOGLE_CREDENTIALS` needs to be set at runtime (from the local machine using export GOOGLE_CREDENTIALS=cat creds/my-sa-key.json for example or as a CI variable/secret).
+This example requires `stackql-deploy` to be installed. Pre-built binaries are available from the [releases page](https://github.com/stackql/stackql-deploy-rs/releases). The host used to run `stackql-deploy` needs the necessary environment variables set to authenticate to your specific provider, in the case of the `google` provider, `GOOGLE_CREDENTIALS` needs to be set at runtime (from the local machine using export GOOGLE_CREDENTIALS=cat creds/my-sa-key.json for example or as a CI variable/secret). ## Usage diff --git a/tests/pgwire_replace_test.rs b/tests/pgwire_replace_test.rs deleted file mode 100644 index 798aa0d..0000000 --- a/tests/pgwire_replace_test.rs +++ /dev/null @@ -1,303 +0,0 @@ -//! Manual test: REPLACE ... RETURNING over pgwire -//! -//! Requires a running stackql server on localhost:5444 with -//! databricks_account provider configured. -//! -//! Run with: -//! cargo test --test pgwire_replace_test -- --nocapture --ignored - -use std::collections::HashMap; -use std::io::{Read, Write}; -use std::net::TcpStream; - -fn read_byte(stream: &mut TcpStream) -> u8 { - let mut buf = [0u8; 1]; - stream.read_exact(&mut buf).unwrap(); - buf[0] -} - -fn read_i32(stream: &mut TcpStream) -> i32 { - let mut buf = [0u8; 4]; - stream.read_exact(&mut buf).unwrap(); - i32::from_be_bytes(buf) -} - -fn read_bytes(stream: &mut TcpStream, n: usize) -> Vec<u8> { - let mut buf = vec![0u8; n]; - stream.read_exact(&mut buf).unwrap(); - buf -} - -fn parse_error_fields(data: &[u8]) -> HashMap<String, String> { - let mut fields = HashMap::new(); - let mut pos = 0; - while pos < data.len() { - let field_type = data[pos]; - if field_type == 0 { - break; - } - pos += 1; - let end = data[pos..]
- .iter() - .position(|&b| b == 0) - .unwrap_or(data.len() - pos); - let value = String::from_utf8_lossy(&data[pos..pos + end]).to_string(); - let key = match field_type { - b'S' => "severity", - b'V' => "severity_v", - b'C' => "code", - b'M' => "message", - b'D' => "detail", - b'H' => "hint", - b'P' => "position", - b'W' => "where", - _ => "unknown", - }; - fields.insert(key.to_string(), value); - pos += end + 1; - } - fields -} - -fn startup(stream: &mut TcpStream) { - const PROTOCOL_V3: i32 = 196608; - let params = b"user\0stackql\0database\0stackql\0\0"; - let total_len = 4 + 4 + params.len(); - let mut msg = Vec::with_capacity(total_len); - msg.extend_from_slice(&(total_len as i32).to_be_bytes()); - msg.extend_from_slice(&PROTOCOL_V3.to_be_bytes()); - msg.extend_from_slice(params); - stream.write_all(&msg).unwrap(); - - loop { - let msg_type = read_byte(stream); - let payload_len = read_i32(stream) as usize; - let _data = read_bytes(stream, payload_len.saturating_sub(4)); - match msg_type { - b'Z' => break, - b'E' => { - let fields = parse_error_fields(&_data); - panic!("Startup error: {:?}", fields); - } - _ => {} - } - } - println!(" [startup] Connected and ready"); -} - -fn send_query(stream: &mut TcpStream, sql: &str) { - let sql_bytes = sql.as_bytes(); - let payload_len = 4 + sql_bytes.len() + 1; - let mut msg = Vec::with_capacity(1 + payload_len); - msg.push(b'Q'); - msg.extend_from_slice(&(payload_len as i32).to_be_bytes()); - msg.extend_from_slice(sql_bytes); - msg.push(0u8); - stream.write_all(&msg).unwrap(); -} - -struct QueryResponse { - columns: Vec, - rows: Vec>, - notices: Vec, - errors: Vec>, - command_tag: Option, -} - -fn read_response(stream: &mut TcpStream) -> QueryResponse { - let mut columns = Vec::new(); - let mut rows = Vec::new(); - let mut notices = Vec::new(); - let mut errors = Vec::new(); - let mut command_tag = None; - - loop { - let msg_type = read_byte(stream); - let payload_len = read_i32(stream) as usize; - let data = 
read_bytes(stream, payload_len.saturating_sub(4)); - - match msg_type { - b'T' => { - // RowDescription - let num_fields = u16::from_be_bytes([data[0], data[1]]) as usize; - let mut pos = 2; - columns.clear(); - for _ in 0..num_fields { - let null_off = data[pos..].iter().position(|&b| b == 0).unwrap(); - let name = String::from_utf8_lossy(&data[pos..pos + null_off]).to_string(); - columns.push(name); - pos += null_off + 1 + 18; // skip field metadata - } - println!(" [T] RowDescription: {:?}", columns); - } - b'D' => { - // DataRow - let num_cols = u16::from_be_bytes([data[0], data[1]]) as usize; - let mut pos = 2; - let mut row = Vec::new(); - for _ in 0..num_cols { - let col_len = i32::from_be_bytes(data[pos..pos + 4].try_into().unwrap()); - pos += 4; - if col_len < 0 { - row.push("NULL".to_string()); - } else { - let val = - String::from_utf8_lossy(&data[pos..pos + col_len as usize]).to_string(); - row.push(val); - pos += col_len as usize; - } - } - println!(" [D] DataRow: {:?}", row); - rows.push(row); - } - b'C' => { - // CommandComplete - let tag = - String::from_utf8_lossy(data.strip_suffix(b"\0").unwrap_or(&data)).to_string(); - println!(" [C] CommandComplete: {}", tag); - command_tag = Some(tag); - } - b'N' => { - // NoticeResponse - let fields = parse_error_fields(&data); - let msg = fields.get("message").cloned().unwrap_or_default(); - println!(" [N] Notice: {}", msg); - notices.push(msg); - } - b'E' => { - // ErrorResponse - let fields = parse_error_fields(&data); - let msg = fields.get("message").cloned().unwrap_or_default(); - println!(" [E] ERROR: {}", msg); - errors.push(fields); - } - b'I' => { - println!(" [I] EmptyQueryResponse"); - } - b'Z' => { - let status = if data.is_empty() { - '?' 
- } else { - data[0] as char - }; - println!(" [Z] ReadyForQuery (status={})", status); - break; - } - _ => { - println!( - " [{}] Unknown message ({} bytes)", - msg_type as char, - data.len() - ); - } - } - } - - QueryResponse { - columns, - rows, - notices, - errors, - command_tag, - } -} - -#[test] -#[ignore] -fn test_replace_returning_over_pgwire() { - println!("\n=== REPLACE ... RETURNING over pgwire test ===\n"); - - let mut stream = TcpStream::connect("localhost:5444") - .expect("Failed to connect to stackql server on localhost:5444"); - - startup(&mut stream); - - // Test 1: Simple SELECT to confirm connection works - println!("\n--- Test 1: Simple SELECT ---"); - send_query(&mut stream, "SELECT 1 as test_val;"); - let resp = read_response(&mut stream); - assert!(resp.errors.is_empty(), "Simple SELECT should not error"); - assert_eq!(resp.rows.len(), 1, "Should return 1 row"); - println!(" PASS: Simple SELECT works\n"); - - // Test 2: REPLACE ... RETURNING (first attempt) - let replace_sql = r#"REPLACE databricks_account.iam.workspace_assignment -SET -permissions = '["ADMIN"]' -WHERE -account_id = 'ebfcc5a9-9d49-4c93-b651-b3ee6cf1c9ce' -AND workspace_id = '7474653260057820' -AND principal_id = 82893155042608 -RETURNING -error, -permissions, -principal;"#; - - println!("--- Test 2: REPLACE ... RETURNING (attempt 1) ---"); - send_query(&mut stream, replace_sql); - let resp1 = read_response(&mut stream); - println!(" Errors: {}", resp1.errors.len()); - println!(" Rows: {}", resp1.rows.len()); - println!(" Notices: {}", resp1.notices.len()); - println!(" Command tag: {:?}", resp1.command_tag); - - if !resp1.errors.is_empty() { - println!(" ** FIRST ATTEMPT FAILED (reproduces the bug) **"); - for (i, err) in resp1.errors.iter().enumerate() { - println!(" Error {}: {:?}", i, err); - } - } else { - println!(" ** FIRST ATTEMPT SUCCEEDED **"); - } - - // Test 3: Same REPLACE ... RETURNING (second attempt) - println!("\n--- Test 3: REPLACE ... 
RETURNING (attempt 2) ---"); - send_query(&mut stream, replace_sql); - let resp2 = read_response(&mut stream); - println!(" Errors: {}", resp2.errors.len()); - println!(" Rows: {}", resp2.rows.len()); - println!(" Notices: {}", resp2.notices.len()); - println!(" Command tag: {:?}", resp2.command_tag); - - if !resp2.errors.is_empty() { - println!(" ** SECOND ATTEMPT ALSO FAILED **"); - for (i, err) in resp2.errors.iter().enumerate() { - println!(" Error {}: {:?}", i, err); - } - } else { - println!(" ** SECOND ATTEMPT SUCCEEDED **"); - } - - // Test 4: Simple INSERT ... RETURNING on a CC resource for comparison - println!("\n--- Test 4: Simple SELECT for sanity ---"); - send_query(&mut stream, "SELECT 1 as still_alive;"); - let resp3 = read_response(&mut stream); - assert!(resp3.errors.is_empty(), "Connection should still be alive"); - println!(" PASS: Connection still alive after REPLACE tests\n"); - - // Summary - println!("=== SUMMARY ==="); - println!( - " Attempt 1: {}", - if resp1.errors.is_empty() { - "SUCCESS" - } else { - "FAILED" - } - ); - println!( - " Attempt 2: {}", - if resp2.errors.is_empty() { - "SUCCESS" - } else { - "FAILED" - } - ); - if !resp1.errors.is_empty() && resp2.errors.is_empty() { - println!(" CONCLUSION: Bug reproduced - first attempt fails, second succeeds"); - } else if resp1.errors.is_empty() && resp2.errors.is_empty() { - println!(" CONCLUSION: Both succeeded - bug not reproduced this time"); - } else { - println!(" CONCLUSION: Unexpected pattern"); - } -} diff --git a/website/docs/getting-started.md b/website/docs/getting-started.md index c6af74d..d698579 100644 --- a/website/docs/getting-started.md +++ b/website/docs/getting-started.md @@ -10,44 +10,42 @@ unlisted: false --- import File from '/src/components/File'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; `stackql-deploy` is a model driven, declarative framework for provisioning, de-provisioning and testing cloud resources. 
Heard enough and ready to get started? Jump to a [__Quick Start__](#quick-start). ## Installing `stackql-deploy` -`stackql-deploy` is distributed as a standalone binary with no runtime dependencies required. - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; +`stackql-deploy` is distributed as a standalone binary with no runtime dependencies required. You can also download directly from your browser at [__get-stackql-deploy.io__](https://get-stackql-deploy.io). - + -**Apple Silicon (ARM64):** +The canonical install URL detects your OS and redirects to the latest release asset automatically: ```bash -curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-macos-arm64.tar.gz | tar xz +curl -L https://get-stackql-deploy.io | tar xzf - sudo mv stackql-deploy /usr/local/bin/ ``` -**Intel (x86_64):** +Or download a specific platform build: + +**macOS Universal (Apple Silicon + Intel):** ```bash -curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-macos-x86_64.tar.gz | tar xz +curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-macos-universal.tar.gz | tar xz sudo mv stackql-deploy /usr/local/bin/ ``` - - - -**x86_64:** +**Linux x86_64:** ```bash curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-linux-x86_64.tar.gz | tar xz sudo mv stackql-deploy /usr/local/bin/ ``` -**ARM64:** +**Linux ARM64:** ```bash curl -L https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-linux-arm64.tar.gz | tar xz @@ -60,7 +58,7 @@ sudo mv stackql-deploy /usr/local/bin/ **PowerShell:** ```powershell -Invoke-WebRequest -Uri "https://github.com/stackql/stackql-deploy-rs/releases/latest/download/stackql-deploy-windows-x86_64.zip" -OutFile stackql-deploy.zip +Invoke-WebRequest https://get-stackql-deploy.io -OutFile stackql-deploy.zip Expand-Archive stackql-deploy.zip -DestinationPath . 
Move-Item stackql-deploy.exe "$env:LOCALAPPDATA\Microsoft\WindowsApps\" Remove-Item stackql-deploy.zip @@ -74,25 +72,27 @@ unzip stackql-deploy.zip ``` - + -If you have Rust installed (via [rustup](https://rustup.rs/)): +Pre-built binaries are attached to every release on the [__GitHub Releases__](https://github.com/stackql/stackql-deploy-rs/releases) page. A `SHA256SUMS` file is included for verification. -```bash -cargo install stackql-deploy -``` - -This builds from source and installs to `~/.cargo/bin/`. +| Platform | Asset | +|----------|-------| +| Linux x86_64 | `stackql-deploy-linux-x86_64.tar.gz` | +| Linux ARM64 | `stackql-deploy-linux-arm64.tar.gz` | +| macOS Universal (Apple Silicon + Intel) | `stackql-deploy-macos-universal.tar.gz` | +| Windows x86_64 | `stackql-deploy-windows-x86_64.zip` | -Use the [`stackql/setup-deploy`](https://github.com/marketplace/actions/stackql-deploy) action to install and run `stackql-deploy` in your CI/CD pipelines: +Use the [`stackql/setup-deploy`](https://github.com/marketplace/actions/stackql-deploy) action to install and run `stackql-deploy` in your CI/CD pipelines. The action automatically downloads the latest binary for the runner's platform. + +**Deploy a stack:** ```yaml steps: - uses: actions/checkout@v4 - - name: Deploy Stack uses: stackql/setup-deploy@v1.0.1 with: @@ -102,12 +102,39 @@ steps: env_vars: 'AWS_REGION=us-east-1' ``` -The action automatically downloads the latest binary for the runner's platform. See [__Deploying with GitHub Actions__](/github-actions) for the full reference. +**Deploy and capture outputs:** + +```yaml + - name: Deploy Stack + id: stackql-deploy + uses: stackql/setup-deploy@v1.0.1 + with: + command: 'build' + stack_dir: 'examples/my-stack' + stack_env: 'prod' + output_file: 'deployment-outputs.json' + env_vars: 'GOOGLE_PROJECT=my-project' + + - name: Use outputs + run: | + echo '${{ steps.stackql-deploy.outputs.deployment_outputs }}' | jq . 
+``` + +See [__Deploying with GitHub Actions__](/github-actions) for the full reference. - + + +If you have the Rust toolchain installed (via [rustup](https://rustup.rs/)): -All platform binaries are available on the [__GitHub Releases__](https://github.com/stackql/stackql-deploy-rs/releases) page. +```bash +cargo install stackql-deploy +``` + +This builds from source and installs to `~/.cargo/bin/`. + + + ## How `stackql-deploy` works @@ -255,7 +282,8 @@ For more detailed information see [`cli-reference/build`](/cli-reference/build), To get up and running quickly, `stackql-deploy` provides a set of quick start templates for common cloud providers. These templates include predefined configurations and resource queries tailored to AWS, Azure, and Google Cloud, among others. -- [**AWS Quick Start Template**](/template-library/aws/vpc-and-ec2-instance): A basic setup for deploying a VPC, including subnets and routing configurations. +- [**AWS Quick Start Template**](/template-library/aws/vpc-and-ec2-instance): A complete VPC networking stack with an EC2 web server using the `awscc` Cloud Control provider. +- [**Databricks Quick Start Template**](/template-library/databricks/serverless-workspace): A multi-provider stack deploying a Databricks serverless workspace on AWS with Unity Catalog. - [**Azure Quick Start Template**](/template-library/azure/simple-vnet-and-vm): A setup for creating a Resource Group with associated resources. - [**Google Cloud Quick Start Template**](/template-library/google/k8s-the-hard-way): A configuration for deploying a VPC with network and firewall rules. diff --git a/website/docs/manifest_fields/exports.mdx b/website/docs/manifest_fields/exports.mdx index 04d945b..b60915b 100644 --- a/website/docs/manifest_fields/exports.mdx +++ b/website/docs/manifest_fields/exports.mdx @@ -1,25 +1,65 @@ -**Type**: `array of strings` (optional) - -**Description**: List of variable names to export to a JSON file after deployment completion. 
Variables must exist in the deployment context (from globals or resource exports). Use with the `--output-file` CLI argument to specify the destination file. - -**Usage**: Use this to extract key deployment outputs for use in CI/CD pipelines, downstream processes, or for record-keeping. - -**Example**: - -```yaml -exports: - - databricks_workspace_name - - databricks_workspace_id - - aws_iam_role_arn - - deployment_timestamp -``` - -**Notes**: -- `stack_name`, `stack_env`, and `elapsed_time` are automatically included in exports and do not need to be listed -- Export order: automatic exports (`stack_name`, `stack_env`) first, then user-defined exports, then timing (`elapsed_time`) last -- `elapsed_time` is formatted as a string showing the total deployment duration (e.g., "0:01:23.456789") -- Variables are exported exactly as they exist in the deployment context -- Complex objects and arrays are preserved as JSON structures -- If a listed variable doesn't exist in the context, deployment will fail -- Requires `--output-file` CLI argument to be specified, otherwise exports are skipped -- Exported JSON file contains a flat object with variable names as keys \ No newline at end of file +**Type**: `array of strings` (optional) + +**Description**: List of variable names to export after a successful `build` or `test` operation. Variables must exist in the deployment context (from globals or resource exports). Exported variables are displayed in a table, written to a sourceable shell file (`.stackql-deploy-exports`), and optionally to a JSON file when `--output-file` is specified. + +**Usage**: Use this to extract key deployment outputs for use in CI/CD pipelines, downstream processes, or interactive shell sessions. 
+ +**Example**: + +```yaml +exports: + - workspace_name + - workspace_id + - deployment_name + - workspace_status + - workspace_url +``` + +**Output after a successful `build` or `test`**: + +``` +┌───────────────┐ +│ stack exports │ +└───────────────┘ ++------------------------------------------------------+--------------------------------------------------+ +| variable | value | ++------------------------------------------------------+--------------------------------------------------+ +| STACKQL_DEPLOY__my_stack__dev__workspace_name | my-stack-dev-workspace | +| STACKQL_DEPLOY__my_stack__dev__workspace_id | 7474653260057820 | +| STACKQL_DEPLOY__my_stack__dev__deployment_name | dbc-c83e88ed-1ad3 | +| STACKQL_DEPLOY__my_stack__dev__workspace_status | RUNNING | +| STACKQL_DEPLOY__my_stack__dev__workspace_url | https://dbc-c83e88ed-1ad3.cloud.databricks.com | +| STACKQL_DEPLOY__my_stack__dev__elapsed_time | 49.22s | +| STACKQL_DEPLOY__my_stack__dev__stack_env | dev | +| STACKQL_DEPLOY__my_stack__dev__stack_name | my-stack | ++------------------------------------------------------+--------------------------------------------------+ + +To load these variables into your shell: + source .stackql-deploy-exports +``` + +**Loading exports into the shell**: + +```bash +# Linux / macOS / Git Bash +source .stackql-deploy-exports +echo $STACKQL_DEPLOY__my_stack__dev__workspace_url +``` + +```powershell +# PowerShell +Get-Content .stackql-deploy-exports | ForEach-Object { Invoke-Expression $_ } +``` + +**Environment variable naming**: + +Exported variable names follow the pattern `STACKQL_DEPLOY__<stack_name>__<stack_env>__<variable_name>`, where hyphens in stack name or env are replaced with underscores (since shell environment variables don't support hyphens).
+ +**Notes**: +- `stack_name`, `stack_env`, and `elapsed_time` are automatically included in exports and do not need to be listed +- Stack exports are displayed on both `build` and `test` operations +- The `.stackql-deploy-exports` file is written to the current working directory and contains `export KEY='value'` lines +- If `--output-file` is also specified, exports are additionally written as a JSON file +- Variables are exported exactly as they exist in the deployment context +- Complex objects and arrays are preserved as JSON structures +- If a listed variable doesn't exist in the context, the operation will fail diff --git a/website/docs/template-library/databricks/serverless-workspace.md b/website/docs/template-library/databricks/serverless-workspace.md new file mode 100644 index 0000000..286ad02 --- /dev/null +++ b/website/docs/template-library/databricks/serverless-workspace.md @@ -0,0 +1,391 @@ +--- +id: serverless-workspace +title: Databricks Serverless Workspace on AWS +hide_title: false +hide_table_of_contents: false +description: Deploy a complete Databricks serverless workspace on AWS including cross-account IAM roles, S3 storage, workspace provisioning, Unity Catalog storage credentials, and workspace admin group configuration. +tags: [] +draft: false +unlisted: false +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This example demonstrates a multi-provider stack that provisions a complete Databricks serverless workspace on AWS using `stackql-deploy`. The stack spans three providers: `awscc` for AWS Cloud Control resources, `databricks_account` for Databricks account-level operations, and `databricks_workspace` for workspace-level configuration. 
+ +## Stack Overview + +The stack provisions the following resources in order: + +```mermaid +flowchart TD + subgraph AWS["AWS Resources"] + Role["Cross-Account\nIAM Role"] + S3W["Workspace\nS3 Bucket"] + S3WP["Bucket Policy"] + S3M["Metastore\nS3 Bucket"] + MRole["Metastore Access\nIAM Role"] + end + subgraph DBXAcct["Databricks Account"] + Creds["Account\nCredentials"] + Storage["Storage\nConfiguration"] + WS["Workspace"] + Group["Admin Group"] + Users["User Lookup"] + Membership["Group\nMembership"] + Assignment["Workspace\nAssignment"] + end + subgraph DBXWorkspace["Databricks Workspace"] + StorageCred["Storage\nCredential"] + UpdateRole["Update Metastore\nRole Trust Policy"] + CredGrants["Credential\nGrants"] + ExtLoc["External\nLocation"] + LocGrants["Location\nGrants"] + end + Role --> Creds + Creds --> Storage + S3W --> S3WP + S3W --> Storage + Storage --> WS + Creds --> WS + WS --> Group + Group --> Users + Users --> Membership + Group --> Assignment + S3M --> MRole + MRole --> StorageCred + StorageCred --> UpdateRole + StorageCred --> CredGrants + StorageCred --> ExtLoc + ExtLoc --> LocGrants +``` + +| # | Resource | Provider | Description | +|---|----------|----------|-------------| +| 1 | `aws_cross_account_role` | `awscc.iam.roles` | IAM role for Databricks cross-account access | +| 2 | `databricks_account_credentials` | `databricks_account.provisioning.credentials` | Registers the IAM role as a credential configuration | +| 3 | `aws_s3_workspace_bucket` | `awscc.s3.buckets` | Root S3 bucket for workspace storage | +| 4 | `aws_s3_workspace_bucket_policy` | `awscc.s3.bucket_policies` | Grants Databricks access to the workspace bucket | +| 5 | `databricks_storage_configuration` | `databricks_account.provisioning.storage` | Registers the S3 bucket as a storage configuration | +| 6 | `aws_s3_metastore_bucket` | `awscc.s3.buckets` | S3 bucket for Unity Catalog metastore | +| 7 | `aws_metastore_access_role` | `awscc.iam.roles` | IAM role for Unity Catalog 
metastore S3 access | +| 8 | `databricks_workspace` | `databricks_account.provisioning.workspaces` | The Databricks workspace itself | +| 9 | `workspace_admins_group` | `databricks_account.iam.account_groups` | Admin group for the workspace | +| 10 | `get_databricks_users` | `databricks_account.iam.users` | Looks up user IDs for group membership *(query)* | +| 11 | `databricks_account/update_group_membership` | `databricks_account.iam.account_groups` | Adds users to the admin group *(command)* | +| 12 | `databricks_account/workspace_assignment` | `databricks_account.iam.workspace_assignment` | Assigns the admin group to the workspace *(command)* | +| 13 | `databricks_workspace/storage_credentials` | `databricks_workspace.catalog.storage_credentials` | Unity Catalog storage credential | +| 14 | `aws/iam/update_metastore_access_role` | `awscc.iam.roles` | Updates the metastore role trust policy with the external ID *(command)* | +| 15 | `databricks_credential_grants` | `databricks_workspace.catalog.grants` | Grants privileges on the storage credential *(command)* | +| 16 | `external_location` | `databricks_workspace.catalog.external_locations` | Unity Catalog external location | +| 17 | `databricks_workspace/unitycatalog/location_grants` | `databricks_workspace.catalog.grants` | Grants privileges on the external location *(command)* | + +## Prerequisites + +- `stackql-deploy` installed ([releases](https://github.com/stackql/stackql-deploy-rs/releases)) +- Environment variables: + + ```bash + export AWS_ACCESS_KEY_ID=your_aws_access_key + export AWS_SECRET_ACCESS_KEY=your_aws_secret_key + export AWS_REGION=us-east-1 + export AWS_ACCOUNT_ID=your_aws_account_id + export DATABRICKS_ACCOUNT_ID=your_databricks_account_id + export DATABRICKS_AWS_ACCOUNT_ID=414351767826 + export DATABRICKS_CLIENT_ID=your_databricks_client_id + export DATABRICKS_CLIENT_SECRET=your_databricks_client_secret + ``` + +## Deploying the Stack + +```bash +stackql-deploy build 
examples/databricks/serverless dev +``` + +Dry run: + +```bash +stackql-deploy build examples/databricks/serverless dev --dry-run --show-queries +``` + +Testing the stack: + +```bash +stackql-deploy test examples/databricks/serverless dev +``` + +Tearing down: + +```bash +stackql-deploy teardown examples/databricks/serverless dev +``` + +## stackql_manifest.yml + +The manifest demonstrates several advanced features including multi-provider stacks, version-pinned providers, `file()` directives for externalized policy documents, the `return_vals` construct for capturing identifiers from `RETURNING` clauses, and `command` and `query` resource types alongside standard `resource` types. + +
+ Click to expand the stackql_manifest.yml file + +```yaml +version: 1 +name: "stackql-serverless" +description: creates a serverless databricks workspace +providers: + - awscc::v26.03.00379 + - databricks_account::v26.03.00381 + - databricks_workspace::v26.03.00381 +globals: + - name: databricks_account_id + description: databricks account id + value: "{{ DATABRICKS_ACCOUNT_ID }}" + - name: databricks_aws_account_id + description: databricks AWS account id + value: "{{ DATABRICKS_AWS_ACCOUNT_ID }}" + - name: aws_account + description: aws_account id + value: "{{ AWS_ACCOUNT_ID }}" + - name: region + description: aws region + value: "{{ AWS_REGION }}" + - name: global_tags + value: + - Key: 'stackql:stack-name' + Value: "{{ stack_name }}" + - Key: 'stackql:stack-env' + Value: "{{ stack_env }}" + - Key: 'stackql:resource-name' + Value: "{{ resource_name }}" + +# ... resources defined in order of dependencies +# see full manifest in the examples/databricks/serverless directory + +exports: + - workspace_name + - workspace_id + - deployment_name + - workspace_status + - workspace_url +``` + +
+ +## Resource Query Files + + + + +This resource demonstrates the `create`/`update`/`statecheck`/`exports` pattern with Cloud Control, using `generate_patch_document` for updates and `AWS_POLICY_EQUAL` for policy comparison in the statecheck. + +```sql +/*+ exists */ +SELECT count(*) as count +FROM awscc.iam.roles +WHERE region = 'us-east-1' AND +Identifier = '{{ role_name }}'; + +/*+ create */ +INSERT INTO awscc.iam.roles ( + AssumeRolePolicyDocument, Description, ManagedPolicyArns, + MaxSessionDuration, Path, PermissionsBoundary, + Policies, RoleName, Tags, region +) +SELECT + '{{ assume_role_policy_document }}', '{{ description }}', + '{{ managed_policy_arns }}', '{{ max_session_duration }}', + '{{ path }}', '{{ permissions_boundary }}', + '{{ policies }}', '{{ role_name }}', '{{ tags }}', 'us-east-1'; + +/*+ update */ +UPDATE awscc.iam.roles +SET PatchDocument = string('{{ { + "AssumeRolePolicyDocument": assume_role_policy_document, + "Description": description, + "ManagedPolicyArns": managed_policy_arns, + "MaxSessionDuration": max_session_duration, + "PermissionsBoundary": permissions_boundary, + "Path": path, + "Policies": policies, + "Tags": tags +} | generate_patch_document }}') +WHERE region = 'us-east-1' +AND Identifier = '{{ role_name }}'; + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT COUNT(*) as count FROM ( + SELECT + max_session_duration, path, + AWS_POLICY_EQUAL(assume_role_policy_document, + '{{ assume_role_policy_document }}') as test_assume_role_policy_doc, + AWS_POLICY_EQUAL(policies, '{{ policies }}') as test_policies + FROM awscc.iam.roles + WHERE Identifier = '{{ role_name }}' AND region = 'us-east-1')t +WHERE test_assume_role_policy_doc = 1 +AND test_policies = 1 +AND path = '{{ path }}'; + +/*+ exports */ +SELECT arn, role_name +FROM awscc.iam.roles +WHERE region = 'us-east-1' AND +Identifier = '{{ role_name }}'; + +/*+ delete */ +DELETE FROM awscc.iam.roles +WHERE Identifier = '{{ role_name }}' +AND region = 'us-east-1'; +``` + 
+ + + +The workspace resource uses the standard `count`-based exists pattern with Databricks account-level APIs. The `exports` query constructs the `workspace_url` using string concatenation. + +```sql +/*+ exists */ +SELECT count(*) as count +FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ account_id }}' +AND workspace_name = '{{ workspace_name }}'; + +/*+ create */ +INSERT INTO databricks_account.provisioning.workspaces ( + aws_region, credentials_id, pricing_tier, + storage_configuration_id, workspace_name, account_id +) +SELECT + '{{ aws_region }}', '{{ credentials_id }}', + '{{ pricing_tier }}', '{{ storage_configuration_id }}', + '{{ workspace_name }}', '{{ account_id }}'; + +/*+ statecheck, retries=5, retry_delay=10 */ +SELECT count(*) as count +FROM databricks_account.provisioning.workspaces +WHERE credentials_id = '{{ credentials_id }}' +AND storage_configuration_id = '{{ storage_configuration_id }}' +AND workspace_name = '{{ workspace_name }}' +AND aws_region = '{{ aws_region }}' +AND pricing_tier = '{{ pricing_tier }}' +AND account_id = '{{ account_id }}'; + +/*+ exports */ +SELECT workspace_name, workspace_id, deployment_name, + workspace_status, + 'https://' || deployment_name || '.cloud.databricks.com' AS workspace_url +FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ account_id }}' +AND workspace_name = '{{ workspace_name }}'; + +/*+ delete */ +DELETE FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ account_id }}' +AND workspace_id = '{{ workspace_id }}'; +``` + + + + +This resource demonstrates the **identifier capture** pattern with `return_vals`. The `exists` query returns a named field (`databricks_group_id`), and the `create` uses `RETURNING id` with `return_vals` in the manifest to map the provider's `id` field to `databricks_group_id`. 
+ +```sql +/*+ exists */ +SELECT id AS databricks_group_id +FROM databricks_account.iam.account_groups +WHERE account_id = '{{ databricks_account_id }}' +AND filter = 'displayName Eq "{{ display_name }}"'; + +/*+ create */ +INSERT INTO databricks_account.iam.account_groups ( + displayName, account_id +) +SELECT '{{ display_name }}', '{{ databricks_account_id }}' +RETURNING id; + +/*+ exports */ +SELECT '{{ this.databricks_group_id }}' as databricks_group_id, +'{{ display_name }}' as display_name; + +/*+ delete */ +DELETE FROM databricks_account.iam.account_groups +WHERE account_id = '{{ databricks_account_id }}' +AND id = '{{ databricks_group_id }}'; +``` + +Manifest `return_vals` configuration: + +```yaml +return_vals: + create: + - id: databricks_group_id +``` + + + + +## Key Patterns + +### Multi-Provider Stacks + +This stack uses three providers with version pinning: + +```yaml +providers: + - awscc::v26.03.00379 + - databricks_account::v26.03.00381 + - databricks_workspace::v26.03.00381 +``` + +### Externalized Policy Documents + +Complex IAM policy statements are stored as JSON files and loaded using the `file()` directive: + +```yaml +policies: + - PolicyDocument: + Statement: + - file(aws/iam/policy_statements/cross_account_role/ec2_permissions.json) + - file(aws/iam/policy_statements/cross_account_role/iam_service_linked_role.json) + Version: '2012-10-17' + PolicyName: "{{ stack_name }}-{{ stack_env }}-policy" +``` + +### `return_vals` for Identifier Capture + +When a provider returns an identifier during creation that can't be predicted (e.g. 
auto-generated IDs), use `return_vals` to capture it: + +```yaml +return_vals: + create: + - id: databricks_group_id # maps provider field 'id' to 'databricks_group_id' +``` + +### Stack-Level Exports + +The manifest defines stack-level exports that are displayed after a successful `build` or `test` and written to `.stackql-deploy-exports` for sourcing into the shell: + +```yaml +exports: + - workspace_name + - workspace_id + - deployment_name + - workspace_status + - workspace_url +``` + +## More Information + +The complete code for this example stack is available [__here__](https://github.com/stackql/stackql-deploy-rs/tree/main/examples/databricks/serverless). For more information: + +- [`databricks_account` provider docs](https://databricks-account.stackql.io/providers/databricks_account/) +- [`databricks_workspace` provider docs](https://databricks-workspace.stackql.io/providers/databricks_workspace/) +- [`awscc` provider docs](https://awscc.stackql.io/providers/awscc/) +- [`stackql`](https://github.com/stackql/stackql) +- [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy-rs)