Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .copywrite.hcl
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,7 @@ project {
header_ignore = [
# "vendors/**",
# "**autogen**",
".venv",
"__pycache__"
]
}
13 changes: 13 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,16 @@ my.*.tfvars
*.env
*.kubeconfig

## Python

__pycache__/
*.pyc
*.pyo
*.pyd
*.pyw
*.pyz
*.pyi

.venv/
2 changes: 0 additions & 2 deletions .husky/commit-msg
Original file line number Diff line number Diff line change
@@ -1,3 +1 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This addresses a deprecation warning

npx --no -- commitlint --edit ''
39 changes: 39 additions & 0 deletions Taskfile.yml
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because I refuse to work with Makefile after learning about Taskfiles 😅

Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0

version: "3"

dotenv: [".env"]

tasks:
init:
desc: Install dependencies and setup the project
cmds:
- echo "initializing npm dependencies"
- npm ci
- npx husky install

help:
desc: Display this help
cmds:
- task --list

build-docker:
desc: Build docker image
cmds:
- echo "Building docker image"
- |
docker build --build-arg PALETTE_VERSION=$PALETTE_VERSION \
--build-arg PALETTE_CLI_VERSION=$PALETTE_CLI_VERSION \
--build-arg PALETTE_EDGE_VERSION=$PALETTE_EDGE_VERSION \
--build-arg PACKER_VERSION=$PACKER_VERSION \
--build-arg ORAS_VERSION=$ORAS_VERSION \
--build-arg TERRAFORM_VERSION=$TERRAFORM_VERSION \
--build-arg K9S_VERSION=$K9S_VERSION \
-t tutorials .

license:
desc: Adds a license header to all files. Reference https://github.com/hashicorp/copywrite to learn more.
cmds:
- echo "Applying license headers..."
- copywrite headers
1 change: 1 addition & 0 deletions ai/palette-mcp/integrate-palette-mcp/.python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.12
26 changes: 26 additions & 0 deletions ai/palette-mcp/integrate-palette-mcp/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Integrate Palette MCP

This folder contains the demo code for the Integrate Palette MCP in an Agentic Workflow tutorial. The user will learn how to integrate Palette MCP into a LangChain agent workflow.

The workflow is as follows: it identifies if a specific pack is present in your environment's cluster profiles and deployed clusters. If the pack is present, the workflow will ask you what tags you want to apply to the cluster profiles containing the pack and any active clusters using cluster
profiles containing the pack. This will allow you to more readily identify the cluster profiles and active clusters that are
using the pack.

## Get Started

Follow the instructions in the tutorial to get started.

## Environment Variables

| Variable | Description | Example |
| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- |
| `OPENAI_API_KEY` | OpenAI API key. Required. | `sk-...` |
| `OPENAI_MODEL` | Default model used for all agents. Overridden per-agent by the model-specific variables below. | `gpt-4o` |
| `OPENAI_ACTIVE_CLUSTER_MODEL` | Model for the active cluster finder agent. Falls back to `OPENAI_MODEL`. | `gpt-4o-mini` |
| `OPENAI_REPORTER_MODEL` | Model for the reporter agent. Falls back to `OPENAI_MODEL`. | `gpt-4o-mini` |
| `OPENAI_TAGGING_MODEL` | Model for the tagging agent. Falls back to `OPENAI_MODEL`. | `gpt-4o-mini` |
| `PACK_NAME` | Target pack name to search for in cluster profiles. Can also be set via `--pack`. | `nginx` |
| `DEBUG` | Log level. Accepted values: `warn`, `info`, `debug`, `verbose`. Defaults to `info`. Can also be set via `--log-level`. | `debug` |
| `PALETTE_MCP_ENV_FILE` | Path to the env file passed to the Palette MCP container. Defaults to `~/.palette/.env-mcp`. | `/home/user/.palette/.env-mcp` |
| `PALETTE_MCP_KUBECONFIG_DIR` | Path to a local directory to bind-mount into the container as `/tmp/kubeconfig`. Omitted if not set. | `/home/user/.kube` |
| `PALETTE_MCP_IMAGE` | Palette MCP container image to use. Defaults to `public.ecr.aws/palette-ai/palette-mcp-server:latest`. | `public.ecr.aws/palette-ai/palette-mcp-server:v1.2.0` |
13 changes: 13 additions & 0 deletions ai/palette-mcp/integrate-palette-mcp/Taskfile.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
version: "3"

dotenv: [".env", "../../../.env"]

tasks:
start-agent:
desc: "Start the Palette MCP LangChain agent. Usage: task start-agent -- --pack <name> [--model <model>]"
env:
DEBUG: "info"
PACK_NAME: "{{.PACK_NAME}}"
OPENAI_MODEL: "{{.OPENAI_MODEL}}"
cmds:
- uv run python main.py {{.CLI_ARGS}}
2 changes: 2 additions & 0 deletions ai/palette-mcp/integrate-palette-mcp/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0

"""Palette-focused agent for active cluster mapping."""

from __future__ import annotations

import importlib
import json
from typing import Any

from pydantic import BaseModel

from helpers import suppress_console_output

# System prompt for the active-cluster mapping agent: constrains the LLM to
# Palette MCP tools only and to factual output (no speculation).
ACTIVE_CLUSTER_SYSTEM_PROMPT = (
    "You are a Palette active-cluster mapping specialist. "
    "Use only Palette MCP tools to identify active clusters that use a provided set of cluster profile UIDs. "
    "Return factual results only."
)


class ActiveCluster(BaseModel):
    """One active Palette cluster matched to a target cluster profile.

    Produced by the active-cluster agent as part of ``ActiveClusterOutput``.
    """

    uid: str  # Palette cluster UID
    name: str  # human-readable cluster name
    cluster_profile_uid: str  # UID of the matched cluster profile
    cluster_profile_name: str  # name of the matched cluster profile
    # Field path in the cluster payload where the profile reference was found
    # (e.g. a spec path) — presumably used as provenance; confirm with agent prompt.
    evidence_field_path: str
    evidence: str  # raw value/snippet backing the match


class ActiveClusterOutput(BaseModel):
    """Structured response schema for the active-cluster mapping agent.

    Used as the agent's ``response_format`` so the LLM returns JSON that
    validates against this model.
    """

    pack_name: str  # pack the search was scoped to
    target_profile_uids: list[str]  # profile UIDs the agent was asked to match
    total_active_clusters_scanned: int  # how many active clusters were inspected
    active_clusters_using_matched_profiles: list[ActiveCluster]  # matches found
    # Every active cluster UID checked — lets callers verify full coverage
    # even when the match list is empty.
    checked_active_cluster_uids: list[str]
    notes: str  # free-form caveats/observations from the agent


# async def initialize_active_cluster_agent(
# model: str,
# mcp_tools: list,
# ) -> Any:
# from langchain.agents import create_agent
# from langchain_openai import ChatOpenAI

# checkpoint_module = importlib.import_module("langgraph.checkpoint.memory")
# InMemorySaver = checkpoint_module.InMemorySaver

# llm = ChatOpenAI(model=model)
# return create_agent(
# model=llm,
# tools=mcp_tools,
# system_prompt=ACTIVE_CLUSTER_SYSTEM_PROMPT,
# response_format=ActiveClusterOutput,
# checkpointer=InMemorySaver(),
# )


# async def invoke_active_cluster_agent(
# agent: Any,
# pack_name: str,
# matched_profiles_output: str,
# debug_level: str,
# run_id: str,
# ) -> str:
# hide_mcp_output = debug_level != "verbose"
# schema = json.dumps(ActiveClusterOutput.model_json_schema(), indent=2)
# active_cluster_prompt = (
# f"Given this profile discovery result for pack '{pack_name}':\n"
# f"{matched_profiles_output}\n\n"
# "Required process:\n"
# "1) Extract matched profile UIDs from the input JSON.\n"
# "2) Call gather_or_delete_clusters with action='list' and active_only=true.\n"
# "3) For each active cluster uid from step 2, call gather_or_delete_clusters with action='get'.\n"
# "4) Match clusters using explicit profile UID fields only.\n"
# "5) If no clusters match, return an empty list and include every checked active cluster uid.\n\n"
# "Return a response that conforms to this JSON schema:\n"
# f"{schema}\n"
# )
# run_config = {
# "configurable": {"thread_id": f"active-cluster:{pack_name.lower()}:{run_id}"}
# }
# with suppress_console_output(hide_mcp_output):
# result = await agent.ainvoke(
# {"messages": [{"role": "user", "content": active_cluster_prompt}]},
# config=run_config,
# )
# structured = result.get("structured_response")
# if isinstance(structured, ActiveClusterOutput):
# return structured.model_dump_json()
# messages = result.get("messages", [])
# for message in reversed(messages):
# content = getattr(message, "content", None)
# if isinstance(content, str) and content.strip():
# return content
# return str(result)
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0

"""Palette-focused agent for cluster profile discovery."""

from __future__ import annotations

import importlib
import json
from typing import Any, Literal

from pydantic import BaseModel

from helpers import suppress_console_output

# System prompt for the profile discovery agent: restricts the LLM to Palette
# MCP tools, prescribes the list-then-inspect workflow, and requires the
# profile scope to be reported.
PROFILE_FINDER_SYSTEM_PROMPT = (
    "You are a Palette profile discovery specialist. "
    "Use only Palette MCP tools to find cluster profiles that include a target pack. "
    "First list cluster profiles, then inspect details when needed. "
    "Always capture and return cluster profile scope. "
    "Return factual results only."
)


class MatchedProfile(BaseModel):
    """One cluster profile found to contain the target pack.

    Part of ``ProfileDiscoveryOutput``; 'unknown' scope means the agent could
    not read the scope from the profile metadata.
    """

    uid: str  # Palette cluster profile UID
    name: str  # human-readable profile name
    scope: Literal["tenant", "project", "system", "unknown"]  # profile visibility scope
    pack_references: list[str]  # pack identifiers that matched the target pack
    evidence: str  # raw value/snippet backing the match

class ProfileDiscoveryOutput(BaseModel):
    """Structured response schema for the profile discovery agent.

    Used as the agent's ``response_format`` so the LLM returns JSON that
    validates against this model.
    """

    pack_name: str  # pack the search was scoped to
    total_profiles_scanned: int  # how many cluster profiles were inspected
    matched_profiles: list[MatchedProfile]  # profiles containing the pack
    notes: str  # free-form caveats/observations from the agent


# async def initialize_profile_finder_agent(
# model: str,
# mcp_tools: list,
# ) -> Any:
# from langchain.agents import create_agent
# from langchain_openai import ChatOpenAI

# checkpoint_module = importlib.import_module("langgraph.checkpoint.memory")
# InMemorySaver = checkpoint_module.InMemorySaver

# llm = ChatOpenAI(model=model)
# return create_agent(
# model=llm,
# tools=mcp_tools,
# system_prompt=PROFILE_FINDER_SYSTEM_PROMPT,
# response_format=ProfileDiscoveryOutput,
# checkpointer=InMemorySaver(),
# )


# async def invoke_profile_finder_agent(
# agent: Any,
# pack_name: str,
# debug_level: str,
# run_id: str,
# ) -> str:
# hide_mcp_output = debug_level != "verbose"
# schema = json.dumps(ProfileDiscoveryOutput.model_json_schema(), indent=2)
# profile_finder_prompt = (
# "Find all cluster profiles in Palette that use the pack named "
# f"'{pack_name}'. Use Palette MCP tools only.\n\n"
# "Required process:\n"
# "1) Call gather_or_delete_clusterprofiles with action='list'.\n"
# "2) If list output lacks pack details, call action='get' for relevant cluster profile uids.\n"
# "3) Match pack name case-insensitively.\n"
# "4) For each matched profile, include scope from metadata.annotations.scope when available.\n"
# "5) If scope is missing, set scope to 'unknown' and mention in notes.\n\n"
# "Important:\n"
# "- Return only profile-level results. Do not query clusters in this agent.\n\n"
# "Return a response that conforms to this JSON schema:\n"
# f"{schema}\n"
# )
# run_config = {
# "configurable": {"thread_id": f"profile-finder:{pack_name.lower()}:{run_id}"}
# }
# with suppress_console_output(hide_mcp_output):
# result = await agent.ainvoke(
# {"messages": [{"role": "user", "content": profile_finder_prompt}]},
# config=run_config,
# )
# structured = result.get("structured_response")
# if isinstance(structured, ProfileDiscoveryOutput):
# return structured.model_dump_json()
# messages = result.get("messages", [])
# for message in reversed(messages):
# content = getattr(message, "content", None)
# if isinstance(content, str) and content.strip():
# return content
# return str(result)
Loading
Loading