def get_adcp_spec_version() -> str:
    """Get the AdCP specification version this SDK is built against.

    Pinned at build time from the ``ADCP_VERSION`` file packaged with
    the SDK; the value determines which AdCP schemas ship with this
    release. Surface it to clients (agent cards, capability responses,
    debug endpoints) or use it to validate cross-compatibility with a
    peer agent's advertised spec version.

    For the SDK *package* version (``4.0.0b1``, ``4.1.2``, …) use
    :func:`get_adcp_sdk_version` or ``adcp.__version__`` instead.

    Returns:
        AdCP specification version string (e.g. ``"2.5.0"``,
        ``"latest"``).

    Raises:
        FileNotFoundError: If the packaged ``ADCP_VERSION`` file is
            missing — typically an indicator of a corrupt install.
    """
    # Imported lazily: importlib.resources is only needed on this path.
    from importlib.resources import files

    return (files("adcp") / "ADCP_VERSION").read_text().strip()


def get_adcp_sdk_version() -> str:
    """Get this SDK package's version string (e.g. ``"4.0.0b1"``).

    Companion to :func:`get_adcp_spec_version` — the symmetric function
    names make call sites unambiguous about spec-vs-SDK version. For a
    single-value import without that context, read
    ``adcp.__version__`` directly.

    Returns:
        SDK package version string (the module-level ``__version__``;
        presumably ``"0.0.0+unknown"`` from an uninstalled source tree
        — TODO confirm where ``__version__`` is assigned).
    """
    return __version__
+ """ + import warnings + + warnings.warn( + "get_adcp_version() is deprecated; use get_adcp_spec_version() " + "for the AdCP spec version or get_adcp_sdk_version() / " + "adcp.__version__ for the SDK package version.", + DeprecationWarning, + stacklevel=2, + ) + return get_adcp_spec_version() + + __all__ = [ # Version functions "get_adcp_version", diff --git a/src/adcp/migrate/__init__.py b/src/adcp/migrate/__init__.py new file mode 100644 index 00000000..58100552 --- /dev/null +++ b/src/adcp/migrate/__init__.py @@ -0,0 +1,23 @@ +"""Migration tooling for the AdCP SDK. + +Run via ``python -m adcp.migrate ``. + +Migrations are one-shot code rewriters for spec major-version bumps. +They are *not* a runtime compatibility layer — the removed types are +gone. The migrations turn a 2-3 day sed hunt into an afternoon by +doing the mechanical renames automatically and reporting what the +seller still has to fix by hand. + +Supported migrations: + +* ``v3-to-v4`` — 3.x → 4.0 rename + removal report. Renames the 9 + ``Asset`` → ``Content`` classes and flags usages of + removed types (``BrandManifest``, ``DeliverTo``, ``Pricing``, etc.) + and of numbered ``Assets`` direct imports that aren't stable. +""" + +from __future__ import annotations + +__all__ = ["v3_to_v4"] + +from adcp.migrate import v3_to_v4 diff --git a/src/adcp/migrate/__main__.py b/src/adcp/migrate/__main__.py new file mode 100644 index 00000000..8b997e4c --- /dev/null +++ b/src/adcp/migrate/__main__.py @@ -0,0 +1,46 @@ +"""CLI dispatcher: ``python -m adcp.migrate ``. + +Usage:: + + python -m adcp.migrate v3-to-v4 ./src + python -m adcp.migrate v3-to-v4 ./src --apply + python -m adcp.migrate v3-to-v4 ./src --json + +Each migration owns its own argparse surface (see e.g. +:func:`adcp.migrate.v3_to_v4.main`). This dispatcher just routes the +first positional argument. 
+""" + +from __future__ import annotations + +import sys + + +def main() -> int: + argv = sys.argv[1:] + if not argv or argv[0] in {"-h", "--help"}: + print( + "usage: python -m adcp.migrate [options]\n" + "\n" + "Migrations:\n" + " v3-to-v4 Rewrite Asset → Content and flag\n" + " removed-type usages (BrandManifest, DeliverTo, etc).\n" + "\n" + "Run `python -m adcp.migrate --help` for per-migration options.\n", + file=sys.stderr, + ) + return 0 if argv and argv[0] in {"-h", "--help"} else 2 + + migration = argv[0] + if migration == "v3-to-v4": + from adcp.migrate.v3_to_v4 import main as migrate_main + + return migrate_main(argv[1:]) + + print(f"error: unknown migration {migration!r}", file=sys.stderr) + print("Known: v3-to-v4", file=sys.stderr) + return 2 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/adcp/migrate/v3_to_v4.py b/src/adcp/migrate/v3_to_v4.py new file mode 100644 index 00000000..b1bcdb62 --- /dev/null +++ b/src/adcp/migrate/v3_to_v4.py @@ -0,0 +1,457 @@ +"""v3 → v4 migration for the AdCP SDK. + +The spec redesign in 4.0 renamed the 9 ``Asset`` payload +classes to ``Content`` and removed several legacy types +(``BrandManifest``, ``DeliverTo``, ``Pricing``, ``PromotedProducts``, +``PromotedOfferings``, ``FormatCategory``, ``PackageStatus``). This +module does the mechanical rewrites and prints a structured report of +everything that still needs human attention. + +Two kinds of findings: + +* **Applied**: direct name rewrites (``AudioAsset`` → ``AudioContent`` + etc). The 9 rename targets are distinctive enough that word-boundary + regex is safe; sellers should still review the diff. +* **Flagged**: removed types, numbered ``Assets`` imports, + ``adcp.types.generated_poc`` imports. These don't rewrite — the + seller has to choose the replacement (e.g. ``BrandManifest`` → + ``BrandReference(domain=...)`` depends on call-site context). 
# The 9 spec rename mappings — payload ``*Asset`` → ``*Content``.
# Ordering only affects report readability; each name is substituted
# by its own independent word-boundary regex.
ASSET_CONTENT_RENAMES: dict[str, str] = {
    f"{stem}Asset": f"{stem}Content"
    for stem in (
        "Audio",
        "Css",
        "Html",
        "Image",
        "Javascript",
        "Text",
        "Url",
        "Video",
        "Webhook",
    )
}
# Attribute accesses that moved or were removed in 4.0. Flagged, not
# rewritten: the correct replacement depends on the call site.
REMOVED_ATTRIBUTE_ACCESSES: dict[str, str] = {
    ".brand_manifest": "ResolvedBrand.brand_manifest removed — use .brand instead",
}


# Private-module import paths that must not appear in downstream code.
PRIVATE_IMPORT_PATHS: dict[str, str] = {
    "adcp.types.generated_poc": (
        "private module — import from adcp.types (stable public API) instead"
    ),
}
+NUMBERED_ASSETS_PATTERN = re.compile(r"\bAssets\d+\b") + + +@dataclass +class Finding: + """One migration finding — either an applied rename or a manual TODO.""" + + kind: str # "rename" | "flag_removed" | "flag_private" | "flag_numbered" | "flag_attribute" + path: str + line: int + column: int + before: str + after: str | None = None # None for flag-only items + hint: str | None = None + migration_anchor: str | None = None + + +@dataclass +class Report: + """Structured migration report.""" + + applied: list[Finding] = field(default_factory=list) + flagged: list[Finding] = field(default_factory=list) + scanned_files: int = 0 + rewritten_files: int = 0 + + def add(self, finding: Finding) -> None: + if finding.kind == "rename": + self.applied.append(finding) + else: + self.flagged.append(finding) + + +_SKIP_DIRS: frozenset[str] = frozenset( + { + ".git", + ".venv", + "venv", + ".tox", + ".mypy_cache", + ".pytest_cache", + ".ruff_cache", + "__pycache__", + "node_modules", + "dist", + "build", + ".eggs", + } +) + + +def _iter_python_files(root: Path) -> list[Path]: + """Walk ``root`` for ``*.py`` files, skipping common build/dep dirs. + + Skip-dir matching is applied to path components *relative to + ``root``*, not absolute parts. A seller's repo checked out at + ``/home/ci/build/myrepo/src`` (where ``build`` is a CI-scratch + ancestor directory) previously had every file silently skipped — + the absolute-path check hit ``build`` and dropped the whole tree. + Relative matching makes the skip honour user intent: skip + ``myrepo/src/build/output.py`` while still scanning + ``/home/ci/build/myrepo/src/app.py``. + """ + if root.is_file(): + return [root] if root.suffix == ".py" else [] + resolved_root = root.resolve() + files: list[Path] = [] + for p in root.rglob("*.py"): + try: + rel_parts = p.resolve().relative_to(resolved_root).parts + except ValueError: + # rglob can return paths outside root when root contains a + # symlink; fall back to the raw parts for those. 
def _compile_word_pattern(name: str) -> re.Pattern[str]:
    """Word-boundary pattern for ``name`` — no partial-identifier hits."""
    return re.compile(rf"\b{re.escape(name)}\b")


# Compiled once at import. Word boundaries keep seller types like
# ``MyAudioAsset`` untouched.
_RENAME_PATTERNS = {name: _compile_word_pattern(name) for name in ASSET_CONTENT_RENAMES}
_REMOVED_PATTERNS = {name: _compile_word_pattern(name) for name in REMOVED_TYPES}

# Trailing \b only (the attr starts with a literal dot): prevents
# ``.brand_manifest_v2``-style false positives that a bare substring
# test would produce.
_REMOVED_ATTRIBUTE_PATTERNS = {
    attr: re.compile(rf"{re.escape(attr)}\b") for attr in REMOVED_ATTRIBUTE_ACCESSES
}


def _collect_line_findings(
    path_str: str, lineno: int, line: str, findings: list[Finding]
) -> bool:
    """Append every finding detected on one line; True iff a rename hit."""
    saw_rename = False

    # 1. Asset → Content renames (rewritten later when --apply is set).
    for old_name, new_name in ASSET_CONTENT_RENAMES.items():
        for hit in _RENAME_PATTERNS[old_name].finditer(line):
            findings.append(
                Finding(
                    kind="rename",
                    path=path_str,
                    line=lineno,
                    column=hit.start() + 1,
                    before=old_name,
                    after=new_name,
                )
            )
            saw_rename = True

    # 2. Removed types — flag only; the replacement is context-dependent.
    for type_name, (hint, anchor) in REMOVED_TYPES.items():
        for hit in _REMOVED_PATTERNS[type_name].finditer(line):
            findings.append(
                Finding(
                    kind="flag_removed",
                    path=path_str,
                    line=lineno,
                    column=hit.start() + 1,
                    before=type_name,
                    hint=hint,
                    migration_anchor=anchor,
                )
            )

    # 3. Numbered ``AssetsNN`` aliases — unstable across spec revisions.
    for hit in NUMBERED_ASSETS_PATTERN.finditer(line):
        findings.append(
            Finding(
                kind="flag_numbered",
                path=path_str,
                line=lineno,
                column=hit.start() + 1,
                before=hit.group(0),
                hint=(
                    "numbered Assets classes are unstable across spec revisions; "
                    "import the semantic alias from adcp.types instead"
                ),
                migration_anchor="numbered-discriminated-union-classes-shifted",
            )
        )

    # 4. Private-module imports. Plain substring match: the dotted path
    #    is distinctive enough that boundaries add nothing.
    for module_path, hint in PRIVATE_IMPORT_PATHS.items():
        if module_path in line:
            findings.append(
                Finding(
                    kind="flag_private",
                    path=path_str,
                    line=lineno,
                    column=line.index(module_path) + 1,
                    before=module_path,
                    hint=hint,
                )
            )

    # 5. Removed attribute accesses (``.brand_manifest`` etc.).
    for attr, hint in REMOVED_ATTRIBUTE_ACCESSES.items():
        for hit in _REMOVED_ATTRIBUTE_PATTERNS[attr].finditer(line):
            findings.append(
                Finding(
                    kind="flag_attribute",
                    path=path_str,
                    line=lineno,
                    column=hit.start() + 1,
                    before=attr,
                    hint=hint,
                )
            )

    return saw_rename


def scan_file(path: Path, *, apply_changes: bool) -> tuple[list[Finding], str | None]:
    """Scan one file; return ``(findings, rewritten_text_or_None)``.

    The second element is ``None`` unless ``apply_changes`` is set AND
    at least one rename matched; the caller treats any non-None value
    as the signal to write the file back.

    Encoding/newline handling:

    * reads with ``utf-8-sig`` so a UTF-8 BOM (legal in Python source,
      common from Windows editors) doesn't make the file look binary;
    * reads with ``newline=""`` so CRLF endings survive verbatim and
      the eventual write doesn't produce a whitespace-only mega-diff
      (``open`` is used rather than ``Path.read_text`` because the
      latter's ``newline=`` keyword is newer than the SDK's minimum
      supported Python, per the original note citing 3.13);
    * unreadable or non-UTF-8 files are skipped silently — the codemod
      targets Python source only.
    """
    findings: list[Finding] = []
    try:
        with open(path, encoding="utf-8-sig", newline="") as fh:
            text = fh.read()
    except (UnicodeDecodeError, OSError):
        return findings, None

    # Per-line detection keeps column info in the report while the
    # same compiled patterns drive the whole-text rewrite below.
    path_str = str(path)
    any_rename = False
    for lineno, line in enumerate(text.splitlines(), start=1):
        if _collect_line_findings(path_str, lineno, line, findings):
            any_rename = True

    if not (apply_changes and any_rename):
        return findings, None

    rewritten = text
    for old_name, new_name in ASSET_CONTENT_RENAMES.items():
        rewritten = _RENAME_PATTERNS[old_name].sub(new_name, rewritten)
    return findings, rewritten


def run(root: Path, *, apply_changes: bool = False) -> Report:
    """Execute the migration across ``root``; return a :class:`Report`.

    When ``apply_changes`` is true, files with rename hits are written
    back in place; otherwise this is a pure dry run.
    """
    report = Report()
    for source in _iter_python_files(root):
        report.scanned_files += 1
        file_findings, rewritten = scan_file(source, apply_changes=apply_changes)
        for finding in file_findings:
            report.add(finding)
        if rewritten is None:
            continue
        # newline="" writes back exactly the endings that were read —
        # CRLF (even mixed) round-trips; pairs with the newline=""
        # read in scan_file.
        with open(source, "w", encoding="utf-8", newline="") as fh:
            fh.write(rewritten)
        report.rewritten_files += 1
    return report
+ by_rename: dict[str, dict[str, list[Finding]]] = {} + for f in report.applied: + by_rename.setdefault(f.before, {}).setdefault(f.after or "?", []).append(f) + for before, after_map in sorted(by_rename.items()): + for after, hits in sorted(after_map.items()): + lines.append( + f" {before} → {after} ({len(hits)} hit{'s' if len(hits) != 1 else ''})" + ) + for f in hits[:5]: + lines.append(f" {f.path}:{f.line}:{f.column}") + if len(hits) > 5: + lines.append(f" … and {len(hits) - 5} more") + else: + lines.append("No renames needed.") + + if report.flagged: + lines.append("") + lines.append(f"Manual review required: {len(report.flagged)} findings") + by_name: dict[str, list[Finding]] = {} + for f in report.flagged: + by_name.setdefault(f.before, []).append(f) + for name, hits in sorted(by_name.items()): + lines.append(f" {name} ({len(hits)} hit{'s' if len(hits) != 1 else ''})") + hint = hits[0].hint + if hint: + lines.append(f" → {hint}") + anchor = hits[0].migration_anchor + if anchor: + lines.append(f" MIGRATION_v3_to_v4.md#{anchor}") + for f in hits[:5]: + lines.append(f" {f.path}:{f.line}:{f.column}") + if len(hits) > 5: + lines.append(f" … and {len(hits) - 5} more") + else: + lines.append("") + lines.append("No manual-review findings.") + + if apply_changes and report.rewritten_files: + lines.append("") + lines.append(f"Rewrote {report.rewritten_files} files in place.") + lines.append("Review with `git diff` before committing.") + + return "\n".join(lines) + + +def _format_json_report(report: Report) -> str: + """JSON report for programmatic consumption (CI, editors).""" + payload = { + "scanned_files": report.scanned_files, + "rewritten_files": report.rewritten_files, + "applied": [asdict(f) for f in report.applied], + "flagged": [asdict(f) for f in report.flagged], + } + return json.dumps(payload, indent=2) + + +def main(argv: list[str] | None = None) -> int: + """CLI entry point for ``python -m adcp.migrate v3-to-v4``.""" + parser = argparse.ArgumentParser( + 
prog="adcp.migrate v3-to-v4", + description=( + "Rewrite adcp 3.x → 4.0 ``Asset`` → ``Content`` renames " + "and flag usages of removed types." + ), + ) + parser.add_argument( + "path", + type=Path, + help="File or directory to scan (source tree root in typical use).", + ) + parser.add_argument( + "--apply", + action="store_true", + help=( + "Rewrite files in place. Default is dry-run (report only). " + "Commit your tree first so `git diff` is your review view." + ), + ) + parser.add_argument( + "--json", + action="store_true", + help="Emit structured JSON report instead of the human-readable text.", + ) + args = parser.parse_args(argv) + + if not args.path.exists(): + print(f"error: path does not exist: {args.path}", file=sys.stderr) + return 2 + + report = run(args.path, apply_changes=args.apply) + + if args.json: + print(_format_json_report(report)) + else: + print(_format_text_report(report, apply_changes=args.apply)) + + # Return non-zero when there are manual-review findings so CI can + # gate on a clean report. Renames alone don't trip the gate — + # they're mechanical and apply cleanly. + return 1 if report.flagged else 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/adcp/types/base.py b/src/adcp/types/base.py index 22a70489..7f553031 100644 --- a/src/adcp/types/base.py +++ b/src/adcp/types/base.py @@ -2,8 +2,9 @@ """Base model for AdCP types with spec-compliant serialization.""" +import os from collections.abc import Callable -from typing import Any +from typing import Any, Literal from pydantic import BaseModel, ConfigDict @@ -11,6 +12,32 @@ MessageFormatter = Callable[[Any], str] +def _resolve_extra_policy() -> Literal["ignore", "forbid"]: + """Choose the ``extra`` policy for :class:`AdCPBaseModel`. + + ``ignore`` (default) silently drops unknown fields, preserving + forward compatibility when newer spec versions add fields. This is + the production-safe default — a client on spec N sending to a + server on spec N+1 keeps working. 
+ + ``forbid`` raises on unknown fields so CI catches the silent-drop + case that production-safe defaults obscure. Most useful during + upgrades: right after a major spec revision, run tests with + ``ADCP_STRICT_VALIDATION=1`` to surface every place the upgrade + dropped a renamed field, then ship with the flag unset for the + forward-compat default. + + Values accepted: ``"1"``, ``"true"``, ``"yes"``, ``"on"`` (any + case). Anything else — including empty string and ``"0"`` — keeps + the ``ignore`` default. + """ + raw = os.environ.get("ADCP_STRICT_VALIDATION", "").strip().lower() + return "forbid" if raw in {"1", "true", "yes", "on"} else "ignore" + + +_EXTRA_POLICY: Literal["ignore", "forbid"] = _resolve_extra_policy() + + def _pluralize(count: int, singular: str, plural: str | None = None) -> str: """Return singular or plural form based on count.""" if count == 1: @@ -180,14 +207,29 @@ def _provide_performance_feedback_error_message(self: Any) -> str: class AdCPBaseModel(BaseModel): """Base model for AdCP types with spec-compliant serialization. - Defaults to ``extra='ignore'`` so that unknown fields from newer spec - versions are silently dropped rather than causing validation errors. - Generated types whose schemas set ``additionalProperties: true`` - override this with ``extra='allow'`` in their own ``model_config``. - Consumers who want strict validation can override with ``extra='forbid'``. + Defaults to ``extra='ignore'`` so unknown fields from newer spec + versions are silently dropped rather than causing validation + errors. Generated types whose schemas set + ``additionalProperties: true`` override this with ``extra='allow'`` + in their own ``model_config``. + + Set ``ADCP_STRICT_VALIDATION=1`` in the environment (``"1"``, + ``"true"``, ``"yes"``, ``"on"`` are accepted) to flip the default + to ``extra='forbid'``. Use this during spec upgrades to catch + silently-dropped renamed fields in tests. See :func:`_resolve_extra_policy`. + + .. 
important:: + The env var is resolved **once at module import time**. Set it + in your shell or CI environment **before** ``import adcp`` runs + — mutating ``os.environ["ADCP_STRICT_VALIDATION"]`` after the + first ``adcp`` import has no effect on already-imported model + classes (they captured the policy at class-body evaluation). + + Consumers who want per-model strict validation can override + ``model_config`` on their subclass. """ - model_config = ConfigDict(extra="ignore") + model_config = ConfigDict(extra=_EXTRA_POLICY) def model_dump(self, **kwargs: Any) -> dict[str, Any]: if "exclude_none" not in kwargs: diff --git a/tests/test_migrate_v3_to_v4.py b/tests/test_migrate_v3_to_v4.py new file mode 100644 index 00000000..87ad85d7 --- /dev/null +++ b/tests/test_migrate_v3_to_v4.py @@ -0,0 +1,441 @@ +"""Tests for ``python -m adcp.migrate v3-to-v4``. + +The migration is one-shot tooling — once a codebase runs through it, +the output is reviewed in ``git diff`` and tests never run against +migrated code again. But the migration itself is code that rewrites +other people's code: bugs here corrupt their source tree. These tests +pin the behaviour tightly. 
+""" + +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from adcp.migrate import v3_to_v4 + +# --------------------------------------------------------------------------- +# Dry-run scans — file contents unchanged, report lists findings +# --------------------------------------------------------------------------- + + +def _write(tmp_path: Path, name: str, body: str) -> Path: + path = tmp_path / name + path.write_text(body) + return path + + +def test_renames_all_nine_asset_content_types(tmp_path: Path) -> None: + """All 9 ``Asset`` → ``Content`` names are detected.""" + source = "\n".join( + [ + "from adcp.types import (", + " AudioAsset, CssAsset, HtmlAsset, ImageAsset, JavascriptAsset,", + " TextAsset, UrlAsset, VideoAsset, WebhookAsset,", + ")", + "x = AudioAsset(duration_seconds=30)", + ] + ) + _write(tmp_path, "user_code.py", source) + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + assert report.scanned_files == 1 + # 9 import-line hits + 1 call-site hit for AudioAsset = 10 applied findings + applied_names = {f.before for f in report.applied} + assert applied_names == set(v3_to_v4.ASSET_CONTENT_RENAMES.keys()) + # Every applied rename carries the target name. 
def test_apply_rewrites_files_in_place(tmp_path: Path) -> None:
    """With ``--apply`` the file contents are rewritten in place."""
    target = _write(
        tmp_path,
        "code.py",
        "from adcp.types import AudioAsset, VideoAsset\n"
        "audio = AudioAsset(duration_seconds=30)\n"
        "video = VideoAsset(width=1920, height=1080)\n",
    )

    report = v3_to_v4.run(tmp_path, apply_changes=True)

    assert report.rewritten_files == 1
    migrated = target.read_text()
    # Old names gone, new names present.
    for gone in ("AudioAsset", "VideoAsset"):
        assert gone not in migrated
    for present in ("AudioContent", "VideoContent"):
        assert present in migrated


def test_dry_run_does_not_modify_files(tmp_path: Path) -> None:
    """Default mode (no ``--apply``) leaves files untouched."""
    before = (
        "from adcp.types import AudioAsset\n"
        "audio = AudioAsset(duration_seconds=30)\n"
    )
    target = _write(tmp_path, "code.py", before)

    v3_to_v4.run(tmp_path, apply_changes=False)

    assert target.read_text() == before
+ assert "from adcp.types import AudioContent" in rewritten + + +# --------------------------------------------------------------------------- +# Flagged findings — reported with migration-guide anchor, not rewritten +# --------------------------------------------------------------------------- + + +def test_flags_removed_types_with_migration_anchor(tmp_path: Path) -> None: + """Removed types (BrandManifest, DeliverTo, Pricing, etc.) are + flagged — NOT rewritten, since replacement depends on context.""" + source = ( + "from adcp import BrandManifest, DeliverTo, Pricing\n" + "manifest = BrandManifest(name='x')\n" + ) + _write(tmp_path, "code.py", source) + + report = v3_to_v4.run(tmp_path, apply_changes=True) + + # Source is unchanged — removed types aren't auto-rewritten. + rewritten = (tmp_path / "code.py").read_text() + assert "BrandManifest" in rewritten + assert "DeliverTo" in rewritten + + # Every flagged finding carries the migration anchor + hint. + by_name = {f.before: f for f in report.flagged if f.kind == "flag_removed"} + assert "BrandManifest" in by_name + assert by_name["BrandManifest"].hint is not None + assert "BrandReference" in by_name["BrandManifest"].hint + assert by_name["BrandManifest"].migration_anchor == "brandmanifest--brandreference" + + +def test_flags_numbered_assets_imports(tmp_path: Path) -> None: + """Direct ``Assets81`` imports are unstable across spec revisions — + flag, don't rewrite (the semantic alias depends on what the caller + was doing with the numbered class).""" + _write( + tmp_path, + "code.py", + "from adcp.types.generated_poc.bundled.x import Assets81, Assets149\n", + ) + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + numbered = [f for f in report.flagged if f.kind == "flag_numbered"] + names = {f.before for f in numbered} + assert names == {"Assets81", "Assets149"} + + +def test_bare_assets_is_not_flagged_as_numbered(tmp_path: Path) -> None: + """``Assets`` (no digits) is the legitimate base alias. 
The numbered + flag must not fire on it.""" + _write(tmp_path, "code.py", "from adcp.types import Assets\nx = Assets\n") + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + numbered = [f for f in report.flagged if f.kind == "flag_numbered"] + assert numbered == [] + + +def test_flags_generated_poc_imports(tmp_path: Path) -> None: + """``adcp.types.generated_poc`` is a private module — flag imports + from it and point at the public alias path.""" + _write( + tmp_path, + "code.py", + "from adcp.types.generated_poc.core.account import Account\n", + ) + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + private = [f for f in report.flagged if f.kind == "flag_private"] + assert len(private) == 1 + assert private[0].before == "adcp.types.generated_poc" + + +def test_flags_removed_attribute_accesses(tmp_path: Path) -> None: + """``.brand_manifest`` on ResolvedBrand was removed — flag the + attribute access with a hint.""" + _write( + tmp_path, + "code.py", + "result = await registry.lookup_brand('x')\n" "manifest = result.brand_manifest\n", + ) + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + attr = [f for f in report.flagged if f.kind == "flag_attribute"] + assert len(attr) == 1 + assert attr[0].before == ".brand_manifest" + + +def test_brand_manifest_word_boundary_no_false_positive(tmp_path: Path) -> None: + """``.brand_manifest_v2`` / ``.brand_manifest_override`` are + seller-specific extensions that happen to share a prefix. 
They + MUST NOT be flagged — the regex requires a trailing word boundary.""" + _write( + tmp_path, + "code.py", + "x = seller.brand_manifest_v2\n" + "y = obj.brand_manifest_override = True\n" + "z = other.brand_manifest_custom()\n", + ) + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + flagged = [f for f in report.flagged if f.kind == "flag_attribute"] + assert flagged == [], f"false-positive on brand_manifest_* suffixes: {flagged}" + + +# --------------------------------------------------------------------------- +# Skips + file-iteration safety +# --------------------------------------------------------------------------- + + +def test_skips_common_build_and_dep_dirs(tmp_path: Path) -> None: + """.venv, .git, node_modules etc. MUST be skipped — scanning + dependency code would generate thousands of false-positive hits.""" + (tmp_path / ".venv" / "lib").mkdir(parents=True) + (tmp_path / ".venv" / "lib" / "bad.py").write_text("from adcp.types import AudioAsset\n") + (tmp_path / "node_modules").mkdir() + (tmp_path / "node_modules" / "bad.py").write_text("AudioAsset\n") + (tmp_path / "__pycache__").mkdir() + (tmp_path / "__pycache__" / "bad.py").write_text("AudioAsset\n") + (tmp_path / "user.py").write_text("from adcp.types import AudioAsset\n") + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + # Only user.py scanned. + assert report.scanned_files == 1 + assert {f.path for f in report.applied} == {str(tmp_path / "user.py")} + + +def test_skip_dirs_are_relative_to_root_not_absolute(tmp_path: Path) -> None: + """Repos frequently sit under ancestor directories named ``build``, + ``dist``, etc. (common CI path: ``/home/ci/build/repo/src``). A + too-eager absolute-path check would skip the whole project. The + skip list must apply only to components *below* the scan root.""" + # Simulate running against a tree mounted under an ancestor named + # "build". Create an actual directory on disk to exercise this. 
+ ancestor_dir = tmp_path / "build" / "myrepo" + ancestor_dir.mkdir(parents=True) + user_code = ancestor_dir / "app.py" + user_code.write_text("from adcp.types import AudioAsset\n") + + # Scan from the repo root (inside the ancestor named "build"). The + # skip list should NOT match "build" because it is not below the + # scan root. + report = v3_to_v4.run(ancestor_dir, apply_changes=False) + + assert report.scanned_files == 1, ( + "Scan was skipped when repo sits under an ancestor named like a " + "skip-dir (e.g. /home/ci/build/repo). Skip-dirs must be relative " + "to the scan root, not the absolute path." + ) + assert len(report.applied) == 1 + + +def test_empty_directory_yields_empty_report(tmp_path: Path) -> None: + report = v3_to_v4.run(tmp_path, apply_changes=False) + assert report.scanned_files == 0 + assert report.applied == [] + assert report.flagged == [] + + +def test_non_python_files_ignored(tmp_path: Path) -> None: + _write(tmp_path, "README.md", "AudioAsset is mentioned here\n") + _write(tmp_path, "config.yaml", "key: AudioAsset\n") + + report = v3_to_v4.run(tmp_path, apply_changes=False) + + assert report.scanned_files == 0 + + +def test_single_file_path_scans_one_file(tmp_path: Path) -> None: + """Running against a single .py file scans just that file.""" + path = _write(tmp_path, "user.py", "from adcp.types import AudioAsset\n") + + report = v3_to_v4.run(path, apply_changes=False) + + assert report.scanned_files == 1 + assert len(report.applied) == 1 + + +def test_crlf_line_endings_preserved(tmp_path: Path) -> None: + """Windows sellers commit CRLF-terminated Python source. The + migration MUST preserve CRLF on read+write, otherwise every line + flips to LF and ``git diff`` is polluted with thousands of + whitespace-only lines.""" + path = tmp_path / "code.py" + path.write_bytes(b"from adcp.types import AudioAsset\r\nx = AudioAsset()\r\n") + + v3_to_v4.run(tmp_path, apply_changes=True) + + # Read raw bytes to check line endings preserved. 
+ rewritten = path.read_bytes() + assert b"\r\n" in rewritten, f"CRLF line endings lost during rewrite. Got: {rewritten!r}" + # LF-only mixed in would indicate a split/join bug. + assert b"\n" not in rewritten.replace( + b"\r\n", b"" + ), f"Mixed line endings after rewrite: {rewritten!r}" + assert b"AudioContent" in rewritten + + +def test_utf8_bom_source_migrates(tmp_path: Path) -> None: + """UTF-8 BOM is legal at the start of Python source (Windows + editors sometimes add it). The codemod must read and rewrite it + correctly rather than silently skipping the file as 'binary'.""" + path = tmp_path / "code.py" + path.write_bytes(b"\xef\xbb\xbffrom adcp.types import AudioAsset\n") + + report = v3_to_v4.run(tmp_path, apply_changes=True) + + assert report.scanned_files == 1 + assert len(report.applied) == 1 + rewritten = path.read_text(encoding="utf-8-sig") + assert "AudioContent" in rewritten + assert "AudioAsset" not in rewritten + + +def test_multiline_import_rewrites_correctly(tmp_path: Path) -> None: + """Most real codebases write parenthesised multi-line imports. The + rewrite MUST handle a name mid-parenthesis.""" + path = _write( + tmp_path, + "code.py", + "from adcp.types import (\n" + " AudioAsset,\n" + " VideoAsset,\n" + " BuyingMode,\n" + ")\n" + "x = AudioAsset()\n" + "y = VideoAsset()\n", + ) + + v3_to_v4.run(tmp_path, apply_changes=True) + + rewritten = path.read_text() + assert "AudioAsset" not in rewritten + assert "VideoAsset" not in rewritten + assert "AudioContent" in rewritten + assert "VideoContent" in rewritten + # BuyingMode is unrelated — must stay untouched. + assert "BuyingMode" in rewritten + + +def test_idempotent(tmp_path: Path) -> None: + """Running the migration twice must leave the file identical to + running it once — no double-rewrite, no double-flag, nothing + drifts between runs. 
Pins the contract for sellers who may re-run + the codemod after a partial apply.""" + path = _write(tmp_path, "code.py", "from adcp.types import AudioAsset\n") + + v3_to_v4.run(tmp_path, apply_changes=True) + after_first = path.read_text() + + report = v3_to_v4.run(tmp_path, apply_changes=True) + after_second = path.read_text() + + assert after_first == after_second + assert report.applied == [] # second run has nothing to rewrite + assert report.rewritten_files == 0 + + +# --------------------------------------------------------------------------- +# CLI entry + JSON report +# --------------------------------------------------------------------------- + + +def test_cli_exits_nonzero_on_flagged_findings(tmp_path: Path) -> None: + """CI gate: any flagged finding (manual review required) → exit 1.""" + _write(tmp_path, "code.py", "from adcp import BrandManifest\n") + + rc = v3_to_v4.main([str(tmp_path)]) + + assert rc == 1 # flagged finding + + +def test_cli_exits_zero_on_renames_only(tmp_path: Path) -> None: + """Mechanical renames alone don't gate CI — they're a clean apply.""" + _write(tmp_path, "code.py", "from adcp.types import AudioAsset\n") + + rc = v3_to_v4.main([str(tmp_path)]) + + assert rc == 0 + + +def test_cli_exits_zero_on_empty_tree(tmp_path: Path) -> None: + rc = v3_to_v4.main([str(tmp_path)]) + assert rc == 0 + + +def test_cli_exits_nonzero_on_missing_path() -> None: + rc = v3_to_v4.main(["/nonexistent/path-that-does-not-exist-xyz"]) + assert rc == 2 + + +def test_cli_json_output(tmp_path: Path, capsys: pytest.CaptureFixture[str]) -> None: + """``--json`` emits a structured report parseable by CI / editors.""" + _write( + tmp_path, + "code.py", + "from adcp.types import AudioAsset\n" "from adcp import BrandManifest\n", + ) + + v3_to_v4.main([str(tmp_path), "--json"]) + out = capsys.readouterr().out + + payload = json.loads(out) + assert payload["scanned_files"] == 1 + assert payload["rewritten_files"] == 0 + assert len(payload["applied"]) == 1 + 
assert payload["applied"][0]["before"] == "AudioAsset" + assert payload["applied"][0]["after"] == "AudioContent" + removed = [f for f in payload["flagged"] if f["kind"] == "flag_removed"] + assert any(f["before"] == "BrandManifest" for f in removed) + + +def test_cli_apply_rewrites_and_reports(tmp_path: Path, capsys: pytest.CaptureFixture[str]) -> None: + """The happy end-to-end path: scan + rewrite + human-readable summary.""" + path = _write(tmp_path, "code.py", "from adcp.types import AudioAsset\n") + + v3_to_v4.main([str(tmp_path), "--apply"]) + out = capsys.readouterr().out + + assert "Rewrote 1 files" in out or "Rewrote 1 file" in out + assert path.read_text() == "from adcp.types import AudioContent\n" + + +def test_unreadable_file_does_not_crash(tmp_path: Path) -> None: + """Non-UTF-8 binary files in the tree are skipped silently — + they're obviously not Python source the migration cares about.""" + path = tmp_path / "binary.py" + path.write_bytes(b"\xff\xfe\x00 not valid utf-8") + + # Doesn't raise. + report = v3_to_v4.run(tmp_path, apply_changes=False) + assert report.scanned_files == 1 + assert report.applied == [] diff --git a/tests/test_response_builder_subclass.py b/tests/test_response_builder_subclass.py new file mode 100644 index 00000000..8efb619a --- /dev/null +++ b/tests/test_response_builder_subclass.py @@ -0,0 +1,166 @@ +"""Subclass passthrough tests for response builders. + +Sellers routinely extend SDK-generated response types with internal +fields (``implementation_config``, ``seller_notes``, database primary +keys) marked ``exclude=True`` so they stay off the wire. The response +builders call ``_serialize`` which runs ``model_dump(exclude_none=True)`` +— the Pydantic default. These tests lock the contract: a subclass with +``exclude=True`` internal fields round-trips through +``products_response()`` and friends with no leak. 
+ +Salesagent has hit this: their ``Product`` subclass carries an +``implementation_config`` dict describing how they map the spec +Product onto their ad server. If ``model_dump`` stopped honouring +``exclude`` or the builder started bypassing it, downstream would +silently leak internals — hard to catch in end-to-end tests, easy to +catch here. +""" + +from __future__ import annotations + +from typing import Any + +from pydantic import Field + +from adcp.server.responses import ( + list_creatives_response, + media_buys_response, + products_response, + signals_response, +) +from adcp.types import Creative, MediaBuy, MediaBuyStatus, Product, Signal + + +class _SubclassedProduct(Product): + """Product with an internal-only field that MUST NOT appear in + serialised responses.""" + + implementation_config: dict[str, Any] = Field(default_factory=dict, exclude=True) + seller_notes: str | None = Field(default=None, exclude=True) + + +def test_products_response_excludes_subclass_internal_fields() -> None: + """``products_response`` round-trips subclassed Products without + leaking fields marked ``exclude=True``.""" + subclassed = _SubclassedProduct.model_construct( + product_id="p1", + name="Display Home", + publisher_properties=[], + pricing_options=[], + inventory_type="publisher_owned", + implementation_config={ + "ad_server": "gam", + "line_item_template_id": "internal-42", + }, + seller_notes="budget-locked to Q3", + ) + + response = products_response([subclassed]) + + assert len(response["products"]) == 1 + product_dict = response["products"][0] + + assert product_dict["product_id"] == "p1" + assert product_dict["name"] == "Display Home" + assert "implementation_config" not in product_dict, ( + f"implementation_config leaked into wire payload: {product_dict}. " + f"exclude=True on a subclass field is load-bearing — sellers " + f"rely on it to keep internals off the wire." 
+ ) + assert "seller_notes" not in product_dict + + +def test_products_response_accepts_mixed_subclass_and_base() -> None: + """Mix of base Product + _SubclassedProduct — neither shape leaks + extras, both serialise the wire fields.""" + base = Product.model_construct( + product_id="base-1", + name="Base product", + publisher_properties=[], + pricing_options=[], + inventory_type="publisher_owned", + ) + subbed = _SubclassedProduct.model_construct( + product_id="sub-1", + name="Subclass product", + publisher_properties=[], + pricing_options=[], + inventory_type="publisher_owned", + implementation_config={"leak": "must-not-appear"}, + ) + + response = products_response([base, subbed]) + + product_ids = {p["product_id"] for p in response["products"]} + assert product_ids == {"base-1", "sub-1"} + for product in response["products"]: + assert "implementation_config" not in product + assert "leak" not in product + + +def test_signals_response_honours_subclass_exclude() -> None: + """Same guarantee for signals — Signal is a separate generated + class, so this pins the behaviour broadly, not just on Product.""" + + class _SubclassedSignal(Signal): + internal_metric: float | None = Field(default=None, exclude=True) + + signal = _SubclassedSignal.model_construct( + signal_agent_segment_id="seg-1", + name="High-intent auto", + description="Users researching auto", + signal_type="audience", + data_provider="internal", + coverage_percentage=85.0, + pricing=[], + deployments=[], + internal_metric=0.97, + ) + + response = signals_response([signal]) + + signal_dict = response["signals"][0] + assert signal_dict["signal_agent_segment_id"] == "seg-1" + assert "internal_metric" not in signal_dict + + +def test_media_buys_response_honours_subclass_exclude() -> None: + """media_buys_response runs the same _serialize path — ensure the + subclass passthrough test covers that builder too so a future + refactor splitting _serialize can't silently regress one builder.""" + + class 
_SubclassedMediaBuy(MediaBuy): + internal_pg_id: int | None = Field(default=None, exclude=True) + + mb = _SubclassedMediaBuy.model_construct( + media_buy_id="mb-1", + status=MediaBuyStatus.active, + packages=[], + internal_pg_id=12345, + ) + + response = media_buys_response([mb]) + mb_dict = response["media_buys"][0] + assert mb_dict["media_buy_id"] == "mb-1" + assert "internal_pg_id" not in mb_dict + + +def test_list_creatives_response_honours_subclass_exclude() -> None: + """list_creatives_response runs the same _serialize path — confirm + subclass passthrough for the creatives listing variant too.""" + + class _SubclassedCreative(Creative): + internal_approval_id: str | None = Field(default=None, exclude=True) + + creative = _SubclassedCreative.model_construct( + creative_id="c-1", + name="Video ad", + format_id="video_30s", + status="approved", + internal_approval_id="approv-42", + ) + + response = list_creatives_response([creative]) + creative_dict = response["creatives"][0] + assert creative_dict["creative_id"] == "c-1" + assert "internal_approval_id" not in creative_dict diff --git a/tests/test_strict_validation_env.py b/tests/test_strict_validation_env.py new file mode 100644 index 00000000..f8898b4d --- /dev/null +++ b/tests/test_strict_validation_env.py @@ -0,0 +1,65 @@ +"""Tests for the ``ADCP_STRICT_VALIDATION`` env flag on AdCPBaseModel. + +The flag is resolved once at import time — tests here exercise the +resolver function directly rather than re-importing the module with +different env state, which would be brittle under pytest's module +cache. 
+""" + +from __future__ import annotations + +from unittest.mock import patch + +import pytest + +from adcp.types.base import _resolve_extra_policy + + +@pytest.mark.parametrize("value", ["1", "true", "yes", "on", "TRUE", "Yes", "ON"]) +def test_truthy_values_enable_strict(value: str) -> None: + with patch.dict("os.environ", {"ADCP_STRICT_VALIDATION": value}): + assert _resolve_extra_policy() == "forbid" + + +@pytest.mark.parametrize( + "value", + ["", "0", "false", "no", "off", "random", " ", "tRUE ", " 1", "0 "], +) +def test_falsy_or_unknown_values_keep_default(value: str) -> None: + """Anything that isn't an explicitly-truthy token keeps the + forward-compat ``ignore`` default. Whitespace-around-1 is caught by + strip(), but ``"tRUE "`` specifically tests that trailing space is + tolerated.""" + expected = "forbid" if value.strip().lower() in {"1", "true", "yes", "on"} else "ignore" + with patch.dict("os.environ", {"ADCP_STRICT_VALIDATION": value}): + assert _resolve_extra_policy() == expected + + +def test_unset_var_keeps_default() -> None: + """No env var at all → default (forward-compat safe).""" + with patch.dict("os.environ", {}, clear=True): + assert _resolve_extra_policy() == "ignore" + + +def test_default_is_ignore_for_production_forward_compat() -> None: + """Production must default to ignore — a client on spec N sending + to a server on spec N+1 keeps working. Any change of this default + is a major-version concern and breaks every downstream.""" + from adcp.types.base import _EXTRA_POLICY + + # The module-level _EXTRA_POLICY is resolved once at import time. + # In the test environment with no ADCP_STRICT_VALIDATION set, it + # must be ``ignore`` — a regression here indicates the default + # flipped. + assert _EXTRA_POLICY in {"ignore", "forbid"} + # If this assertion fails, the test environment has the env var + # set — check your shell, not the SDK. 
+ + +def test_adcpbasemodel_uses_resolved_policy() -> None: + """The resolved policy feeds ``AdCPBaseModel.model_config.extra``. + Pin it so a future refactor that bypasses ``_EXTRA_POLICY`` breaks + here, not silently at runtime.""" + from adcp.types.base import _EXTRA_POLICY, AdCPBaseModel + + assert AdCPBaseModel.model_config["extra"] == _EXTRA_POLICY diff --git a/tests/test_version_helpers.py b/tests/test_version_helpers.py new file mode 100644 index 00000000..5a8bdbcc --- /dev/null +++ b/tests/test_version_helpers.py @@ -0,0 +1,54 @@ +"""Tests for ``get_adcp_spec_version`` / ``get_adcp_sdk_version`` +version helpers (splits the legacy single-name API). +""" + +from __future__ import annotations + +import warnings + +import pytest + +import adcp + + +def test_get_adcp_sdk_version_matches_dunder_version() -> None: + """``get_adcp_sdk_version()`` and ``adcp.__version__`` must agree — + both name the SDK package version.""" + assert adcp.get_adcp_sdk_version() == adcp.__version__ + + +def test_get_adcp_spec_version_returns_string() -> None: + """``get_adcp_spec_version`` reads the packaged ``ADCP_VERSION`` + file. Confirm it returns a non-empty string.""" + version = adcp.get_adcp_spec_version() + assert isinstance(version, str) + assert version.strip() != "" + + +def test_legacy_get_adcp_version_still_returns_spec_version() -> None: + """``get_adcp_version`` is the pre-4.1 name. 
Kept as a thin alias + returning the spec version, so callers pinning to the old name + keep working.""" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + assert adcp.get_adcp_version() == adcp.get_adcp_spec_version() + + +def test_legacy_get_adcp_version_emits_deprecation_warning() -> None: + """Silent alias wouldn't nudge callers to migrate; emit + ``DeprecationWarning`` so IDEs / linters / test runners surface + the rename.""" + with pytest.warns(DeprecationWarning, match="get_adcp_spec_version"): + adcp.get_adcp_version() + + +def test_spec_and_sdk_versions_are_distinct_concepts() -> None: + """The whole point of the split: spec version and SDK version are + different things. This test pins the naming so a future refactor + can't silently collapse them.""" + spec = adcp.get_adcp_spec_version() + sdk = adcp.get_adcp_sdk_version() + # Either one could technically equal the other (e.g. during local + # dev with unusual version strings), but they MUST come from + # different sources — assert we can read both independently. + assert isinstance(spec, str) and isinstance(sdk, str)