From 7c832f2dbcf3e1849f104ec6c43c4446d22a5e2c Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:03:03 +0100 Subject: [PATCH 1/6] feat: add official Twitter API v2 auth mode --- README.md | 66 ++++- tests/test_api_client.py | 147 ++++++++++ tests/test_auth.py | 2 + tests/test_cli.py | 63 ++++ tests/test_timeutil.py | 17 ++ twitter_cli/api_client.py | 596 ++++++++++++++++++++++++++++++++++++++ twitter_cli/cli.py | 52 +++- twitter_cli/exceptions.py | 6 + twitter_cli/timeutil.py | 17 +- 9 files changed, 945 insertions(+), 21 deletions(-) create mode 100644 tests/test_api_client.py create mode 100644 twitter_cli/api_client.py diff --git a/README.md b/README.md index a110a17..b5da676 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,7 @@ A terminal-first CLI for Twitter/X: read timelines, bookmarks, and user profiles **Auth & Anti-Detection:** - Cookie auth: use browser cookies or environment variables +- Official API v2 auth: opt into stable `--auth-mode api` with bearer/user access tokens - Full cookie forwarding: extracts ALL browser cookies for richer browser context - TLS fingerprint impersonation: `curl_cffi` with dynamic Chrome version matching - `x-client-transaction-id` header generation @@ -90,6 +91,9 @@ twitter feed -t following # Enable ranking filter explicitly twitter feed --filter + +# Use official API auth mode for supported commands +twitter --auth-mode api search "AI agent" --json ``` ### Usage @@ -169,13 +173,36 @@ twitter follow elonmusk --json ### Authentication -twitter-cli uses this auth priority: +twitter-cli supports two auth backends: -1. **Environment variables**: `TWITTER_AUTH_TOKEN` + `TWITTER_CT0` -2. **Browser cookies** (recommended): auto-extract from Arc/Chrome/Edge/Firefox/Brave +1. **Cookie auth**: `TWITTER_AUTH_TOKEN` + `TWITTER_CT0`, or browser cookies auto-extracted from Arc/Chrome/Edge/Firefox/Brave +2. 
**Official API v2 auth**: `TWITTER_API_BEARER_TOKEN` for read-only official endpoints, or `TWITTER_API_ACCESS_TOKEN` for OAuth 2.0 user-context commands Browser extraction is recommended — it forwards ALL Twitter cookies (not just `auth_token` + `ct0`) and aligns request headers with your local runtime, which is closer to normal browser traffic than minimal cookie auth. +Choose the backend with `--auth-mode auto|cookie|api` or `TWITTER_AUTH_MODE=auto|cookie|api`. + +**Official API mode setup:** + +```bash +# Read-only official API mode +export TWITTER_AUTH_MODE=api +export TWITTER_API_BEARER_TOKEN=... + +# OAuth 2.0 user-context mode (required for whoami/status/post/like/retweet/follow) +export TWITTER_API_ACCESS_TOKEN=... +# Optional: skip /users/me lookup for write actions +export TWITTER_API_USER_ID=... +``` + +**Official API mode currently supports:** +- Read: `user`, `user-posts`, `search`, `followers`, `following`, `status`, `whoami` +- Write: `post`, `reply`, `quote`, `delete`, `like`, `unlike`, `retweet`, `unretweet`, `follow`, `unfollow` + +**Official API mode does not support yet:** +- `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `likes`, `bookmark`, `unbookmark` +- Image upload in `post` / `reply` / `quote` + **Chrome multi-profile**: All Chrome profiles are scanned automatically. To specify a profile: ```bash @@ -396,6 +423,7 @@ git clone git@github.com:jackwener/twitter-cli.git .agents/skills/twitter-cli **认证与反风控:** - Cookie 认证:支持环境变量和浏览器自动提取 +- 官方 API v2 认证:支持通过 `--auth-mode api` 切到稳定的官方令牌模式 - 完整 Cookie 转发:提取浏览器中所有 Twitter Cookie,保留更多浏览器上下文 - TLS 指纹伪装:`curl_cffi` 动态匹配 Chrome 版本 - `x-client-transaction-id` 请求头生成 @@ -428,6 +456,9 @@ twitter feed -t following twitter feed --filter twitter feed --full-text +# 官方 API 模式 +twitter --auth-mode api search "AI agent" --json + # 收藏 twitter bookmarks twitter bookmarks --full-text @@ -490,13 +521,36 @@ twitter follow elonmusk --json ### 认证说明 -认证优先级: +twitter-cli 现在支持两套认证后端: -1. 
**环境变量**:`TWITTER_AUTH_TOKEN` + `TWITTER_CT0` -2. **浏览器提取**(推荐):Arc/Chrome/Edge/Firefox/Brave 全量 Cookie 提取 +1. **Cookie 认证**:`TWITTER_AUTH_TOKEN` + `TWITTER_CT0`,或从 Arc/Chrome/Edge/Firefox/Brave 自动提取浏览器 Cookie +2. **官方 API v2 认证**:`TWITTER_API_BEARER_TOKEN` 用于官方只读接口;`TWITTER_API_ACCESS_TOKEN` 用于 OAuth 2.0 user context 写操作/当前用户接口 推荐使用浏览器提取方式,会转发所有 Twitter Cookie,并按本机运行环境生成语言和平台请求头;它比仅发送 `auth_token` + `ct0` 更接近普通浏览器流量,但不等于完整浏览器自动化。 +可通过 `--auth-mode auto|cookie|api` 或 `TWITTER_AUTH_MODE=auto|cookie|api` 显式选择后端。 + +**官方 API 模式配置:** + +```bash +# 官方只读模式 +export TWITTER_AUTH_MODE=api +export TWITTER_API_BEARER_TOKEN=... + +# OAuth 2.0 user context(whoami/status/post/like/retweet/follow 等需要) +export TWITTER_API_ACCESS_TOKEN=... +# 可选:避免写操作前额外请求 /users/me +export TWITTER_API_USER_ID=... +``` + +**官方 API 模式当前支持:** +- 读取:`user`、`user-posts`、`search`、`followers`、`following`、`status`、`whoami` +- 写入:`post`、`reply`、`quote`、`delete`、`like`、`unlike`、`retweet`、`unretweet`、`follow`、`unfollow` + +**官方 API 模式暂不支持:** +- `feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`likes`、`bookmark`、`unbookmark` +- `post` / `reply` / `quote` 的图片上传 + **Chrome 多 Profile 支持**:会自动遍历所有 Chrome profile。也可以通过环境变量指定: ```bash diff --git a/tests/test_api_client.py b/tests/test_api_client.py new file mode 100644 index 0000000..2b980a3 --- /dev/null +++ b/tests/test_api_client.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import json + +import pytest + +from twitter_cli.api_client import TwitterAPIv2Client +from twitter_cli.exceptions import AuthenticationError, NotFoundError + + +class DummyResponse: + def __init__(self, status_code: int, payload: dict) -> None: + self.status_code = status_code + self._payload = payload + self.text = json.dumps(payload) + + def json(self) -> dict: + return self._payload + + +class DummySession: + def __init__(self, responses: list[DummyResponse]) -> None: + self._responses = responses + self.calls: list[tuple[str, str, dict | None, str | None, dict]] = [] + + 
def get(self, url: str, headers=None, params=None, timeout=None): + self.calls.append(("GET", url, params, None, headers or {})) + return self._responses.pop(0) + + def post(self, url: str, headers=None, params=None, data=None, timeout=None): + self.calls.append(("POST", url, params, data, headers or {})) + return self._responses.pop(0) + + def delete(self, url: str, headers=None, params=None, timeout=None): + self.calls.append(("DELETE", url, params, None, headers or {})) + return self._responses.pop(0) + + +def test_api_client_fetch_search_parses_expansions(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [ + { + "id": "1", + "text": "hello world", + "author_id": "u1", + "created_at": "2026-03-08T12:00:00.000Z", + "lang": "en", + "public_metrics": { + "like_count": 1, + "retweet_count": 2, + "reply_count": 3, + "quote_count": 4, + }, + "entities": { + "urls": [{"expanded_url": "https://example.com"}], + }, + "attachments": {"media_keys": ["m1"]}, + "referenced_tweets": [{"type": "quoted", "id": "2"}], + } + ], + "includes": { + "users": [ + {"id": "u1", "name": "Alice", "username": "alice", "verified": True}, + {"id": "u2", "name": "Bob", "username": "bob"}, + ], + "media": [ + { + "media_key": "m1", + "type": "photo", + "url": "https://img.example/photo.jpg", + "width": 800, + "height": 600, + } + ], + "tweets": [ + { + "id": "2", + "text": "quoted tweet", + "author_id": "u2", + "created_at": "2026-03-07T12:00:00.000Z", + "public_metrics": {}, + } + ], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_search("python", count=1, product="Photos") + + assert len(tweets) == 1 + assert tweets[0].author.screen_name == "alice" 
+ assert tweets[0].media[0].url == "https://img.example/photo.jpg" + assert tweets[0].quoted_tweet is not None + assert tweets[0].quoted_tweet.author.screen_name == "bob" + assert tweets[0].created_at == "2026-03-08T12:00:00.000Z" + assert session.calls[0][0] == "GET" + assert session.calls[0][2]["query"] == "python has:images" + assert session.calls[0][2]["sort_order"] == "recency" + + +def test_api_client_fetch_me_requires_user_context(monkeypatch) -> None: + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + + with pytest.raises(AuthenticationError): + client.fetch_me() + + +def test_api_client_create_tweet_uses_access_token(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + session = DummySession([DummyResponse(200, {"data": {"id": "123", "text": "hi"}})]) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweet_id = client.create_tweet("hi") + + assert tweet_id == "123" + method, url, _params, data, headers = session.calls[0] + assert method == "POST" + assert url.endswith("/tweets") + assert headers["Authorization"] == "Bearer access-token" + assert json.loads(data) == {"text": "hi"} + + +def test_api_client_fetch_user_404_maps_to_not_found(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + session = DummySession([DummyResponse(404, {"title": "Not Found Error", "detail": "User not found"})]) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + + with pytest.raises(NotFoundError, match="User not found"): + client.fetch_user("missing") diff --git a/tests/test_auth.py 
b/tests/test_auth.py index 8010c08..9a13f47 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -109,6 +109,7 @@ def __init__(self, domain: str, name: str, value: str) -> None: brave=lambda: pytest.fail("brave should not be used when arc succeeds"), ) monkeypatch.setitem(sys.modules, "browser_cookie3", fake_module) + monkeypatch.setattr(auth, "_iter_chrome_cookie_files", lambda browser_name: []) cookies, diagnostics = auth._extract_in_process() @@ -407,6 +408,7 @@ class BrowserError(Exception): brave=lambda: (_ for _ in ()).throw(BrowserError("Brave not found")), ) monkeypatch.setitem(sys.modules, "browser_cookie3", fake_module) + monkeypatch.setattr(auth, "_iter_chrome_cookie_files", lambda browser_name: []) cookies, diagnostics = auth._extract_in_process() diff --git a/tests/test_cli.py b/tests/test_cli.py index 1e7e928..f7c114e 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -6,6 +6,7 @@ from click.testing import CliRunner import pytest from rich.console import Console +from twitter_cli.exceptions import UnsupportedFeatureError import yaml from twitter_cli.cli import cli @@ -274,6 +275,68 @@ def fetch_me(self) -> UserProfile: assert payload["data"]["user"]["username"] == "testuser" +def test_cli_status_explicit_api_auth_mode(monkeypatch) -> None: + class FakeAPIClient: + def __init__(self, rate_limit_config=None) -> None: + assert isinstance(rate_limit_config, dict) + + def fetch_me(self) -> UserProfile: + return UserProfile(id="42", name="API User", screen_name="apiuser") + + monkeypatch.setattr("twitter_cli.cli.TwitterAPIv2Client", FakeAPIClient) + runner = CliRunner() + + result = runner.invoke(cli, ["--auth-mode", "api", "status", "--json"]) + + assert result.exit_code == 0 + payload = yaml.safe_load(result.output) + assert payload["ok"] is True + assert payload["data"]["user"]["username"] == "apiuser" + + +def test_cli_auto_auth_prefers_api_when_credentials_present(monkeypatch) -> None: + class FakeAPIClient: + def __init__(self, 
rate_limit_config=None) -> None: + pass + + def fetch_me(self) -> UserProfile: + return UserProfile(id="42", name="Auto API", screen_name="autoapi") + + monkeypatch.setattr("twitter_cli.cli.has_api_credentials", lambda: True) + monkeypatch.setattr("twitter_cli.cli.TwitterAPIv2Client", FakeAPIClient) + monkeypatch.setattr( + "twitter_cli.cli.get_cookies", + lambda: pytest.fail("cookie auth should not be used when API credentials are present"), + ) + runner = CliRunner() + + result = runner.invoke(cli, ["status", "--json"]) + + assert result.exit_code == 0 + payload = yaml.safe_load(result.output) + assert payload["data"]["user"]["username"] == "autoapi" + + +def test_cli_api_mode_unsupported_feed_returns_structured_error(monkeypatch) -> None: + class FakeAPIClient: + def __init__(self, rate_limit_config=None) -> None: + pass + + def fetch_home_timeline(self, count: int): + raise UnsupportedFeatureError("feed unsupported in api mode") + + monkeypatch.setattr("twitter_cli.cli.TwitterAPIv2Client", FakeAPIClient) + monkeypatch.setattr("twitter_cli.cli.load_config", lambda: {"fetch": {"count": 5}, "filter": {}, "rateLimit": {}}) + runner = CliRunner() + + result = runner.invoke(cli, ["--auth-mode", "api", "feed", "--json"]) + + assert result.exit_code == 1 + payload = yaml.safe_load(result.output) + assert payload["ok"] is False + assert payload["error"]["code"] == "unsupported_operation" + + def test_cli_whoami_auto_yaml(monkeypatch) -> None: class FakeClient: def fetch_me(self) -> UserProfile: diff --git a/tests/test_timeutil.py b/tests/test_timeutil.py index 49a42f8..ffd6381 100644 --- a/tests/test_timeutil.py +++ b/tests/test_timeutil.py @@ -6,6 +6,7 @@ SAMPLE_TIMESTAMP = "Sat Mar 08 12:00:00 +0000 2026" +ISO_TIMESTAMP = "2026-03-08T12:00:00.000Z" # ── format_local_time ──────────────────────────────────────────────────── @@ -26,6 +27,12 @@ def test_format_local_time_invalid() -> None: assert format_local_time("not a date") == "not a date" +def 
test_format_local_time_iso8601() -> None: + result = format_local_time(ISO_TIMESTAMP) + assert result.startswith("2026-03-") + assert ":" in result + + # ── format_relative_time ───────────────────────────────────────────────── @@ -45,6 +52,10 @@ def test_format_relative_time_invalid() -> None: assert format_relative_time("garbage") == "garbage" +def test_format_relative_time_iso_future() -> None: + assert format_relative_time("2999-01-01T00:00:00Z") == "just now" + + # ── format_iso8601 ─────────────────────────────────────────────────────── @@ -62,6 +73,12 @@ def test_format_iso8601_invalid() -> None: assert format_iso8601("not a date") == "not a date" +def test_format_iso8601_keeps_iso_timestamp_parseable() -> None: + result = format_iso8601(ISO_TIMESTAMP) + assert result.startswith("2026-03-08T12:00:00") + assert "+00:00" in result + + def test_format_iso8601_roundtrip() -> None: """ISO 8601 output should be parseable by datetime.fromisoformat.""" from datetime import datetime diff --git a/twitter_cli/api_client.py b/twitter_cli/api_client.py new file mode 100644 index 0000000..2caaf9c --- /dev/null +++ b/twitter_cli/api_client.py @@ -0,0 +1,596 @@ +"""Official X/Twitter API v2 client.""" + +from __future__ import annotations + +import json +import logging +import os +import time +import urllib.parse +from typing import Any, Dict, List, Optional + +from curl_cffi import requests as _cffi_requests + +from .exceptions import ( + AuthenticationError, + NetworkError, + NotFoundError, + TwitterAPIError, + UnsupportedFeatureError, +) +from .models import Author, Metrics, Tweet, TweetMedia, UserProfile + +logger = logging.getLogger(__name__) + +_API_BASE_URL = "https://api.x.com/2" +_ABSOLUTE_MAX_COUNT = 500 +_USER_FIELDS = "created_at,description,entities,location,profile_image_url,public_metrics,verified" +_TWEET_FIELDS = "attachments,author_id,created_at,entities,lang,public_metrics,referenced_tweets" +_MEDIA_FIELDS = 
"media_key,preview_image_url,type,url,width,height" +_TWEET_EXPANSIONS = "author_id,attachments.media_keys,referenced_tweets.id,referenced_tweets.id.author_id" +_COOKIE_HINT = ( + "Use --auth-mode cookie for home feed, bookmarks, tweet detail, article, list timeline, " + "or media upload commands." +) +_api_session: Any = None + + +def has_api_credentials() -> bool: + """Return True when official API credentials are configured.""" + return bool( + os.environ.get("TWITTER_API_ACCESS_TOKEN", "").strip() + or os.environ.get("TWITTER_API_BEARER_TOKEN", "").strip() + ) + + +def _get_api_session() -> Any: + """Return a shared session for official API requests.""" + global _api_session + if _api_session is None: + proxy = os.environ.get("TWITTER_PROXY", "").strip() + _api_session = _cffi_requests.Session( + proxies={"https": proxy, "http": proxy} if proxy else None, + ) + return _api_session + + +class TwitterAPIv2Client: + """Official X/Twitter API v2 client for a supported subset of commands.""" + + def __init__(self, rate_limit_config: Optional[Dict[str, Any]] = None) -> None: + self._access_token = os.environ.get("TWITTER_API_ACCESS_TOKEN", "").strip() + self._bearer_token = os.environ.get("TWITTER_API_BEARER_TOKEN", "").strip() + self._configured_user_id = os.environ.get("TWITTER_API_USER_ID", "").strip() + if not self._access_token and not self._bearer_token: + raise AuthenticationError( + "Official API mode requires TWITTER_API_ACCESS_TOKEN or TWITTER_API_BEARER_TOKEN." 
+ ) + + rl = rate_limit_config or {} + self._request_delay = float(rl.get("requestDelay", 2.5)) + self._max_retries = int(rl.get("maxRetries", 3)) + self._retry_base_delay = float(rl.get("retryBaseDelay", 5.0)) + self._max_count = min(int(rl.get("maxCount", 200)), _ABSOLUTE_MAX_COUNT) + self._me_cache: Optional[UserProfile] = None + + # ── Read operations ────────────────────────────────────────────── + + def fetch_user(self, screen_name: str) -> UserProfile: + data = self._api_request( + "GET", + "/users/by/username/%s" % urllib.parse.quote(screen_name), + params={"user.fields": _USER_FIELDS}, + ) + user = data.get("data") + if not isinstance(user, dict): + raise NotFoundError("User @%s not found" % screen_name) + return self._parse_user(user) + + def fetch_me(self) -> UserProfile: + if self._me_cache is not None: + return self._me_cache + data = self._api_request( + "GET", + "/users/me", + params={"user.fields": _USER_FIELDS}, + require_user_context=True, + ) + user = data.get("data") + if not isinstance(user, dict): + raise TwitterAPIError(0, "Failed to fetch current user info") + self._me_cache = self._parse_user(user) + return self._me_cache + + def resolve_user_id(self, identifier: str) -> str: + if identifier.isdigit(): + return identifier + return self.fetch_user(identifier).id + + def fetch_user_tweets(self, user_id: str, count: int = 20) -> List[Tweet]: + return self._paginate_tweets( + "/users/%s/tweets" % user_id, + count, + { + "exclude": "replies", + "tweet.fields": _TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) + + def fetch_search(self, query: str, count: int = 20, product: str = "Top") -> List[Tweet]: + search_query = query + sort_order = "relevancy" + normalized_product = (product or "Top").strip().lower() + if normalized_product == "latest": + sort_order = "recency" + elif normalized_product == "photos": + sort_order = "recency" + search_query = "%s has:images" % query + 
elif normalized_product == "videos": + sort_order = "recency" + search_query = "%s has:videos" % query + + return self._paginate_tweets( + "/tweets/search/recent", + count, + { + "query": search_query, + "sort_order": sort_order, + "tweet.fields": _TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) + + def fetch_followers(self, user_id: str, count: int = 20) -> List[UserProfile]: + return self._paginate_users( + "/users/%s/followers" % user_id, + count, + {"user.fields": _USER_FIELDS}, + ) + + def fetch_following(self, user_id: str, count: int = 20) -> List[UserProfile]: + return self._paginate_users( + "/users/%s/following" % user_id, + count, + {"user.fields": _USER_FIELDS}, + ) + + # ── Write operations ───────────────────────────────────────────── + + def create_tweet( + self, + text: str, + reply_to_id: Optional[str] = None, + media_ids: Optional[List[str]] = None, + ) -> str: + if media_ids: + raise UnsupportedFeatureError( + "Official API mode does not support media upload yet. %s" % _COOKIE_HINT + ) + body: Dict[str, Any] = {"text": text} + if reply_to_id: + body["reply"] = {"in_reply_to_tweet_id": reply_to_id} + data = self._api_request("POST", "/tweets", json_body=body, require_user_context=True) + created = data.get("data") or {} + tweet_id = str(created.get("id") or "") + if not tweet_id: + raise TwitterAPIError(0, "Failed to create tweet") + self._write_delay() + return tweet_id + + def quote_tweet(self, tweet_id: str, text: str, media_ids: Optional[List[str]] = None) -> str: + if media_ids: + raise UnsupportedFeatureError( + "Official API mode does not support media upload yet. 
%s" % _COOKIE_HINT + ) + data = self._api_request( + "POST", + "/tweets", + json_body={"text": text, "quote_tweet_id": tweet_id}, + require_user_context=True, + ) + created = data.get("data") or {} + created_id = str(created.get("id") or "") + if not created_id: + raise TwitterAPIError(0, "Failed to create quote tweet") + self._write_delay() + return created_id + + def delete_tweet(self, tweet_id: str) -> bool: + self._api_request("DELETE", "/tweets/%s" % tweet_id, require_user_context=True) + self._write_delay() + return True + + def like_tweet(self, tweet_id: str) -> bool: + self._api_request( + "POST", + "/users/%s/likes" % self._authenticated_user_id(), + json_body={"tweet_id": tweet_id}, + require_user_context=True, + ) + self._write_delay() + return True + + def unlike_tweet(self, tweet_id: str) -> bool: + self._api_request( + "DELETE", + "/users/%s/likes/%s" % (self._authenticated_user_id(), tweet_id), + require_user_context=True, + ) + self._write_delay() + return True + + def retweet(self, tweet_id: str) -> bool: + self._api_request( + "POST", + "/users/%s/retweets" % self._authenticated_user_id(), + json_body={"tweet_id": tweet_id}, + require_user_context=True, + ) + self._write_delay() + return True + + def unretweet(self, tweet_id: str) -> bool: + self._api_request( + "DELETE", + "/users/%s/retweets/%s" % (self._authenticated_user_id(), tweet_id), + require_user_context=True, + ) + self._write_delay() + return True + + def follow_user(self, user_id: str) -> bool: + self._api_request( + "POST", + "/users/%s/following" % self._authenticated_user_id(), + json_body={"target_user_id": user_id}, + require_user_context=True, + ) + self._write_delay() + return True + + def unfollow_user(self, user_id: str) -> bool: + self._api_request( + "DELETE", + "/users/%s/following/%s" % (self._authenticated_user_id(), user_id), + require_user_context=True, + ) + self._write_delay() + return True + + # ── Unsupported cookie-only operations ─────────────────────────── + + 
def fetch_home_timeline(self, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError("Official API mode does not expose the home timeline. %s" % _COOKIE_HINT) + + def fetch_following_feed(self, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError( + "Official API mode does not expose the following feed timeline. %s" % _COOKIE_HINT + ) + + def fetch_bookmarks(self, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError("Official API mode does not expose bookmarks. %s" % _COOKIE_HINT) + + def fetch_user_likes(self, user_id: str, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError( + "Official API mode does not support the likes timeline command yet. %s" % _COOKIE_HINT + ) + + def fetch_tweet_detail(self, tweet_id: str, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError( + "Official API mode does not support tweet detail plus replies yet. %s" % _COOKIE_HINT + ) + + def fetch_article(self, tweet_id: str) -> Tweet: + raise UnsupportedFeatureError("Official API mode does not support Twitter Articles yet. %s" % _COOKIE_HINT) + + def fetch_list_timeline(self, list_id: str, count: int = 20) -> List[Tweet]: + raise UnsupportedFeatureError("Official API mode does not support list timelines yet. %s" % _COOKIE_HINT) + + def bookmark_tweet(self, tweet_id: str) -> bool: + raise UnsupportedFeatureError( + "Official API mode does not expose bookmark write endpoints. %s" % _COOKIE_HINT + ) + + def unbookmark_tweet(self, tweet_id: str) -> bool: + raise UnsupportedFeatureError( + "Official API mode does not expose bookmark write endpoints. %s" % _COOKIE_HINT + ) + + def upload_media(self, path: str) -> str: + raise UnsupportedFeatureError( + "Official API mode does not support media upload yet. 
%s" % _COOKIE_HINT + ) + + # ── Internals ──────────────────────────────────────────────────── + + def _authenticated_user_id(self) -> str: + if self._configured_user_id: + return self._configured_user_id + return self.fetch_me().id + + def _paginate_tweets(self, path: str, count: int, params: Dict[str, Any]) -> List[Tweet]: + if count <= 0: + return [] + count = min(count, self._max_count) + tweets: List[Tweet] = [] + seen_ids = set() + next_token: Optional[str] = None + + while len(tweets) < count: + page_params = dict(params) + page_params["max_results"] = max(10, min(100, count - len(tweets))) + if next_token: + page_params["pagination_token"] = next_token + + data = self._api_request("GET", path, params=page_params) + page_items = data.get("data") if isinstance(data.get("data"), list) else [] + includes = data.get("includes") if isinstance(data.get("includes"), dict) else {} + for tweet in self._parse_tweets(page_items, includes): + if tweet.id and tweet.id not in seen_ids: + seen_ids.add(tweet.id) + tweets.append(tweet) + if len(tweets) >= count: + break + + meta = data.get("meta") or {} + next_token = str(meta.get("next_token") or "") + if not next_token or len(tweets) >= count: + break + self._sleep_between_pages() + + return tweets[:count] + + def _paginate_users(self, path: str, count: int, params: Dict[str, Any]) -> List[UserProfile]: + if count <= 0: + return [] + count = min(count, self._max_count) + users: List[UserProfile] = [] + seen_ids = set() + next_token: Optional[str] = None + + while len(users) < count: + page_params = dict(params) + page_params["max_results"] = max(10, min(100, count - len(users))) + if next_token: + page_params["pagination_token"] = next_token + + data = self._api_request("GET", path, params=page_params) + items = data.get("data") if isinstance(data.get("data"), list) else [] + for item in items: + if not isinstance(item, dict): + continue + profile = self._parse_user(item) + if profile.id and profile.id not in seen_ids: + 
seen_ids.add(profile.id) + users.append(profile) + if len(users) >= count: + break + + meta = data.get("meta") or {} + next_token = str(meta.get("next_token") or "") + if not next_token or len(users) >= count: + break + self._sleep_between_pages() + + return users[:count] + + def _sleep_between_pages(self) -> None: + if self._request_delay > 0: + time.sleep(self._request_delay) + + def _write_delay(self) -> None: + if self._request_delay > 0: + time.sleep(min(self._request_delay, 2.0)) + + def _api_request( + self, + method: str, + path: str, + *, + params: Optional[Dict[str, Any]] = None, + json_body: Optional[Dict[str, Any]] = None, + require_user_context: bool = False, + ) -> Dict[str, Any]: + token = self._access_token if require_user_context else (self._access_token or self._bearer_token) + if require_user_context and not token: + raise AuthenticationError( + "Official API user-context commands require TWITTER_API_ACCESS_TOKEN." + ) + if not token: + raise AuthenticationError( + "Official API mode requires TWITTER_API_ACCESS_TOKEN or TWITTER_API_BEARER_TOKEN." 
+ ) + + headers = { + "Authorization": "Bearer %s" % token, + "Accept": "application/json", + "User-Agent": "twitter-cli", + } + if json_body is not None: + headers["Content-Type"] = "application/json" + + url = "%s%s" % (_API_BASE_URL, path) + body = json.dumps(json_body) if json_body is not None else None + session = _get_api_session() + last_error: Optional[Exception] = None + + for attempt in range(max(self._max_retries, 1)): + try: + if method == "GET": + response = session.get(url, headers=headers, params=params, timeout=30) + elif method == "POST": + response = session.post(url, headers=headers, params=params, data=body, timeout=30) + elif method == "DELETE": + response = session.delete(url, headers=headers, params=params, timeout=30) + else: + raise RuntimeError("Unsupported HTTP method: %s" % method) + except Exception as exc: + last_error = exc + if attempt + 1 >= max(self._max_retries, 1): + break + time.sleep(self._retry_base_delay * (attempt + 1)) + continue + + payload = self._safe_json(response) + if response.status_code < 400: + if isinstance(payload, dict): + return payload + raise NetworkError("Official API returned a non-JSON response") + + message = self._extract_error_message(payload, response.text) + if response.status_code == 404: + raise NotFoundError(message) + if response.status_code == 429 and attempt + 1 < max(self._max_retries, 1): + time.sleep(self._retry_base_delay * (attempt + 1)) + continue + raise TwitterAPIError(response.status_code, message) + + raise NetworkError("Official API request failed: %s" % last_error) + + def _safe_json(self, response: Any) -> Any: + try: + return response.json() + except Exception: + return None + + def _extract_error_message(self, payload: Any, fallback_text: str) -> str: + if isinstance(payload, dict): + errors = payload.get("errors") + if isinstance(errors, list) and errors: + first = errors[0] + if isinstance(first, dict): + detail = first.get("detail") or first.get("message") or first.get("title") 
+ if detail: + return str(detail) + title = payload.get("title") + detail = payload.get("detail") + if title and detail: + return "%s: %s" % (title, detail) + if detail: + return str(detail) + if title: + return str(title) + return fallback_text or "Official API request failed" + + def _parse_user(self, user: Dict[str, Any]) -> UserProfile: + metrics = user.get("public_metrics") or {} + entities = user.get("entities") or {} + url_entity = entities.get("url") or {} + urls = url_entity.get("urls") or [] + expanded_url = "" + if urls and isinstance(urls[0], dict): + expanded_url = str(urls[0].get("expanded_url") or urls[0].get("url") or "") + + return UserProfile( + id=str(user.get("id") or ""), + name=str(user.get("name") or ""), + screen_name=str(user.get("username") or ""), + bio=str(user.get("description") or ""), + location=str(user.get("location") or ""), + url=expanded_url, + followers_count=int(metrics.get("followers_count") or 0), + following_count=int(metrics.get("following_count") or 0), + tweets_count=int(metrics.get("tweet_count") or 0), + likes_count=int(metrics.get("like_count") or 0), + verified=bool(user.get("verified", False)), + profile_image_url=str(user.get("profile_image_url") or ""), + created_at=str(user.get("created_at") or ""), + ) + + def _parse_tweets(self, data: List[Any], includes: Dict[str, Any]) -> List[Tweet]: + user_map = { + str(user.get("id")): user + for user in includes.get("users", []) + if isinstance(user, dict) and user.get("id") + } + media_map = { + str(media.get("media_key")): media + for media in includes.get("media", []) + if isinstance(media, dict) and media.get("media_key") + } + tweet_map = { + str(tweet.get("id")): tweet + for tweet in includes.get("tweets", []) + if isinstance(tweet, dict) and tweet.get("id") + } + return [ + self._parse_tweet(tweet, user_map, media_map, tweet_map) + for tweet in data + if isinstance(tweet, dict) + ] + + def _parse_tweet( + self, + tweet: Dict[str, Any], + user_map: Dict[str, 
Dict[str, Any]], + media_map: Dict[str, Dict[str, Any]], + tweet_map: Dict[str, Dict[str, Any]], + ) -> Tweet: + author_data = user_map.get(str(tweet.get("author_id")), {}) + metrics = tweet.get("public_metrics") or {} + attachments = tweet.get("attachments") or {} + entities = tweet.get("entities") or {} + media_items: List[TweetMedia] = [] + for media_key in attachments.get("media_keys") or []: + media = media_map.get(str(media_key)) + if not media: + continue + media_items.append( + TweetMedia( + type=str(media.get("type") or ""), + url=str(media.get("url") or media.get("preview_image_url") or ""), + width=int(media.get("width")) if media.get("width") is not None else None, + height=int(media.get("height")) if media.get("height") is not None else None, + ) + ) + + urls: List[str] = [] + for item in entities.get("urls") or []: + if not isinstance(item, dict): + continue + expanded = item.get("expanded_url") or item.get("unwound_url") or item.get("url") + if expanded: + urls.append(str(expanded)) + + quoted_tweet = None + is_retweet = False + for ref in tweet.get("referenced_tweets") or []: + if not isinstance(ref, dict): + continue + ref_type = str(ref.get("type") or "") + ref_id = str(ref.get("id") or "") + if ref_type == "quoted" and ref_id in tweet_map: + quoted_tweet = self._parse_tweet(tweet_map[ref_id], user_map, media_map, {}) + if ref_type == "retweeted": + is_retweet = True + + return Tweet( + id=str(tweet.get("id") or ""), + text=str(tweet.get("text") or ""), + author=Author( + id=str(author_data.get("id") or tweet.get("author_id") or ""), + name=str(author_data.get("name") or ""), + screen_name=str(author_data.get("username") or ""), + profile_image_url=str(author_data.get("profile_image_url") or ""), + verified=bool(author_data.get("verified", False)), + ), + metrics=Metrics( + likes=int(metrics.get("like_count") or 0), + retweets=int(metrics.get("retweet_count") or 0), + replies=int(metrics.get("reply_count") or 0), + 
quotes=int(metrics.get("quote_count") or 0), + views=int(metrics.get("impression_count") or 0), + bookmarks=int(metrics.get("bookmark_count") or 0), + ), + created_at=str(tweet.get("created_at") or ""), + media=media_items, + urls=urls, + is_retweet=is_retweet, + lang=str(tweet.get("lang") or ""), + quoted_tweet=quoted_tweet, + ) diff --git a/twitter_cli/cli.py b/twitter_cli/cli.py index fac7a55..d1b9cc6 100644 --- a/twitter_cli/cli.py +++ b/twitter_cli/cli.py @@ -31,6 +31,7 @@ from __future__ import annotations import logging +import os import re import sys import time @@ -42,6 +43,7 @@ from rich.console import Console from . import __version__ +from .api_client import TwitterAPIv2Client, has_api_credentials from .auth import get_cookies from .cache import resolve_cached_tweet, save_tweet_cache from .exceptions import TwitterError @@ -84,7 +86,7 @@ OptionalPath = Optional[str] StructuredMode = Optional[str] WritePayload = Dict[str, Any] -WriteOperation = Callable[[TwitterClient], WritePayload] +WriteOperation = Callable[[Any], WritePayload] logger = logging.getLogger(__name__) console = Console(stderr=True) @@ -92,6 +94,7 @@ SEARCH_PRODUCTS = ["Top", "Latest", "Photos", "Videos"] SEARCH_HAS_CHOICES = ["links", "images", "videos", "media"] SEARCH_EXCLUDE_CHOICES = ["retweets", "replies", "links"] +AUTH_MODES = ["auto", "cookie", "api"] def _agent_user_profile(profile: UserProfile) -> dict: @@ -139,13 +142,30 @@ def _load_tweets_from_json(path): raise RuntimeError("Invalid tweet JSON file %s: %s" % (path, exc)) +def _resolve_auth_mode() -> str: + ctx = click.get_current_context(silent=True) + if ctx is not None and ctx.obj and ctx.obj.get("auth_mode"): + return str(ctx.obj["auth_mode"]) + env_mode = os.environ.get("TWITTER_AUTH_MODE", "auto").strip().lower() + return env_mode if env_mode in AUTH_MODES else "auto" + + def _get_client(config=None, quiet=False): - # type: (Optional[Dict[str, Any]], bool) -> TwitterClient + # type: (Optional[Dict[str, Any]], bool) -> 
Any """Create an authenticated API client.""" + mode = _resolve_auth_mode() + if mode == "auto": + mode = "api" if has_api_credentials() else "cookie" + + rate_limit_config = (config or {}).get("rateLimit") + if mode == "api": + if not quiet: + console.print("\n🔐 Using official X API auth...") + return TwitterAPIv2Client(rate_limit_config) + if not quiet: console.print("\n🔐 Getting Twitter cookies...") cookies = get_cookies() - rate_limit_config = (config or {}).get("rateLimit") return TwitterClient( cookies["auth_token"], cookies["ct0"], @@ -290,15 +310,23 @@ def _run_write_command( @click.group() @click.option("--verbose", "-v", is_flag=True, help="Enable debug logging.") @click.option("--compact", "-c", is_flag=True, help="Compact output (minimal fields, LLM-friendly).") +@click.option( + "--auth-mode", + type=click.Choice(AUTH_MODES, case_sensitive=False), + default=None, + help="Auth backend: auto, cookie, or api.", +) @click.version_option(version=__version__) @click.pass_context -def cli(ctx, verbose, compact): - # type: (Any, bool, bool) -> None +def cli(ctx, verbose, compact, auth_mode): + # type: (Any, bool, bool, Optional[str]) -> None """twitter — Twitter/X CLI tool 🐦""" ensure_utf8_streams() _setup_logging(verbose) ctx.ensure_object(dict) ctx.obj["compact"] = compact + resolved = (auth_mode or os.environ.get("TWITTER_AUTH_MODE", "auto")).strip().lower() + ctx.obj["auth_mode"] = resolved if resolved in AUTH_MODES else "auto" def _fetch_and_display(fetch_fn, label, emoji, max_count, as_json, as_yaml, output_file, do_filter, config=None, compact=False, full_text=False): @@ -888,7 +916,7 @@ def following(screen_name, max_count, as_json, as_yaml): def _upload_images(client, image_paths, rich_output=True): - # type: (TwitterClient, tuple, bool) -> list + # type: (Any, tuple, bool) -> list """Upload images and return list of media_id strings.""" if not image_paths: return [] @@ -910,7 +938,7 @@ def _write_action(emoji, action_desc, client_method, tweet_id, 
as_json=False, as """ action_name = action_desc.lower().replace(" ", "_") - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: getattr(client, client_method)(tweet_id) return {"success": True, "action": action_name, "id": tweet_id} @@ -942,7 +970,7 @@ def post(text, reply_to, images, as_json, as_yaml): action = "Replying to %s" % reply_to if reply_to else "Posting tweet" rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: media_ids = _upload_images(client, images, rich_output=rich_output) tweet_id = client.create_tweet(text, reply_to_id=reply_to, media_ids=media_ids or None) return {"success": True, "action": "post", "id": tweet_id, "url": "https://x.com/i/status/%s" % tweet_id} @@ -969,7 +997,7 @@ def reply_tweet(tweet_id, text, images, as_json, as_yaml): """Reply to a tweet. TWEET_ID is the tweet to reply to, TEXT is the reply content.""" tweet_id = _normalize_tweet_id(tweet_id) rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: media_ids = _upload_images(client, images, rich_output=rich_output) new_id = client.create_tweet(text, reply_to_id=tweet_id, media_ids=media_ids or None) return { @@ -1002,7 +1030,7 @@ def quote_tweet(tweet_id, text, images, as_json, as_yaml): """Quote-tweet a tweet. 
TWEET_ID is the tweet to quote, TEXT is the commentary.""" tweet_id = _normalize_tweet_id(tweet_id) rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: media_ids = _upload_images(client, images, rich_output=rich_output) new_id = client.quote_tweet(tweet_id, text, media_ids=media_ids or None) return { @@ -1080,7 +1108,7 @@ def follow_user(screen_name, as_json, as_yaml): """Follow a user. SCREEN_NAME is the @handle (without @).""" screen_name = screen_name.lstrip("@") - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: user_id = client.resolve_user_id(screen_name) client.follow_user(user_id) return {"success": True, "action": "follow", "screenName": screen_name, "userId": user_id} @@ -1103,7 +1131,7 @@ def unfollow_user(screen_name, as_json, as_yaml): """Unfollow a user. SCREEN_NAME is the @handle (without @).""" screen_name = screen_name.lstrip("@") - def operation(client: TwitterClient) -> WritePayload: + def operation(client: Any) -> WritePayload: user_id = client.resolve_user_id(screen_name) client.unfollow_user(user_id) return {"success": True, "action": "unfollow", "screenName": screen_name, "userId": user_id} diff --git a/twitter_cli/exceptions.py b/twitter_cli/exceptions.py index 75be7ac..abfbae7 100644 --- a/twitter_cli/exceptions.py +++ b/twitter_cli/exceptions.py @@ -60,6 +60,12 @@ class InvalidInputError(TwitterError): error_code = "invalid_input" +class UnsupportedFeatureError(TwitterError): + """Raised when the selected auth backend does not support a command yet.""" + + error_code = "unsupported_operation" + + class TwitterAPIError(TwitterError): """Raised on non-OK Twitter API responses with HTTP status + message.""" diff --git a/twitter_cli/timeutil.py b/twitter_cli/timeutil.py index 0df2b60..aa99f44 100644 --- a/twitter_cli/timeutil.py +++ b/twitter_cli/timeutil.py @@ -1,7 +1,7 @@ 
"""Time formatting utilities for twitter-cli. -Converts Twitter API timestamps (e.g. "Sat Mar 08 12:00:00 +0000 2026") -into human-friendly local time and relative time strings. +Supports both legacy Twitter/X timestamps (e.g. "Sat Mar 08 12:00:00 +0000 2026") +and official API v2 ISO-8601 timestamps (e.g. "2026-03-08T12:00:00.000Z"). """ from __future__ import annotations @@ -17,14 +17,25 @@ def _parse_twitter_time(created_at: str) -> Optional[datetime]: - """Parse a Twitter API timestamp into a timezone-aware datetime.""" + """Parse legacy or ISO-8601 Twitter timestamps into a timezone-aware datetime.""" if not created_at: return None try: return datetime.strptime(created_at, _TWITTER_TIME_FORMAT) + except (ValueError, TypeError): + pass + + iso_candidate = created_at + if iso_candidate.endswith("Z"): + iso_candidate = iso_candidate[:-1] + "+00:00" + try: + dt = datetime.fromisoformat(iso_candidate) except (ValueError, TypeError): logger.debug("Failed to parse Twitter timestamp: %s", created_at) return None + if dt.tzinfo is None: + return dt.replace(tzinfo=timezone.utc) + return dt def format_local_time(created_at: str) -> str: From 27380b702c904a8b1aaa2985f87498d29d7ace83 Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:24:11 +0100 Subject: [PATCH 2/6] Add official API mode support for timelines and media --- README.md | 15 +- tests/test_api_client.py | 298 +++++++++++++++++++++++++++++++++++++ tests/test_cli.py | 35 ++++- twitter_cli/api_client.py | 302 +++++++++++++++++++++++++++++++++----- twitter_cli/cli.py | 3 +- 5 files changed, 600 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index b5da676..c1e4df8 100644 --- a/README.md +++ b/README.md @@ -189,19 +189,20 @@ Choose the backend with `--auth-mode auto|cookie|api` or `TWITTER_AUTH_MODE=auto export TWITTER_AUTH_MODE=api export TWITTER_API_BEARER_TOKEN=... 
-# OAuth 2.0 user-context mode (required for whoami/status/post/like/retweet/follow) +# OAuth 2.0 user-context mode (required for whoami/status/post/like/retweet/follow/bookmarks) export TWITTER_API_ACCESS_TOKEN=... # Optional: skip /users/me lookup for write actions export TWITTER_API_USER_ID=... ``` -**Official API mode currently supports:** -- Read: `user`, `user-posts`, `search`, `followers`, `following`, `status`, `whoami` -- Write: `post`, `reply`, `quote`, `delete`, `like`, `unlike`, `retweet`, `unretweet`, `follow`, `unfollow` +**twitter-cli API mode currently supports:** +- Read: `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `likes`, `search`, `user`, `user-posts`, `followers`, `following`, `status`, `whoami` +- Write: `post`, `reply`, `quote`, `delete`, `like`, `unlike`, `retweet`, `unretweet`, `follow`, `unfollow`, `bookmark`, `unbookmark` +- Media: image upload in `post` / `reply` / `quote` -**Official API mode does not support yet:** -- `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `likes`, `bookmark`, `unbookmark` -- Image upload in `post` / `reply` / `quote` +**API mode notes:** +- `feed` and `feed -t following` both use the official reverse-chronological home timeline endpoint exposed by the authenticated user's API access. +- `article` uses the official tweet lookup response and renders article metadata/content when the API returns article fields for that post. **Chrome multi-profile**: All Chrome profiles are scanned automatically. 
To specify a profile: diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 2b980a3..677bce5 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -136,6 +136,22 @@ def test_api_client_create_tweet_uses_access_token(monkeypatch) -> None: assert json.loads(data) == {"text": "hi"} +def test_api_client_create_tweet_supports_media_ids(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + session = DummySession([DummyResponse(200, {"data": {"id": "123", "text": "hi"}})]) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweet_id = client.create_tweet("hi", media_ids=["m1", "m2"]) + + assert tweet_id == "123" + assert json.loads(session.calls[0][3]) == { + "text": "hi", + "media": {"media_ids": ["m1", "m2"]}, + } + + def test_api_client_fetch_user_404_maps_to_not_found(monkeypatch) -> None: monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") session = DummySession([DummyResponse(404, {"title": "Not Found Error", "detail": "User not found"})]) @@ -145,3 +161,285 @@ def test_api_client_fetch_user_404_maps_to_not_found(monkeypatch) -> None: with pytest.raises(NotFoundError, match="User not found"): client.fetch_user("missing") + + +def test_api_client_fetch_home_timeline_uses_reverse_chronological_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse(200, {"data": {"id": "me", "name": "Alice", "username": "alice"}}), + DummyResponse( + 200, + { + "data": [ + { + "id": "10", + "text": "timeline post", + "author_id": "u1", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"like_count": 2}, + } + ], + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": 
"alice"}], + }, + "meta": {"result_count": 1}, + }, + ), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_home_timeline(count=1) + + assert [tweet.id for tweet in tweets] == ["10"] + assert session.calls[0][1].endswith("/users/me") + assert session.calls[1][1].endswith("/users/me/timelines/reverse_chronological") + + +def test_api_client_fetch_list_timeline_uses_lists_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [ + { + "id": "11", + "text": "from a list", + "author_id": "u1", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"like_count": 5}, + } + ], + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_list_timeline("123", count=1) + + assert [tweet.id for tweet in tweets] == ["11"] + assert session.calls[0][1].endswith("/lists/123/tweets") + + +def test_api_client_fetch_tweet_detail_uses_lookup_and_search(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": { + "id": "123", + "text": "root", + "author_id": "u1", + "conversation_id": "123", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"reply_count": 1}, + }, + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + }, + ), + DummyResponse( + 200, + { + "data": [ + { + "id": "123", + "text": "root", + "author_id": "u1", + 
"created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"reply_count": 1}, + }, + { + "id": "124", + "text": "reply", + "author_id": "u2", + "created_at": "2026-03-08T12:05:00.000Z", + "public_metrics": {"like_count": 1}, + }, + ], + "includes": { + "users": [ + {"id": "u1", "name": "Alice", "username": "alice"}, + {"id": "u2", "name": "Bob", "username": "bob"}, + ], + }, + "meta": {"result_count": 2}, + }, + ), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_tweet_detail("123", count=5) + + assert [tweet.id for tweet in tweets] == ["123", "124"] + assert session.calls[0][1].endswith("/tweets/123") + assert session.calls[1][1].endswith("/tweets/search/recent") + assert session.calls[1][2]["query"] == "conversation_id:123" + + +def test_api_client_fetch_bookmarks_requires_user_context(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": {"id": "me", "name": "Alice", "username": "alice"}, + }, + ), + DummyResponse( + 200, + { + "data": [ + { + "id": "21", + "text": "saved post", + "author_id": "u2", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"bookmark_count": 3}, + } + ], + "includes": { + "users": [{"id": "u2", "name": "Bob", "username": "bob"}], + }, + "meta": {"result_count": 1}, + }, + ), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_bookmarks(count=1) + + assert [tweet.id for tweet in tweets] == ["21"] + assert session.calls[0][1].endswith("/users/me") + assert session.calls[1][1].endswith("/users/me/bookmarks") + assert session.calls[1][4]["Authorization"] == "Bearer access-token" + + +def 
test_api_client_fetch_article_parses_article_fields(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": { + "id": "55", + "text": "article teaser", + "author_id": "u1", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"like_count": 4}, + "article": {"title": "Title", "text": "Body text"}, + }, + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweet = client.fetch_article("55") + + assert tweet.id == "55" + assert tweet.article_title == "Title" + assert tweet.article_text == "Body text" + + +def test_api_client_fetch_user_likes_uses_liked_tweets_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [ + { + "id": "31", + "text": "liked post", + "author_id": "u3", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"like_count": 7}, + } + ], + "includes": { + "users": [{"id": "u3", "name": "Cara", "username": "cara"}], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_user_likes("42", count=1) + + assert [tweet.id for tweet in tweets] == ["31"] + assert session.calls[0][1].endswith("/users/42/liked_tweets") + + +def test_api_client_bookmark_write_endpoints_use_user_context(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + 
session = DummySession( + [ + DummyResponse(200, {"data": {"id": "me", "name": "Alice", "username": "alice"}}), + DummyResponse(200, {"data": {"bookmarked": True}}), + DummyResponse(200, {"data": {"removed": True}}), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + + assert client.bookmark_tweet("99") is True + assert client.unbookmark_tweet("99") is True + assert session.calls[1][0] == "POST" + assert session.calls[1][1].endswith("/users/me/bookmarks") + assert json.loads(session.calls[1][3]) == {"tweet_id": "99"} + assert session.calls[2][0] == "DELETE" + assert session.calls[2][1].endswith("/users/me/bookmarks/99") + + +def test_api_client_upload_media_uses_v2_media_endpoint(monkeypatch, tmp_path) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + image_path = tmp_path / "image.png" + image_path.write_bytes(b"\x89PNG\r\n\x1a\n") + session = DummySession([DummyResponse(200, {"data": {"id": "m1"}})]) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + media_id = client.upload_media(str(image_path)) + + assert media_id == "m1" + assert session.calls[0][1].endswith("/media/upload") + payload = json.loads(session.calls[0][3]) + assert payload["media_category"] == "tweet_image" + assert payload["media_type"] == "image/png" diff --git a/tests/test_cli.py b/tests/test_cli.py index f7c114e..f8b11fe 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -6,7 +6,6 @@ from click.testing import CliRunner import pytest from rich.console import Console -from twitter_cli.exceptions import UnsupportedFeatureError import yaml from twitter_cli.cli import cli @@ -317,13 +316,14 @@ def fetch_me(self) -> UserProfile: assert payload["data"]["user"]["username"] == "autoapi" -def 
test_cli_api_mode_unsupported_feed_returns_structured_error(monkeypatch) -> None: +def test_cli_api_mode_feed_returns_structured_success(monkeypatch) -> None: class FakeAPIClient: def __init__(self, rate_limit_config=None) -> None: pass def fetch_home_timeline(self, count: int): - raise UnsupportedFeatureError("feed unsupported in api mode") + assert count == 5 + return [] monkeypatch.setattr("twitter_cli.cli.TwitterAPIv2Client", FakeAPIClient) monkeypatch.setattr("twitter_cli.cli.load_config", lambda: {"fetch": {"count": 5}, "filter": {}, "rateLimit": {}}) @@ -331,10 +331,10 @@ def fetch_home_timeline(self, count: int): result = runner.invoke(cli, ["--auth-mode", "api", "feed", "--json"]) - assert result.exit_code == 1 + assert result.exit_code == 0 payload = yaml.safe_load(result.output) - assert payload["ok"] is False - assert payload["error"]["code"] == "unsupported_operation" + assert payload["ok"] is True + assert payload["data"] == [] def test_cli_whoami_auto_yaml(monkeypatch) -> None: @@ -426,6 +426,29 @@ def create_tweet(self, text: str, reply_to_id=None, media_ids=None) -> str: assert payload["data"]["id"] == "999" +def test_cli_post_with_images_passes_media_ids(monkeypatch, tmp_path) -> None: + image_path = tmp_path / "photo.png" + image_path.write_bytes(b"png") + calls = [] + + class FakeClient: + def upload_media(self, path: str) -> str: + assert path == str(image_path) + return "m1" + + def create_tweet(self, text: str, reply_to_id=None, media_ids=None) -> str: + calls.append({"text": text, "reply_to_id": reply_to_id, "media_ids": media_ids}) + return "999" + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + runner = CliRunner() + + result = runner.invoke(cli, ["post", "hello", "--image", str(image_path), "--json"]) + + assert result.exit_code == 0 + assert calls == [{"text": "hello", "reply_to_id": None, "media_ids": ["m1"]}] + + def test_cli_like_yaml_output(monkeypatch) -> None: class FakeClient: 
def like_tweet(self, tweet_id: str) -> bool: diff --git a/twitter_cli/api_client.py b/twitter_cli/api_client.py index 2caaf9c..291cacc 100644 --- a/twitter_cli/api_client.py +++ b/twitter_cli/api_client.py @@ -2,21 +2,23 @@ from __future__ import annotations +import base64 import json import logging +import mimetypes import os import time import urllib.parse -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, cast from curl_cffi import requests as _cffi_requests from .exceptions import ( AuthenticationError, + MediaUploadError, NetworkError, NotFoundError, TwitterAPIError, - UnsupportedFeatureError, ) from .models import Author, Metrics, Tweet, TweetMedia, UserProfile @@ -28,9 +30,13 @@ _TWEET_FIELDS = "attachments,author_id,created_at,entities,lang,public_metrics,referenced_tweets" _MEDIA_FIELDS = "media_key,preview_image_url,type,url,width,height" _TWEET_EXPANSIONS = "author_id,attachments.media_keys,referenced_tweets.id,referenced_tweets.id.author_id" -_COOKIE_HINT = ( - "Use --auth-mode cookie for home feed, bookmarks, tweet detail, article, list timeline, " - "or media upload commands." 
+_DETAIL_TWEET_FIELDS = ( + "article,attachments,author_id,conversation_id,created_at,entities,in_reply_to_user_id," + "lang,note_tweet,public_metrics,referenced_tweets" +) +_DETAIL_TWEET_EXPANSIONS = ( + "article.cover_media,article.media_entities,author_id,attachments.media_keys," + "referenced_tweets.id,referenced_tweets.id.author_id" ) _api_session: Any = None @@ -57,6 +63,16 @@ def _get_api_session() -> Any: class TwitterAPIv2Client: """Official X/Twitter API v2 client for a supported subset of commands.""" + _SUPPORTED_IMAGE_TYPES = { + "image/bmp", + "image/jpeg", + "image/pjpeg", + "image/png", + "image/tiff", + "image/webp", + } + _MAX_IMAGE_SIZE = 5 * 1024 * 1024 + def __init__(self, rate_limit_config: Optional[Dict[str, Any]] = None) -> None: self._access_token = os.environ.get("TWITTER_API_ACCESS_TOKEN", "").strip() self._bearer_token = os.environ.get("TWITTER_API_BEARER_TOKEN", "").strip() @@ -167,13 +183,11 @@ def create_tweet( reply_to_id: Optional[str] = None, media_ids: Optional[List[str]] = None, ) -> str: - if media_ids: - raise UnsupportedFeatureError( - "Official API mode does not support media upload yet. %s" % _COOKIE_HINT - ) body: Dict[str, Any] = {"text": text} if reply_to_id: body["reply"] = {"in_reply_to_tweet_id": reply_to_id} + if media_ids: + body["media"] = {"media_ids": media_ids} data = self._api_request("POST", "/tweets", json_body=body, require_user_context=True) created = data.get("data") or {} tweet_id = str(created.get("id") or "") @@ -183,14 +197,13 @@ def create_tweet( return tweet_id def quote_tweet(self, tweet_id: str, text: str, media_ids: Optional[List[str]] = None) -> str: + body: Dict[str, Any] = {"text": text, "quote_tweet_id": tweet_id} if media_ids: - raise UnsupportedFeatureError( - "Official API mode does not support media upload yet. 
%s" % _COOKIE_HINT - ) + body["media"] = {"media_ids": media_ids} data = self._api_request( "POST", "/tweets", - json_body={"text": text, "quote_tweet_id": tweet_id}, + json_body=body, require_user_context=True, ) created = data.get("data") or {} @@ -262,49 +275,161 @@ def unfollow_user(self, user_id: str) -> bool: self._write_delay() return True - # ── Unsupported cookie-only operations ─────────────────────────── + # ── Timeline and bookmark operations ───────────────────────────── def fetch_home_timeline(self, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError("Official API mode does not expose the home timeline. %s" % _COOKIE_HINT) + return self._paginate_tweets( + "/users/%s/timelines/reverse_chronological" % self._authenticated_user_id(), + count, + { + "tweet.fields": _DETAIL_TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + require_user_context=True, + ) def fetch_following_feed(self, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError( - "Official API mode does not expose the following feed timeline. %s" % _COOKIE_HINT - ) + return self.fetch_home_timeline(count) def fetch_bookmarks(self, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError("Official API mode does not expose bookmarks. %s" % _COOKIE_HINT) + return self._paginate_tweets( + "/users/%s/bookmarks" % self._authenticated_user_id(), + count, + { + "tweet.fields": _TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + require_user_context=True, + ) def fetch_user_likes(self, user_id: str, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError( - "Official API mode does not support the likes timeline command yet. 
%s" % _COOKIE_HINT + return self._paginate_tweets( + "/users/%s/liked_tweets" % user_id, + count, + { + "tweet.fields": _TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, ) def fetch_tweet_detail(self, tweet_id: str, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError( - "Official API mode does not support tweet detail plus replies yet. %s" % _COOKIE_HINT + root_data, includes = self._lookup_tweet_payload(tweet_id, include_article=True) + root_tweets = self._parse_tweets([root_data], includes) + if not root_tweets: + raise NotFoundError("Tweet %s not found" % tweet_id) + root_tweet = root_tweets[0] + if count <= 1: + return [root_tweet] + + conversation_id = str(root_data.get("conversation_id") or root_tweet.id or tweet_id) + reply_query = ( + "conversation_id:%s" % conversation_id + if conversation_id == root_tweet.id + else "in_reply_to_tweet_id:%s" % root_tweet.id ) + replies = self._paginate_tweets( + "/tweets/search/recent", + max(count * 2, count), + { + "query": reply_query, + "sort_order": "recency", + "tweet.fields": _DETAIL_TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) + filtered_replies = [tweet for tweet in replies if tweet.id != root_tweet.id] + filtered_replies.sort(key=lambda tweet: tweet.created_at) + return [root_tweet] + filtered_replies[: max(count - 1, 0)] def fetch_article(self, tweet_id: str) -> Tweet: - raise UnsupportedFeatureError("Official API mode does not support Twitter Articles yet. 
%s" % _COOKIE_HINT) + tweet_data, includes = self._lookup_tweet_payload(tweet_id, include_article=True) + tweets = self._parse_tweets([tweet_data], includes) + if not tweets: + raise NotFoundError("Tweet %s not found" % tweet_id) + article_tweet = tweets[0] + if article_tweet.article_title is None and article_tweet.article_text is None: + raise NotFoundError("Tweet %s has no article content" % tweet_id) + return article_tweet def fetch_list_timeline(self, list_id: str, count: int = 20) -> List[Tweet]: - raise UnsupportedFeatureError("Official API mode does not support list timelines yet. %s" % _COOKIE_HINT) + return self._paginate_tweets( + "/lists/%s/tweets" % list_id, + count, + { + "tweet.fields": _TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) def bookmark_tweet(self, tweet_id: str) -> bool: - raise UnsupportedFeatureError( - "Official API mode does not expose bookmark write endpoints. %s" % _COOKIE_HINT + self._api_request( + "POST", + "/users/%s/bookmarks" % self._authenticated_user_id(), + json_body={"tweet_id": tweet_id}, + require_user_context=True, ) + self._write_delay() + return True def unbookmark_tweet(self, tweet_id: str) -> bool: - raise UnsupportedFeatureError( - "Official API mode does not expose bookmark write endpoints. %s" % _COOKIE_HINT + self._api_request( + "DELETE", + "/users/%s/bookmarks/%s" % (self._authenticated_user_id(), tweet_id), + require_user_context=True, ) + self._write_delay() + return True def upload_media(self, path: str) -> str: - raise UnsupportedFeatureError( - "Official API mode does not support media upload yet. 
%s" % _COOKIE_HINT + if not self._access_token: + raise AuthenticationError("Official API media upload requires TWITTER_API_ACCESS_TOKEN.") + if not os.path.isfile(path): + raise MediaUploadError("File not found: %s" % path) + + file_size = os.path.getsize(path) + if file_size > self._MAX_IMAGE_SIZE: + raise MediaUploadError( + "File too large: %.1f MB (max %.0f MB)" + % (file_size / (1024 * 1024), self._MAX_IMAGE_SIZE / (1024 * 1024)), + ) + + media_type = mimetypes.guess_type(path)[0] or "" + if media_type not in self._SUPPORTED_IMAGE_TYPES: + raise MediaUploadError( + "Unsupported image format: %s (supported: bmp, jpeg, png, tiff, webp)" % media_type, + ) + + with open(path, "rb") as image_file: + media = base64.b64encode(image_file.read()).decode("ascii") + + data = self._api_request( + "POST", + "/media/upload", + json_body={ + "media": media, + "media_category": "tweet_image", + "media_type": media_type, + "shared": False, + }, + require_user_context=True, ) + raw_media_payload = data.get("data") + media_payload: Dict[str, Any] = raw_media_payload if isinstance(raw_media_payload, dict) else {} + media_id = str(media_payload.get("id") or "") + if not media_id: + raise MediaUploadError("Media upload did not return an id") + self._wait_for_media(media_id, media_payload.get("processing_info")) + return media_id # ── Internals ──────────────────────────────────────────────────── @@ -313,7 +438,63 @@ def _authenticated_user_id(self) -> str: return self._configured_user_id return self.fetch_me().id - def _paginate_tweets(self, path: str, count: int, params: Dict[str, Any]) -> List[Tweet]: + def _lookup_tweet_payload( + self, + tweet_id: str, + *, + include_article: bool = False, + ) -> tuple[Dict[str, Any], Dict[str, Any]]: + expansions = _DETAIL_TWEET_EXPANSIONS if include_article else _TWEET_EXPANSIONS + tweet_fields = _DETAIL_TWEET_FIELDS if include_article else _TWEET_FIELDS + data = self._api_request( + "GET", + "/tweets/%s" % tweet_id, + params={ + 
"tweet.fields": tweet_fields, + "expansions": expansions, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) + tweet = data.get("data") + if not isinstance(tweet, dict): + raise NotFoundError("Tweet %s not found" % tweet_id) + raw_includes = data.get("includes") + includes: Dict[str, Any] = raw_includes if isinstance(raw_includes, dict) else {} + return tweet, includes + + def _wait_for_media(self, media_id: str, processing_info: Any) -> None: + current_info: Dict[str, Any] = processing_info if isinstance(processing_info, dict) else {} + while current_info: + state = str(current_info.get("state") or "") + if state in {"", "succeeded"}: + return + if state == "failed": + raw_error = current_info.get("error") + error: Dict[str, Any] = raw_error if isinstance(raw_error, dict) else {} + detail = error.get("detail") or error.get("message") or "Media processing failed" + raise MediaUploadError(str(detail)) + delay = max(int(current_info.get("check_after_secs") or 1), 1) + time.sleep(delay) + status = self._api_request( + "GET", + "/media/upload", + params={"command": "STATUS", "media_id": media_id}, + require_user_context=True, + ) + raw_data = status.get("data") + data: Dict[str, Any] = raw_data if isinstance(raw_data, dict) else {} + raw_processing_info = data.get("processing_info") + current_info = raw_processing_info if isinstance(raw_processing_info, dict) else {} + + def _paginate_tweets( + self, + path: str, + count: int, + params: Dict[str, Any], + *, + require_user_context: bool = False, + ) -> List[Tweet]: if count <= 0: return [] count = min(count, self._max_count) @@ -327,9 +508,16 @@ def _paginate_tweets(self, path: str, count: int, params: Dict[str, Any]) -> Lis if next_token: page_params["pagination_token"] = next_token - data = self._api_request("GET", path, params=page_params) - page_items = data.get("data") if isinstance(data.get("data"), list) else [] - includes = data.get("includes") if isinstance(data.get("includes"), dict) else 
{} + data = self._api_request( + "GET", + path, + params=page_params, + require_user_context=require_user_context, + ) + raw_page_items = data.get("data") + page_items: List[Any] = raw_page_items if isinstance(raw_page_items, list) else [] + raw_includes = data.get("includes") + includes: Dict[str, Any] = raw_includes if isinstance(raw_includes, dict) else {} for tweet in self._parse_tweets(page_items, includes): if tweet.id and tweet.id not in seen_ids: seen_ids.add(tweet.id) @@ -360,7 +548,8 @@ def _paginate_users(self, path: str, count: int, params: Dict[str, Any]) -> List page_params["pagination_token"] = next_token data = self._api_request("GET", path, params=page_params) - items = data.get("data") if isinstance(data.get("data"), list) else [] + raw_items = data.get("data") + items: List[Any] = raw_items if isinstance(raw_items, list) else [] for item in items: if not isinstance(item, dict): continue @@ -533,7 +722,9 @@ def _parse_tweet( ) -> Tweet: author_data = user_map.get(str(tweet.get("author_id")), {}) metrics = tweet.get("public_metrics") or {} + note_tweet = tweet.get("note_tweet") or {} attachments = tweet.get("attachments") or {} + article = tweet.get("article") or {} entities = tweet.get("entities") or {} media_items: List[TweetMedia] = [] for media_key in attachments.get("media_keys") or []: @@ -544,8 +735,8 @@ def _parse_tweet( TweetMedia( type=str(media.get("type") or ""), url=str(media.get("url") or media.get("preview_image_url") or ""), - width=int(media.get("width")) if media.get("width") is not None else None, - height=int(media.get("height")) if media.get("height") is not None else None, + width=cast(Optional[int], media.get("width")), + height=cast(Optional[int], media.get("height")), ) ) @@ -569,9 +760,13 @@ def _parse_tweet( if ref_type == "retweeted": is_retweet = True + article_title = self._extract_article_title(article) + article_text = self._extract_article_text(article) + text = str(note_tweet.get("text") or tweet.get("text") or "") 
+ return Tweet( id=str(tweet.get("id") or ""), - text=str(tweet.get("text") or ""), + text=text, author=Author( id=str(author_data.get("id") or tweet.get("author_id") or ""), name=str(author_data.get("name") or ""), @@ -593,4 +788,33 @@ def _parse_tweet( is_retweet=is_retweet, lang=str(tweet.get("lang") or ""), quoted_tweet=quoted_tweet, + article_title=article_title, + article_text=article_text, ) + + def _extract_article_title(self, article: Any) -> Optional[str]: + return self._find_nested_text(article, ["title", "headline", "display_title", "name"]) + + def _extract_article_text(self, article: Any) -> Optional[str]: + return self._find_nested_text( + article, + ["text", "plain_text", "body", "content", "description", "summary", "markdown"], + ) + + def _find_nested_text(self, value: Any, candidate_keys: List[str]) -> Optional[str]: + if isinstance(value, dict): + for key in candidate_keys: + candidate = value.get(key) + if isinstance(candidate, str) and candidate.strip(): + return candidate.strip() + for nested in value.values(): + found = self._find_nested_text(nested, candidate_keys) + if found: + return found + return None + if isinstance(value, list): + for item in value: + found = self._find_nested_text(item, candidate_keys) + if found: + return found + return None diff --git a/twitter_cli/cli.py b/twitter_cli/cli.py index d1b9cc6..21ae054 100644 --- a/twitter_cli/cli.py +++ b/twitter_cli/cli.py @@ -325,7 +325,8 @@ def cli(ctx, verbose, compact, auth_mode): _setup_logging(verbose) ctx.ensure_object(dict) ctx.obj["compact"] = compact - resolved = (auth_mode or os.environ.get("TWITTER_AUTH_MODE", "auto")).strip().lower() + raw_auth_mode = auth_mode if auth_mode is not None else (os.environ.get("TWITTER_AUTH_MODE") or "auto") + resolved = raw_auth_mode.strip().lower() ctx.obj["auth_mode"] = resolved if resolved in AUTH_MODES else "auto" From 222351cc9870c728ff8bd02205e8c3cca62d4776 Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:26:43 +0100 
Subject: [PATCH 3/6] Ignore macOS Finder metadata --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index a78b3ea..ed267d2 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ dist/ build/ .venv/ .env +.DS_Store *.json !tests/fixtures/*.json !config.yaml From f069e97a3db68fb7b0cf884668bfbae2b8f806ad Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:35:15 +0100 Subject: [PATCH 4/6] Sync Chinese API mode README with implementation --- README.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c1e4df8..befa683 100644 --- a/README.md +++ b/README.md @@ -538,19 +538,20 @@ twitter-cli 现在支持两套认证后端: export TWITTER_AUTH_MODE=api export TWITTER_API_BEARER_TOKEN=... -# OAuth 2.0 user context(whoami/status/post/like/retweet/follow 等需要) +# OAuth 2.0 user context(whoami/status/post/like/retweet/follow/bookmarks 等需要) export TWITTER_API_ACCESS_TOKEN=... # 可选:避免写操作前额外请求 /users/me export TWITTER_API_USER_ID=... 
``` -**官方 API 模式当前支持:** -- 读取:`user`、`user-posts`、`search`、`followers`、`following`、`status`、`whoami` -- 写入:`post`、`reply`、`quote`、`delete`、`like`、`unlike`、`retweet`、`unretweet`、`follow`、`unfollow` +**twitter-cli API 模式当前支持:** +- 读取:`feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`likes`、`search`、`user`、`user-posts`、`followers`、`following`、`status`、`whoami` +- 写入:`post`、`reply`、`quote`、`delete`、`like`、`unlike`、`retweet`、`unretweet`、`follow`、`unfollow`、`bookmark`、`unbookmark` +- 媒体:`post` / `reply` / `quote` 支持图片上传 -**官方 API 模式暂不支持:** -- `feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`likes`、`bookmark`、`unbookmark` -- `post` / `reply` / `quote` 的图片上传 +**API 模式说明:** +- `feed` 和 `feed -t following` 当前都映射到官方 reverse-chronological home timeline。 +- `article` 基于官方 tweet lookup 返回的 article 字段做 best-effort 渲染;是否有正文取决于 API 返回内容。 **Chrome 多 Profile 支持**:会自动遍历所有 Chrome profile。也可以通过环境变量指定: From 1343381b3efffb120cfad39f63dadaa8932d06c6 Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:47:46 +0100 Subject: [PATCH 5/6] Expand official API mode coverage and docs --- README.md | 30 +++- SKILL.md | 41 ++++- tests/test_api_client.py | 247 ++++++++++++++++++++++++++++-- tests/test_cli.py | 117 ++++++++++++++- twitter_cli/api_client.py | 281 +++++++++++++++++++++++++++++++++-- twitter_cli/cli.py | 200 +++++++++++++++++++++++-- twitter_cli/client.py | 25 +++- twitter_cli/formatter.py | 67 ++++++++- twitter_cli/models.py | 12 ++ twitter_cli/serialization.py | 24 ++- 10 files changed, 984 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index befa683..9c6576d 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,7 @@ uv sync ### Quick Start ```bash -# Fetch home timeline (For You) +# Fetch home timeline twitter feed # Fetch Following timeline @@ -92,8 +92,8 @@ twitter feed -t following # Enable ranking filter explicitly twitter feed --filter -# Use official API auth mode for supported commands -twitter --auth-mode api search "AI agent" --json +# Use 
official API auth mode with full-archive search +twitter --auth-mode api search "AI agent" --scope all --json ``` ### Usage @@ -114,6 +114,7 @@ twitter bookmarks --max 30 --yaml # Search twitter search "Claude Code" twitter search "AI agent" -t Latest --max 50 +twitter search "AI agent" --scope all # Official API full-archive search twitter search "AI agent" --full-text twitter search "机器学习" --yaml twitter search "python" --from elonmusk --lang en --since 2026-01-01 @@ -124,11 +125,13 @@ twitter search "trending" --filter # Apply ranking filter # Tweet detail (view tweet + replies) twitter tweet 1234567890 twitter tweet 1234567890 --full-text +twitter tweet 1234567890 --reply-scope all # Prefer full-archive reply lookup twitter tweet https://x.com/user/status/1234567890 # Open tweet by index from last list output twitter show 2 # Open tweet #2 from last feed/search twitter show 2 --full-text # Full text in reply table +twitter show 2 --reply-scope all # Prefer full-archive reply lookup twitter show 2 --json # Structured output # Twitter Article @@ -140,15 +143,19 @@ twitter article 1234567890 --output article.md # List timeline twitter list 1539453138322673664 twitter list 1539453138322673664 --full-text +twitter list-info 1539453138322673664 # User twitter user elonmusk twitter user-posts elonmusk --max 20 twitter user-posts elonmusk --full-text twitter user-posts elonmusk -o tweets.json +twitter mentions elonmusk --max 20 twitter likes elonmusk --max 30 # ⚠️ own likes only (private since Jun 2024) twitter likes elonmusk --full-text twitter likes elonmusk -o likes.json +twitter owned-lists elonmusk --max 20 +twitter followed-lists elonmusk --max 20 twitter followers elonmusk --max 50 twitter following elonmusk --max 50 @@ -196,12 +203,14 @@ export TWITTER_API_USER_ID=... 
``` **twitter-cli API mode currently supports:** -- Read: `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `likes`, `search`, `user`, `user-posts`, `followers`, `following`, `status`, `whoami` +- Read: `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `list-info`, `likes`, `mentions`, `search`, `user`, `user-posts`, `owned-lists`, `followed-lists`, `followers`, `following`, `status`, `whoami` - Write: `post`, `reply`, `quote`, `delete`, `like`, `unlike`, `retweet`, `unretweet`, `follow`, `unfollow`, `bookmark`, `unbookmark` - Media: image upload in `post` / `reply` / `quote` **API mode notes:** - `feed` and `feed -t following` both use the official reverse-chronological home timeline endpoint exposed by the authenticated user's API access. +- `search --scope all` uses the official full-archive search endpoint when your API access permits it. +- `tweet` / `show` support `--reply-scope auto|recent|all`; `auto` prefers full-archive search for older posts and falls back when needed. - `article` uses the official tweet lookup response and renders article metadata/content when the API returns article fields for that post. **Chrome multi-profile**: All Chrome profiles are scanned automatically. 
To specify a profile: @@ -458,7 +467,7 @@ twitter feed --filter twitter feed --full-text # 官方 API 模式 -twitter --auth-mode api search "AI agent" --json +twitter --auth-mode api search "AI agent" --scope all --json # 收藏 twitter bookmarks @@ -467,6 +476,7 @@ twitter bookmarks --full-text # 搜索 twitter search "Claude Code" twitter search "AI agent" -t Latest --max 50 +twitter search "AI agent" --scope all # 官方 API 全量历史搜索 twitter search "AI agent" --full-text twitter search "topic" -o results.json # 保存到文件 twitter search "trending" --filter # 启用排序筛选 @@ -474,10 +484,12 @@ twitter search "trending" --filter # 启用排序筛选 # 推文详情 twitter tweet 1234567890 twitter tweet 1234567890 --full-text +twitter tweet 1234567890 --reply-scope all # 通过序号打开上次列表里的推文 twitter show 2 # 打开上次 feed/search 的第 2 条 twitter show 2 --full-text # 在回复表格里显示完整正文 +twitter show 2 --reply-scope all twitter show 2 --json # 结构化输出 # Twitter 长文 @@ -489,15 +501,19 @@ twitter article 1234567890 --output article.md # 列表时间线 twitter list 1539453138322673664 twitter list 1539453138322673664 --full-text +twitter list-info 1539453138322673664 # 用户 twitter user elonmusk twitter user-posts elonmusk --max 20 twitter user-posts elonmusk --full-text twitter user-posts elonmusk -o tweets.json +twitter mentions elonmusk --max 20 twitter likes elonmusk --max 30 # ⚠️ 仅可查看自己的点赞(2024年6月起平台已私密化) twitter likes elonmusk --full-text twitter likes elonmusk -o likes.json +twitter owned-lists elonmusk --max 20 +twitter followed-lists elonmusk --max 20 twitter followers elonmusk twitter following elonmusk @@ -545,12 +561,14 @@ export TWITTER_API_USER_ID=... 
``` **twitter-cli API 模式当前支持:** -- 读取:`feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`likes`、`search`、`user`、`user-posts`、`followers`、`following`、`status`、`whoami` +- 读取:`feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`list-info`、`likes`、`mentions`、`search`、`user`、`user-posts`、`owned-lists`、`followed-lists`、`followers`、`following`、`status`、`whoami` - 写入:`post`、`reply`、`quote`、`delete`、`like`、`unlike`、`retweet`、`unretweet`、`follow`、`unfollow`、`bookmark`、`unbookmark` - 媒体:`post` / `reply` / `quote` 支持图片上传 **API 模式说明:** - `feed` 和 `feed -t following` 当前都映射到官方 reverse-chronological home timeline。 +- `search --scope all` 会在权限允许时使用官方 full-archive search endpoint。 +- `tweet` / `show` 支持 `--reply-scope auto|recent|all`;`auto` 会优先对较老的推文使用 full-archive 搜索,并在必要时自动回退。 - `article` 基于官方 tweet lookup 返回的 article 字段做 best-effort 渲染;是否有正文取决于 API 返回内容。 **Chrome 多 Profile 支持**:会自动遍历所有 Chrome profile。也可以通过环境变量指定: diff --git a/SKILL.md b/SKILL.md index 512f7b9..f397b1c 100644 --- a/SKILL.md +++ b/SKILL.md @@ -2,7 +2,7 @@ name: twitter-cli description: Use twitter-cli for ALL Twitter/X operations — reading tweets, posting, replying, quoting, liking, retweeting, following, searching, user lookups. Invoke whenever user requests any Twitter interaction. author: jackwener -version: "0.8.0" +version: "0.8.1" tags: - twitter - x @@ -14,7 +14,7 @@ tags: # twitter-cli — Twitter/X CLI Tool **Binary:** `twitter` -**Credentials:** browser cookies (auto-extracted) or env vars +**Credentials:** browser cookies (auto-extracted), cookie env vars, or official API env vars ## Setup @@ -32,7 +32,7 @@ uv tool upgrade twitter-cli **IMPORTANT FOR AGENTS**: Before executing ANY twitter-cli command, you MUST first check if credentials exist. If not, you MUST proactively guide the user through the authentication process. Do NOT assume credentials are configured. -**CRITICAL**: Write operations (posting tweets, replying, quoting) REQUIRE full browser cookies. 
Only providing `auth_token` + `ct0` via env vars may result in **226 error** ("looks like automated behavior"). For best results, use browser cookie extraction. +**CRITICAL**: Write operations in cookie mode (posting tweets, replying, quoting) REQUIRE full browser cookies. Only providing `auth_token` + `ct0` via env vars may result in **226 error** ("looks like automated behavior"). For best results, use browser cookie extraction. Official API mode is available for supported read/write commands when API tokens are configured. ### Step 0: Check if already authenticated @@ -45,6 +45,22 @@ If `AUTH_NEEDED`, proceed to guide the user: ### Step 1: Guide user to authenticate +**Method 0: Official API mode** + +Use this when the user has official X API credentials and wants supported API-mode commands. + +```bash +export TWITTER_AUTH_MODE=api +export TWITTER_API_BEARER_TOKEN="" + +# For user-context endpoints and write operations: +export TWITTER_API_ACCESS_TOKEN="" +# Optional: skip /users/me lookup on writes +export TWITTER_API_USER_ID="" + +twitter status --json +``` + **Method A: Browser cookie extraction (recommended)** Ensure user is logged into x.com in one of: Arc, Chrome, Edge, Firefox, Brave. twitter-cli auto-extracts cookies. 
@@ -94,6 +110,8 @@ twitter whoami | Read works, write returns 226 | Full cookies missing — use browser cookie extraction instead of env vars | | `Cookie expired (401/403)` | Ask user to re-login to x.com and retry | | User changed password | All old cookies invalidated — re-extract | +| API mode `401/403` | Check bearer/access token validity and user-context scope | +| API mode full-archive search denied | Retry with `--scope recent` | ## Output Format @@ -150,6 +168,7 @@ twitter whoami --json # JSON output twitter user elonmusk # User profile twitter user elonmusk --json # JSON output twitter feed # Home timeline (For You) +twitter feed -t home # Explicit home timeline twitter feed -t following # Following timeline twitter feed --max 50 # Limit count twitter feed --full-text # Show full post body in table @@ -161,20 +180,27 @@ twitter bookmarks --full-text # Full text in bookmarks table twitter bookmarks --max 30 --yaml twitter search "keyword" # Search tweets twitter search "AI agent" -t Latest --max 50 +twitter search "AI agent" --scope all # Official API full-archive search twitter search "AI agent" --full-text # Full text in search results twitter search "topic" -o results.json # Save to file +twitter mentions elonmusk --max 20 # Mentions timeline twitter tweet 1234567890 # Tweet detail + replies twitter tweet 1234567890 --full-text # Full text in reply table +twitter tweet 1234567890 --reply-scope all # Force full-archive reply lookup twitter tweet https://x.com/user/status/12345 # Accepts URL twitter show 2 # Open tweet #2 from last feed/search list twitter show 2 --full-text # Full text in reply table +twitter show 2 --reply-scope all # Force full-archive reply lookup twitter show 2 --json # Structured output twitter list 1539453138322673664 # List timeline twitter list 1539453138322673664 --full-text +twitter list-info 1539453138322673664 # List metadata twitter user-posts elonmusk --max 20 # User's tweets twitter user-posts elonmusk --full-text twitter 
likes elonmusk --max 30 # User's likes (own only, see note) twitter likes elonmusk --full-text +twitter owned-lists elonmusk --max 20 # Lists owned by user +twitter followed-lists elonmusk --max 20 # Lists followed by user twitter followers elonmusk --max 50 # Followers twitter following elonmusk --max 50 # Following ``` @@ -207,6 +233,15 @@ twitter unfollow elonmusk # Unfollow user - Max 4 images per tweet - Use `--image` / `-i` (repeatable) +## Official API Mode Notes + +- `feed` and `feed -t following` both use the official reverse-chronological home timeline endpoint in API mode. +- `search --scope all` uses the official full-archive search endpoint when the configured API access permits it. +- `tweet` / `show` support `--reply-scope auto|recent|all`. + - `auto` prefers full-archive search for older posts and falls back when `/search/all` is unavailable. +- `article` is best-effort in API mode and depends on the article fields returned by official tweet lookup. +- `mentions`, `list-info`, `owned-lists`, and `followed-lists` are available in official API mode. 
+ ## Agent Workflows ### Post and verify diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 677bce5..5402e7c 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -109,6 +109,19 @@ def test_api_client_fetch_search_parses_expansions(monkeypatch) -> None: assert session.calls[0][2]["sort_order"] == "recency" +def test_api_client_fetch_search_all_scope_uses_full_archive_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession([DummyResponse(200, {"data": [], "meta": {"result_count": 0}})]) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_search("python", count=1, scope="all") + + assert tweets == [] + assert session.calls[0][1].endswith("/tweets/search/all") + + def test_api_client_fetch_me_requires_user_context(monkeypatch) -> None: monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") @@ -199,6 +212,40 @@ def test_api_client_fetch_home_timeline_uses_reverse_chronological_endpoint(monk assert session.calls[1][1].endswith("/users/me/timelines/reverse_chronological") +def test_api_client_fetch_mentions_uses_mentions_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [ + { + "id": "12", + "text": "@alice hi", + "author_id": "u2", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {"like_count": 1}, + } + ], + "includes": { + "users": [{"id": "u2", "name": "Bob", "username": "bob"}], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client 
= TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_mentions("42", count=1) + + assert [tweet.id for tweet in tweets] == ["12"] + assert session.calls[0][1].endswith("/users/42/mentions") + + def test_api_client_fetch_list_timeline_uses_lists_endpoint(monkeypatch) -> None: monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) @@ -241,14 +288,14 @@ def test_api_client_fetch_tweet_detail_uses_lookup_and_search(monkeypatch) -> No DummyResponse( 200, { - "data": { - "id": "123", - "text": "root", - "author_id": "u1", - "conversation_id": "123", - "created_at": "2026-03-08T12:00:00.000Z", - "public_metrics": {"reply_count": 1}, - }, + "data": { + "id": "123", + "text": "root", + "author_id": "u1", + "conversation_id": "123", + "created_at": "2026-03-12T12:00:00.000Z", + "public_metrics": {"reply_count": 1}, + }, "includes": { "users": [{"id": "u1", "name": "Alice", "username": "alice"}], }, @@ -262,14 +309,14 @@ def test_api_client_fetch_tweet_detail_uses_lookup_and_search(monkeypatch) -> No "id": "123", "text": "root", "author_id": "u1", - "created_at": "2026-03-08T12:00:00.000Z", + "created_at": "2026-03-12T12:00:00.000Z", "public_metrics": {"reply_count": 1}, }, { "id": "124", "text": "reply", "author_id": "u2", - "created_at": "2026-03-08T12:05:00.000Z", + "created_at": "2026-03-12T12:05:00.000Z", "public_metrics": {"like_count": 1}, }, ], @@ -295,6 +342,58 @@ def test_api_client_fetch_tweet_detail_uses_lookup_and_search(monkeypatch) -> No assert session.calls[1][2]["query"] == "conversation_id:123" +def test_api_client_fetch_tweet_detail_auto_tries_full_archive_for_old_posts(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": { + "id": "123", + "text": "root", + "author_id": "u1", + "conversation_id": 
"123", + "created_at": "2026-01-01T12:00:00.000Z", + "public_metrics": {"reply_count": 1}, + }, + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + }, + ), + DummyResponse(403, {"detail": "forbidden"}), + DummyResponse( + 200, + { + "data": [ + { + "id": "124", + "text": "recent reply", + "author_id": "u2", + "created_at": "2026-03-08T12:05:00.000Z", + "public_metrics": {"like_count": 1}, + } + ], + "includes": { + "users": [{"id": "u2", "name": "Bob", "username": "bob"}], + }, + "meta": {"result_count": 1}, + }, + ), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweets = client.fetch_tweet_detail("123", count=5, reply_scope="auto") + + assert [tweet.id for tweet in tweets] == ["123", "124"] + assert session.calls[1][1].endswith("/tweets/search/all") + assert session.calls[2][1].endswith("/tweets/search/recent") + + def test_api_client_fetch_bookmarks_requires_user_context(monkeypatch) -> None: monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) @@ -370,6 +469,46 @@ def test_api_client_fetch_article_parses_article_fields(monkeypatch) -> None: assert tweet.article_text == "Body text" +def test_api_client_fetch_article_merges_article_media(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": { + "id": "56", + "text": "article teaser", + "author_id": "u1", + "created_at": "2026-03-08T12:00:00.000Z", + "public_metrics": {}, + "article": {"title": "Title", "text": "Body text", "cover_media_key": "m2"}, + }, + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + "media": [ + { + "media_key": "m2", + "type": "photo", + "url": "https://img.example/article.jpg", 
+ "width": 1280, + "height": 720, + } + ], + }, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + tweet = client.fetch_article("56") + + assert tweet.media[0].url == "https://img.example/article.jpg" + + def test_api_client_fetch_user_likes_uses_liked_tweets_endpoint(monkeypatch) -> None: monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) @@ -404,6 +543,94 @@ def test_api_client_fetch_user_likes_uses_liked_tweets_endpoint(monkeypatch) -> assert session.calls[0][1].endswith("/users/42/liked_tweets") +def test_api_client_fetch_list_returns_metadata(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": { + "id": "200", + "name": "Python", + "description": "Language news", + "owner_id": "u1", + "follower_count": 12, + "member_count": 34, + "private": False, + "created_at": "2026-03-01T00:00:00.000Z", + }, + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + twitter_list = client.fetch_list("200") + + assert twitter_list.name == "Python" + assert twitter_list.owner_screen_name == "alice" + assert session.calls[0][1].endswith("/lists/200") + + +def test_api_client_fetch_owned_lists_uses_owned_lists_endpoint(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_BEARER_TOKEN", "bearer-token") + monkeypatch.delenv("TWITTER_API_ACCESS_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [{"id": "201", "name": "Owned", "owner_id": "u1"}], + "includes": { + "users": [{"id": "u1", 
"name": "Alice", "username": "alice"}], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + twitter_lists = client.fetch_owned_lists("42", count=1) + + assert [twitter_list.id for twitter_list in twitter_lists] == ["201"] + assert session.calls[0][1].endswith("/users/42/owned_lists") + + +def test_api_client_fetch_followed_lists_requires_user_context(monkeypatch) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + session = DummySession( + [ + DummyResponse( + 200, + { + "data": [{"id": "202", "name": "Followed", "owner_id": "u1"}], + "includes": { + "users": [{"id": "u1", "name": "Alice", "username": "alice"}], + }, + "meta": {"result_count": 1}, + }, + ) + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + twitter_lists = client.fetch_followed_lists("42", count=1) + + assert [twitter_list.id for twitter_list in twitter_lists] == ["202"] + assert session.calls[0][1].endswith("/users/42/followed_lists") + assert session.calls[0][4]["Authorization"] == "Bearer access-token" + + def test_api_client_bookmark_write_endpoints_use_user_context(monkeypatch) -> None: monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) diff --git a/tests/test_cli.py b/tests/test_cli.py index f8b11fe..487c9fe 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -10,7 +10,7 @@ from twitter_cli.cli import cli from twitter_cli.formatter import article_to_markdown, print_tweet_table -from twitter_cli.models import Author, Metrics, Tweet, UserProfile +from twitter_cli.models import Author, Metrics, Tweet, TwitterList, UserProfile from twitter_cli.serialization import 
tweets_to_json @@ -122,9 +122,10 @@ def test_cli_user_error_yaml(monkeypatch) -> None: def test_cli_tweet_accepts_shared_url_with_query(monkeypatch) -> None: class FakeClient: - def fetch_tweet_detail(self, tweet_id: str, max_count: int): + def fetch_tweet_detail(self, tweet_id: str, max_count: int, reply_scope: str = "auto"): assert tweet_id == "12345" assert max_count == 50 + assert reply_scope == "auto" return [] monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) @@ -337,6 +338,66 @@ def fetch_home_timeline(self, count: int): assert payload["data"] == [] +def test_cli_mentions_command(monkeypatch) -> None: + class FakeClient: + def fetch_user(self, screen_name: str) -> UserProfile: + return UserProfile(id="42", name="Alice", screen_name=screen_name) + + def fetch_mentions(self, user_id: str, count: int): + assert user_id == "42" + assert count == 50 + return [] + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + monkeypatch.setattr("twitter_cli.cli.load_config", lambda: {"fetch": {"count": 50}, "filter": {}, "rateLimit": {}}) + runner = CliRunner() + + result = runner.invoke(cli, ["mentions", "alice", "--json"]) + + assert result.exit_code == 0 + payload = yaml.safe_load(result.output) + assert payload["ok"] is True + assert payload["data"] == [] + + +def test_cli_list_info_command(monkeypatch) -> None: + class FakeClient: + def fetch_list(self, list_id: str) -> TwitterList: + assert list_id == "123" + return TwitterList(id=list_id, name="Python", owner_screen_name="alice") + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + runner = CliRunner() + + result = runner.invoke(cli, ["list-info", "123", "--json"]) + + assert result.exit_code == 0 + payload = yaml.safe_load(result.output) + assert payload["data"]["id"] == "123" + assert payload["data"]["ownerScreenName"] == "alice" + + +def 
test_cli_owned_lists_command(monkeypatch) -> None: + class FakeClient: + def fetch_user(self, screen_name: str) -> UserProfile: + return UserProfile(id="42", name="Alice", screen_name=screen_name) + + def fetch_owned_lists(self, user_id: str, count: int): + assert user_id == "42" + assert count == 50 + return [TwitterList(id="1", name="Python", owner_screen_name="alice")] + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + monkeypatch.setattr("twitter_cli.cli.load_config", lambda: {"fetch": {"count": 50}, "filter": {}, "rateLimit": {}}) + runner = CliRunner() + + result = runner.invoke(cli, ["owned-lists", "alice", "--json"]) + + assert result.exit_code == 0 + payload = yaml.safe_load(result.output) + assert payload["data"][0]["id"] == "1" + + def test_cli_whoami_auto_yaml(monkeypatch) -> None: class FakeClient: def fetch_me(self) -> UserProfile: @@ -529,9 +590,10 @@ def test_cli_search_advanced_options(monkeypatch) -> None: captured = {} class FakeClient: - def fetch_search(self, query: str, count: int, product: str): + def fetch_search(self, query: str, count: int, product: str, scope: str): captured["query"] = query captured["product"] = product + captured["scope"] = scope return [] monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) @@ -559,14 +621,16 @@ def fetch_search(self, query: str, count: int, product: str): "filter:links -filter:retweets min_faves:100" ) assert captured["product"] == "Latest" + assert captured["scope"] == "recent" def test_cli_search_operators_only_no_query(monkeypatch) -> None: captured = {} class FakeClient: - def fetch_search(self, query: str, count: int, product: str): + def fetch_search(self, query: str, count: int, product: str, scope: str): captured["query"] = query + captured["scope"] = scope return [] monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) @@ -579,6 +643,28 @@ def 
fetch_search(self, query: str, count: int, product: str): result = runner.invoke(cli, ["search", "--from", "bbc", "--json"]) assert result.exit_code == 0, f"search failed: {result.output}" assert captured["query"] == "from:bbc" + assert captured["scope"] == "recent" + + +def test_cli_search_all_scope(monkeypatch) -> None: + captured = {} + + class FakeClient: + def fetch_search(self, query: str, count: int, product: str, scope: str): + captured["scope"] = scope + return [] + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + monkeypatch.setattr( + "twitter_cli.cli.load_config", + lambda: {"fetch": {"count": 50}, "filter": {}, "rateLimit": {}}, + ) + runner = CliRunner() + + result = runner.invoke(cli, ["search", "python", "--scope", "all", "--json"]) + + assert result.exit_code == 0 + assert captured["scope"] == "all" def test_cli_search_empty_query_no_options() -> None: @@ -641,8 +727,9 @@ def test_show_happy_path(monkeypatch, tmp_path, tweet_factory): monkeypatch.setattr("twitter_cli.cache._CACHE_FILE", cache_file) class FakeClient: - def fetch_tweet_detail(self, tweet_id, count): + def fetch_tweet_detail(self, tweet_id, count, reply_scope="auto"): assert tweet_id == "42" + assert reply_scope == "auto" return [tw] monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) @@ -653,6 +740,26 @@ def fetch_tweet_detail(self, tweet_id, count): assert result.exit_code == 0 +def test_cli_tweet_reply_scope_passed_to_client(monkeypatch) -> None: + class FakeClient: + def fetch_tweet_detail(self, tweet_id: str, max_count: int, reply_scope: str = "auto"): + assert tweet_id == "12345" + assert max_count == 50 + assert reply_scope == "all" + return [] + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + monkeypatch.setattr( + "twitter_cli.cli.load_config", + lambda: {"fetch": {"count": 50}, "filter": {}, "rateLimit": {}}, + ) + runner = 
CliRunner() + + result = runner.invoke(cli, ["tweet", "12345", "--reply-scope", "all"]) + + assert result.exit_code == 0 + + def test_show_empty_cache(monkeypatch, tmp_path): """show fails with a helpful message when no cache exists.""" cache_file = tmp_path / "last_results.json" diff --git a/twitter_cli/api_client.py b/twitter_cli/api_client.py index 291cacc..603ebbd 100644 --- a/twitter_cli/api_client.py +++ b/twitter_cli/api_client.py @@ -3,6 +3,7 @@ from __future__ import annotations import base64 +from datetime import datetime, timedelta, timezone import json import logging import mimetypes @@ -20,7 +21,7 @@ NotFoundError, TwitterAPIError, ) -from .models import Author, Metrics, Tweet, TweetMedia, UserProfile +from .models import Author, Metrics, Tweet, TweetMedia, TwitterList, UserProfile logger = logging.getLogger(__name__) @@ -30,6 +31,7 @@ _TWEET_FIELDS = "attachments,author_id,created_at,entities,lang,public_metrics,referenced_tweets" _MEDIA_FIELDS = "media_key,preview_image_url,type,url,width,height" _TWEET_EXPANSIONS = "author_id,attachments.media_keys,referenced_tweets.id,referenced_tweets.id.author_id" +_LIST_FIELDS = "created_at,description,follower_count,id,member_count,name,owner_id,private" _DETAIL_TWEET_FIELDS = ( "article,attachments,author_id,conversation_id,created_at,entities,in_reply_to_user_id," "lang,note_tweet,public_metrics,referenced_tweets" @@ -135,7 +137,13 @@ def fetch_user_tweets(self, user_id: str, count: int = 20) -> List[Tweet]: }, ) - def fetch_search(self, query: str, count: int = 20, product: str = "Top") -> List[Tweet]: + def fetch_search( + self, + query: str, + count: int = 20, + product: str = "Top", + scope: str = "recent", + ) -> List[Tweet]: search_query = query sort_order = "relevancy" normalized_product = (product or "Top").strip().lower() @@ -149,7 +157,7 @@ def fetch_search(self, query: str, count: int = 20, product: str = "Top") -> Lis search_query = "%s has:videos" % query return self._paginate_tweets( - 
"/tweets/search/recent", + self._search_path(scope), count, { "query": search_query, @@ -175,6 +183,18 @@ def fetch_following(self, user_id: str, count: int = 20) -> List[UserProfile]: {"user.fields": _USER_FIELDS}, ) + def fetch_mentions(self, user_id: str, count: int = 20) -> List[Tweet]: + return self._paginate_tweets( + "/users/%s/mentions" % user_id, + count, + { + "tweet.fields": _DETAIL_TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + }, + ) + # ── Write operations ───────────────────────────────────────────── def create_tweet( @@ -318,7 +338,7 @@ def fetch_user_likes(self, user_id: str, count: int = 20) -> List[Tweet]: }, ) - def fetch_tweet_detail(self, tweet_id: str, count: int = 20) -> List[Tweet]: + def fetch_tweet_detail(self, tweet_id: str, count: int = 20, reply_scope: str = "auto") -> List[Tweet]: root_data, includes = self._lookup_tweet_payload(tweet_id, include_article=True) root_tweets = self._parse_tweets([root_data], includes) if not root_tweets: @@ -333,17 +353,11 @@ def fetch_tweet_detail(self, tweet_id: str, count: int = 20) -> List[Tweet]: if conversation_id == root_tweet.id else "in_reply_to_tweet_id:%s" % root_tweet.id ) - replies = self._paginate_tweets( - "/tweets/search/recent", + replies = self._fetch_conversation_replies( max(count * 2, count), - { - "query": reply_query, - "sort_order": "recency", - "tweet.fields": _DETAIL_TWEET_FIELDS, - "expansions": _TWEET_EXPANSIONS, - "user.fields": _USER_FIELDS, - "media.fields": _MEDIA_FIELDS, - }, + reply_query, + root_tweet.created_at, + reply_scope=reply_scope, ) filtered_replies = [tweet for tweet in replies if tweet.id != root_tweet.id] filtered_replies.sort(key=lambda tweet: tweet.created_at) @@ -371,6 +385,33 @@ def fetch_list_timeline(self, list_id: str, count: int = 20) -> List[Tweet]: }, ) + def fetch_list(self, list_id: str) -> TwitterList: + data = self._api_request( + "GET", + "/lists/%s" % list_id, + params={ + 
"list.fields": _LIST_FIELDS, + "expansions": "owner_id", + "user.fields": _USER_FIELDS, + }, + ) + list_data = data.get("data") + if not isinstance(list_data, dict): + raise NotFoundError("List %s not found" % list_id) + raw_includes = data.get("includes") + includes: Dict[str, Any] = raw_includes if isinstance(raw_includes, dict) else {} + return self._parse_list(list_data, includes) + + def fetch_owned_lists(self, user_id: str, count: int = 20) -> List[TwitterList]: + return self._paginate_lists("/users/%s/owned_lists" % user_id, count) + + def fetch_followed_lists(self, user_id: str, count: int = 20) -> List[TwitterList]: + return self._paginate_lists( + "/users/%s/followed_lists" % user_id, + count, + require_user_context=True, + ) + def bookmark_tweet(self, tweet_id: str) -> bool: self._api_request( "POST", @@ -438,6 +479,64 @@ def _authenticated_user_id(self) -> str: return self._configured_user_id return self.fetch_me().id + def _search_path(self, scope: str) -> str: + normalized = (scope or "recent").strip().lower() + if normalized == "all": + return "/tweets/search/all" + return "/tweets/search/recent" + + def _fetch_conversation_replies( + self, + count: int, + query: str, + root_created_at: str, + *, + reply_scope: str, + ) -> List[Tweet]: + params = { + "query": query, + "sort_order": "recency", + "tweet.fields": _DETAIL_TWEET_FIELDS, + "expansions": _TWEET_EXPANSIONS, + "user.fields": _USER_FIELDS, + "media.fields": _MEDIA_FIELDS, + } + scope = (reply_scope or "auto").strip().lower() + if scope == "recent": + return self._paginate_tweets("/tweets/search/recent", count, params) + if scope == "all": + return self._paginate_tweets("/tweets/search/all", count, params) + + preferred_path = "/tweets/search/all" if self._is_older_than_recent_search(root_created_at) else "/tweets/search/recent" + replies = self._try_paginate_tweets(preferred_path, count, params) + if replies is not None: + return replies + fallback_path = "/tweets/search/recent" if 
preferred_path.endswith("/all") else "/tweets/search/all" + fallback = self._try_paginate_tweets(fallback_path, count, params) + return fallback if fallback is not None else [] + + def _is_older_than_recent_search(self, created_at: str) -> bool: + if not created_at: + return False + try: + created = datetime.fromisoformat(created_at.replace("Z", "+00:00")) + except ValueError: + return False + return created < datetime.now(timezone.utc) - timedelta(days=7) + + def _try_paginate_tweets( + self, + path: str, + count: int, + params: Dict[str, Any], + ) -> Optional[List[Tweet]]: + try: + return self._paginate_tweets(path, count, params) + except TwitterAPIError as exc: + if path.endswith("/all") and exc.status_code in (403, 404): + return None + raise + def _lookup_tweet_payload( self, tweet_id: str, @@ -463,6 +562,58 @@ def _lookup_tweet_payload( includes: Dict[str, Any] = raw_includes if isinstance(raw_includes, dict) else {} return tweet, includes + def _paginate_lists( + self, + path: str, + count: int, + *, + require_user_context: bool = False, + ) -> List[TwitterList]: + if count <= 0: + return [] + count = min(count, self._max_count) + twitter_lists: List[TwitterList] = [] + seen_ids = set() + next_token: Optional[str] = None + + while len(twitter_lists) < count: + params: Dict[str, Any] = { + "list.fields": _LIST_FIELDS, + "expansions": "owner_id", + "user.fields": _USER_FIELDS, + "max_results": max(10, min(100, count - len(twitter_lists))), + } + if next_token: + params["pagination_token"] = next_token + + data = self._api_request( + "GET", + path, + params=params, + require_user_context=require_user_context, + ) + raw_items = data.get("data") + items: List[Any] = raw_items if isinstance(raw_items, list) else [] + raw_includes = data.get("includes") + includes: Dict[str, Any] = raw_includes if isinstance(raw_includes, dict) else {} + for item in items: + if not isinstance(item, dict): + continue + twitter_list = self._parse_list(item, includes) + if 
twitter_list.id and twitter_list.id not in seen_ids: + seen_ids.add(twitter_list.id) + twitter_lists.append(twitter_list) + if len(twitter_lists) >= count: + break + + meta = data.get("meta") or {} + next_token = str(meta.get("next_token") or "") + if not next_token or len(twitter_lists) >= count: + break + self._sleep_between_pages() + + return twitter_lists[:count] + def _wait_for_media(self, media_id: str, processing_info: Any) -> None: current_info: Dict[str, Any] = processing_info if isinstance(processing_info, dict) else {} while current_info: @@ -727,10 +878,12 @@ def _parse_tweet( article = tweet.get("article") or {} entities = tweet.get("entities") or {} media_items: List[TweetMedia] = [] + seen_media_keys = set() for media_key in attachments.get("media_keys") or []: media = media_map.get(str(media_key)) if not media: continue + seen_media_keys.add(str(media_key)) media_items.append( TweetMedia( type=str(media.get("type") or ""), @@ -762,6 +915,21 @@ def _parse_tweet( article_title = self._extract_article_title(article) article_text = self._extract_article_text(article) + for media_key in self._extract_article_media_keys(article): + if media_key in seen_media_keys: + continue + media = media_map.get(media_key) + if not media: + continue + seen_media_keys.add(media_key) + media_items.append( + TweetMedia( + type=str(media.get("type") or ""), + url=str(media.get("url") or media.get("preview_image_url") or ""), + width=cast(Optional[int], media.get("width")), + height=cast(Optional[int], media.get("height")), + ) + ) text = str(note_tweet.get("text") or tweet.get("text") or "") return Tweet( @@ -792,14 +960,51 @@ def _parse_tweet( article_text=article_text, ) + def _parse_list(self, data: Dict[str, Any], includes: Dict[str, Any]) -> TwitterList: + raw_owners = includes.get("users") + owners: List[Any] = raw_owners if isinstance(raw_owners, list) else [] + owner_map = { + str(owner.get("id")): owner + for owner in owners + if isinstance(owner, dict) and 
owner.get("id") + } + owner = owner_map.get(str(data.get("owner_id")), {}) + return TwitterList( + id=str(data.get("id") or ""), + name=str(data.get("name") or ""), + owner_screen_name=str(owner.get("username") or ""), + description=str(data.get("description") or ""), + follower_count=int(data.get("follower_count") or 0), + member_count=int(data.get("member_count") or 0), + private=bool(data.get("private", False)), + created_at=str(data.get("created_at") or ""), + ) + def _extract_article_title(self, article: Any) -> Optional[str]: return self._find_nested_text(article, ["title", "headline", "display_title", "name"]) def _extract_article_text(self, article: Any) -> Optional[str]: - return self._find_nested_text( + direct = self._find_nested_text( article, ["text", "plain_text", "body", "content", "description", "summary", "markdown"], ) + if direct: + return direct + parts = self._collect_article_text_parts(article) + if parts: + return "\n\n".join(parts) + return None + + def _extract_article_media_keys(self, article: Any) -> List[str]: + keys: List[str] = [] + self._collect_article_media_keys(article, keys) + deduped: List[str] = [] + seen = set() + for key in keys: + if key and key not in seen: + seen.add(key) + deduped.append(key) + return deduped def _find_nested_text(self, value: Any, candidate_keys: List[str]) -> Optional[str]: if isinstance(value, dict): @@ -818,3 +1023,49 @@ def _find_nested_text(self, value: Any, candidate_keys: List[str]) -> Optional[s if found: return found return None + + def _collect_article_text_parts(self, value: Any) -> List[str]: + parts: List[str] = [] + if isinstance(value, dict): + for key in ("text", "plain_text", "content", "description", "summary", "markdown"): + candidate = value.get(key) + if isinstance(candidate, str) and candidate.strip(): + parts.append(candidate.strip()) + for nested_key in ("blocks", "items", "paragraphs", "sections", "children", "content"): + nested = value.get(nested_key) + 
parts.extend(self._collect_article_text_parts(nested))
+        return self._dedupe_text_parts(parts)
+        if isinstance(value, list):
+            for item in value:
+                parts.extend(self._collect_article_text_parts(item))
+        return self._dedupe_text_parts(parts)
+
+    def _collect_article_media_keys(self, value: Any, keys: List[str]) -> None:
+        if isinstance(value, dict):
+            media_key = value.get("media_key")
+            if isinstance(media_key, str):
+                keys.append(media_key)
+            cover_media_key = value.get("cover_media_key")
+            if isinstance(cover_media_key, str):
+                keys.append(cover_media_key)
+            media_keys = value.get("media_keys")
+            if isinstance(media_keys, list):
+                for media_key_item in media_keys:
+                    if isinstance(media_key_item, str):
+                        keys.append(media_key_item)
+            for nested in value.values():
+                self._collect_article_media_keys(nested, keys)
+        elif isinstance(value, list):
+            for item in value:
+                self._collect_article_media_keys(item, keys)
+
+    def _dedupe_text_parts(self, parts: List[str]) -> List[str]:
+        deduped: List[str] = []
+        seen = set()
+        for part in parts:
+            normalized = part.strip()
+            if not normalized or normalized in seen:
+                continue
+            seen.add(normalized)
+            deduped.append(normalized)
+        return deduped
diff --git a/twitter_cli/cli.py b/twitter_cli/cli.py
index 21ae054..c689a92 100644
--- a/twitter_cli/cli.py
+++ b/twitter_cli/cli.py
@@ -1,17 +1,21 @@
 """CLI entry point for twitter-cli.
Read commands: - twitter feed # home timeline (For You) + twitter feed # home timeline twitter feed -t following # following feed twitter bookmarks # bookmarks twitter search "query" # search tweets twitter search "query" --from user # advanced search twitter user elonmusk # user profile twitter user-posts elonmusk # user tweets + twitter mentions elonmusk # user mentions twitter likes elonmusk # user likes twitter tweet # tweet detail + replies twitter article # Twitter Article as Markdown twitter list # list timeline + twitter list-info # list metadata + twitter owned-lists # user-owned lists + twitter followed-lists # followed lists twitter followers # followers list twitter following # following list twitter whoami # current user profile @@ -54,6 +58,8 @@ article_to_markdown, print_filter_stats, print_article, + print_list_detail, + print_list_table, print_tweet_detail, print_tweet_table, print_user_profile, @@ -71,6 +77,8 @@ use_rich_output, ) from .serialization import ( + twitter_list_to_dict, + twitter_lists_to_data, tweet_to_dict, tweets_from_json, tweets_to_data, @@ -90,10 +98,12 @@ logger = logging.getLogger(__name__) console = Console(stderr=True) -FEED_TYPES = ["for-you", "following"] +FEED_TYPES = ["home", "for-you", "following"] SEARCH_PRODUCTS = ["Top", "Latest", "Photos", "Videos"] +SEARCH_SCOPES = ["recent", "all"] SEARCH_HAS_CHOICES = ["links", "images", "videos", "media"] SEARCH_EXCLUDE_CHOICES = ["retweets", "replies", "links"] +REPLY_SCOPES = ["auto", "recent", "all"] AUTH_MODES = ["auto", "cookie", "api"] @@ -403,8 +413,8 @@ def _run(): "-t", "feed_type", type=click.Choice(FEED_TYPES), - default="for-you", - help="Feed type: for-you (algorithmic) or following (chronological).", + default="home", + help="Feed type: home, for-you, or following.", ) @click.option("--max", "-n", "max_count", type=int, default=None, help="Max number of tweets to fetch.") @structured_output_options @@ -432,6 +442,11 @@ def feed(ctx, feed_type, max_count, as_json, 
as_yaml, input_file, output_file, d label = "following feed" if feed_type == "following" else "home timeline" if rich_output: console.print("📡 Fetching %s (%d tweets)...\n" % (label, fetch_count)) + if isinstance(client, TwitterAPIv2Client) and feed_type == "for-you": + console.print( + "[yellow]⚠️ API mode does not expose the algorithmic For You feed. " + "Using the official reverse-chronological home timeline instead.[/yellow]\n" + ) start = time.time() if feed_type == "following": tweets = client.fetch_following_feed(fetch_count) @@ -557,6 +572,44 @@ def _run(): _run_guarded(_run) +@cli.command() +@click.argument("screen_name") +@click.option("--max", "-n", "max_count", type=int, default=None, help="Max number of tweets to fetch.") +@structured_output_options +@click.option("--output", "-o", "output_file", type=str, default=None, help="Save tweets to JSON file.") +@click.option("--filter", "do_filter", is_flag=True, help="Enable score-based filtering.") +@click.option("--full-text", is_flag=True, help="Show full tweet text in table output.") +@click.pass_context +def mentions(ctx, screen_name, max_count, as_json, as_yaml, output_file, do_filter, full_text): + # type: (Any, str, int, bool, bool, Optional[str], bool, bool) -> None + """Show posts mentioning a user. SCREEN_NAME is the @handle (without @).""" + screen_name = screen_name.lstrip("@") + compact = ctx.obj.get("compact", False) + config = load_config() + + def _run(): + rich_output = use_rich_output(as_json=as_json, as_yaml=as_yaml, compact=compact) + client = _get_client(config, quiet=not rich_output) + if rich_output: + console.print("👤 Fetching @%s's profile..." 
% screen_name) + profile = client.fetch_user(screen_name) + _fetch_and_display( + lambda count: client.fetch_mentions(profile.id, count), + "@%s mentions" % screen_name, + "🔔", + max_count, + as_json, + as_yaml, + output_file, + do_filter, + config, + compact=compact, + full_text=full_text, + ) + + _run_guarded(_run) + + @cli.command() @click.argument("query", default="") @click.option( @@ -567,6 +620,12 @@ def _run(): default="Top", help="Search tab: Top, Latest, Photos, or Videos.", ) +@click.option( + "--scope", + type=click.Choice(SEARCH_SCOPES, case_sensitive=False), + default="recent", + help="Search scope: recent or all.", +) @click.option("--from", "from_user", type=str, default=None, help="Only tweets from this user.") @click.option("--to", "to_user", type=str, default=None, help="Only tweets directed at this user.") @click.option("--lang", type=str, default=None, help="Filter by language (ISO code, e.g. en, fr, ja).") @@ -592,8 +651,8 @@ def _run(): @click.option("--filter", "do_filter", is_flag=True, help="Enable score-based filtering.") @click.option("--full-text", is_flag=True, help="Show full tweet text in table output.") @click.pass_context -def search(ctx, query, product, from_user, to_user, lang, since, until, has, exclude, min_likes, min_retweets, max_count, as_json, as_yaml, output_file, do_filter, full_text): - # type: (Any, str, str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], tuple, tuple, Optional[int], Optional[int], int, bool, bool, Optional[str], bool, bool) -> None +def search(ctx, query, product, scope, from_user, to_user, lang, since, until, has, exclude, min_likes, min_retweets, max_count, as_json, as_yaml, output_file, do_filter, full_text): + # type: (Any, str, str, str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], tuple, tuple, Optional[int], Optional[int], int, bool, bool, Optional[str], bool, bool) -> None """Search tweets by QUERY string with optional advanced filters. 
QUERY is the search keywords (optional when using advanced filters). @@ -632,8 +691,15 @@ def _run(): rich_output = use_rich_output(as_json=as_json, as_yaml=as_yaml, compact=compact) client = _get_client(config, quiet=not rich_output) _fetch_and_display( - lambda count: client.fetch_search(composed_query, count, product), - "'%s' (%s)" % (composed_query, product), "🔍", max_count, as_json, as_yaml, output_file, do_filter, config, + lambda count: client.fetch_search(composed_query, count, product, scope), + "'%s' (%s, %s)" % (composed_query, product, scope), + "🔍", + max_count, + as_json, + as_yaml, + output_file, + do_filter, + config, compact=compact, full_text=full_text, ) _run_guarded(_run) @@ -694,11 +760,17 @@ def _run(): @cli.command() @click.argument("tweet_id") @click.option("--max", "-n", "max_count", type=int, default=None, help="Max replies to fetch.") +@click.option( + "--reply-scope", + type=click.Choice(REPLY_SCOPES, case_sensitive=False), + default="auto", + help="Reply search scope: auto, recent, or all.", +) @click.option("--full-text", is_flag=True, help="Show full reply text in table output.") @structured_output_options @click.pass_context -def tweet(ctx, tweet_id, max_count, full_text, as_json, as_yaml): - # type: (Any, str, int, bool, bool, bool) -> None +def tweet(ctx, tweet_id, max_count, reply_scope, full_text, as_json, as_yaml): + # type: (Any, str, int, str, bool, bool, bool) -> None """View a tweet and its replies. 
TWEET_ID is the numeric tweet ID or full URL.""" compact = ctx.obj.get("compact", False) tweet_id = _normalize_tweet_id(tweet_id) @@ -709,7 +781,11 @@ def tweet(ctx, tweet_id, max_count, full_text, as_json, as_yaml): if rich_output: console.print("🐦 Fetching tweet %s...\n" % tweet_id) start = time.time() - tweets = client.fetch_tweet_detail(tweet_id, _resolve_configured_count(config, max_count)) + tweets = client.fetch_tweet_detail( + tweet_id, + _resolve_configured_count(config, max_count), + reply_scope=reply_scope, + ) elapsed = time.time() - start if rich_output: console.print("✅ Fetched %d tweets in %.1fs\n" % (len(tweets), elapsed)) @@ -746,12 +822,18 @@ def _print_show_hint(): @cli.command() @click.argument("index", type=click.IntRange(1)) @click.option("--max", "-n", "max_count", type=int, default=None, help="Max replies to fetch.") +@click.option( + "--reply-scope", + type=click.Choice(REPLY_SCOPES, case_sensitive=False), + default="auto", + help="Reply search scope: auto, recent, or all.", +) @click.option("--full-text", is_flag=True, help="Show full reply text in table output.") @click.option("--output", "-o", "output_file", type=str, default=None, help="Save tweet detail as JSON to file.") @structured_output_options @click.pass_context -def show(ctx, index, max_count, full_text, output_file, as_json, as_yaml): - # type: (Any, int, Optional[int], bool, Optional[str], bool, bool) -> None +def show(ctx, index, max_count, reply_scope, full_text, output_file, as_json, as_yaml): + # type: (Any, int, Optional[int], str, bool, Optional[str], bool, bool) -> None """View tweet #INDEX from the last feed/search results.""" compact = ctx.obj.get("compact", False) @@ -773,7 +855,11 @@ def show(ctx, index, max_count, full_text, output_file, as_json, as_yaml): if rich_output: console.print("🐦 Fetching tweet #%d (id: %s)...\n" % (index, tweet_id)) start = time.time() - tweets = client.fetch_tweet_detail(tweet_id, _resolve_configured_count(config, max_count)) + tweets = 
client.fetch_tweet_detail( + tweet_id, + _resolve_configured_count(config, max_count), + reply_scope=reply_scope, + ) elapsed = time.time() - start if rich_output: console.print("✅ Fetched %d tweets in %.1fs\n" % (len(tweets), elapsed)) @@ -856,6 +942,92 @@ def _run(): _run_guarded(_run) +@cli.command(name="list-info") +@click.argument("list_id") +@structured_output_options +def list_info(list_id, as_json, as_yaml): + # type: (str, bool, bool) -> None + """Show metadata for a Twitter List. LIST_ID is the numeric list ID.""" + config = load_config() + try: + rich_output = use_rich_output(as_json=as_json, as_yaml=as_yaml) + client = _get_client(config, quiet=not rich_output) + if rich_output: + console.print("📋 Fetching list %s..." % list_id) + twitter_list = client.fetch_list(list_id) + except (TwitterError, RuntimeError) as exc: + _exit_with_error(exc) + + if emit_structured(twitter_list_to_dict(twitter_list), as_json=as_json, as_yaml=as_yaml): + return + + console.print() + print_list_detail(twitter_list, console) + console.print() + + +def _fetch_and_display_lists( + screen_name: str, + fetch_fn_name: str, + label: str, + max_count: Optional[int], + as_json: bool, + as_yaml: bool, +) -> None: + """Shared fetch-and-display logic for list lookup commands.""" + screen_name = screen_name.lstrip("@") + config = load_config() + try: + rich_output = use_rich_output(as_json=as_json, as_yaml=as_yaml) + client = _get_client(config, quiet=not rich_output) + if rich_output: + console.print("👤 Fetching @%s's profile..." 
% screen_name) + profile = client.fetch_user(screen_name) + fetch_count = _resolve_configured_count(config, max_count) + if rich_output: + console.print("📚 Fetching %s (%d)...\n" % (label, fetch_count)) + start = time.time() + twitter_lists = getattr(client, fetch_fn_name)(profile.id, fetch_count) + elapsed = time.time() - start + if rich_output: + console.print("✅ Fetched %d %s in %.1fs\n" % (len(twitter_lists), label, elapsed)) + except (TwitterError, RuntimeError) as exc: + _exit_with_error(exc) + + if emit_structured(twitter_lists_to_data(twitter_lists), as_json=as_json, as_yaml=as_yaml): + return + + print_list_table(twitter_lists, console, title="📚 @%s %s — %d" % (screen_name, label, len(twitter_lists))) + console.print() + + +@cli.command(name="owned-lists") +@click.argument("screen_name") +@click.option("--max", "-n", "max_count", type=int, default=None, help="Max lists to fetch.") +@structured_output_options +def owned_lists(screen_name, max_count, as_json, as_yaml): + # type: (str, int, bool, bool) -> None + """List Twitter Lists owned by a user. SCREEN_NAME is the @handle (without @).""" + _fetch_and_display_lists(screen_name, "fetch_owned_lists", "owned lists", max_count, as_json, as_yaml) + + +@cli.command(name="followed-lists") +@click.argument("screen_name") +@click.option("--max", "-n", "max_count", type=int, default=None, help="Max lists to fetch.") +@structured_output_options +def followed_lists(screen_name, max_count, as_json, as_yaml): + # type: (str, int, bool, bool) -> None + """List Twitter Lists followed by a user. 
SCREEN_NAME is the @handle (without @).""" + _fetch_and_display_lists( + screen_name, + "fetch_followed_lists", + "followed lists", + max_count, + as_json, + as_yaml, + ) + + def _fetch_and_display_users( screen_name: str, fetch_fn_name: str, diff --git a/twitter_cli/client.py b/twitter_cli/client.py index 329d046..38e895e 100644 --- a/twitter_cli/client.py +++ b/twitter_cli/client.py @@ -38,6 +38,7 @@ MediaUploadError, NotFoundError, TwitterAPIError, + UnsupportedFeatureError, ) from .graphql import ( FALLBACK_QUERY_IDS, @@ -279,8 +280,8 @@ def get_likes_instructions(data): override_base_variables=True, ) - def fetch_search(self, query, count=20, product="Top"): - # type: (str, int, str) -> List[Tweet] + def fetch_search(self, query, count=20, product="Top", scope="recent"): + # type: (str, int, str, str) -> List[Tweet] """Search tweets by query. Args: @@ -302,8 +303,8 @@ def fetch_search(self, query, count=20, product="Top"): override_base_variables=True, ) - def fetch_tweet_detail(self, tweet_id, count=20): - # type: (str, int) -> List[Tweet] + def fetch_tweet_detail(self, tweet_id, count=20, reply_scope="auto"): + # type: (str, int, str) -> List[Tweet] """Fetch a tweet and its conversation thread (replies).""" return self._fetch_timeline( "TweetDetail", @@ -330,6 +331,10 @@ def fetch_tweet_detail(self, tweet_id, count=20): }, ) + def fetch_mentions(self, user_id, count=20): + # type: (str, int) -> List[Tweet] + raise UnsupportedFeatureError("`mentions` is supported only in official API mode.") + def fetch_article(self, tweet_id): # type: (str) -> Tweet """Fetch a Twitter Article by tweet ID.""" @@ -380,6 +385,18 @@ def fetch_list_timeline(self, list_id, count=20): override_base_variables=True, ) + def fetch_list(self, list_id): + # type: (str) -> Any + raise UnsupportedFeatureError("`list-info` is supported only in official API mode.") + + def fetch_owned_lists(self, user_id, count=20): + # type: (str, int) -> List[Any] + raise 
UnsupportedFeatureError("`owned-lists` is supported only in official API mode.") + + def fetch_followed_lists(self, user_id, count=20): + # type: (str, int) -> List[Any] + raise UnsupportedFeatureError("`followed-lists` is supported only in official API mode.") + def fetch_followers(self, user_id, count=20): # type: (str, int) -> List[UserProfile] """Fetch followers of a user.""" diff --git a/twitter_cli/formatter.py b/twitter_cli/formatter.py index 44564f0..355a981 100644 --- a/twitter_cli/formatter.py +++ b/twitter_cli/formatter.py @@ -10,7 +10,7 @@ from rich.panel import Panel from rich.table import Table -from .models import Tweet, UserProfile +from .models import Tweet, TwitterList, UserProfile from .timeutil import format_local_time, format_relative_time @@ -323,3 +323,68 @@ def print_user_table( table.add_row(str(i + 1), user_text, bio, stats) console.print(table) + + +def print_list_table( + twitter_lists: List[TwitterList], + console: Optional[Console] = None, + title: Optional[str] = None, +) -> None: + """Print Twitter lists as a rich table.""" + if console is None: + console = _make_console() + + if not title: + title = "📚 Lists — %d" % len(twitter_lists) + + table = Table(title=title, show_lines=True, expand=True) + table.add_column("#", style="dim", width=3, justify="right") + table.add_column("List", style="cyan", width=28) + table.add_column("Description", ratio=3) + table.add_column("Stats", style="green", width=18, no_wrap=True) + + for i, twitter_list in enumerate(twitter_lists): + privacy = "🔒 " if twitter_list.private else "" + owner = "@%s" % twitter_list.owner_screen_name if twitter_list.owner_screen_name else "unknown" + list_text = "%s%s\n%s" % (privacy, twitter_list.name, owner) + + description = (twitter_list.description or "").replace("\n", " ").strip() + if len(description) > 100: + description = description[:97] + "..." 
+ + stats = ( + "👥 %s followers\n📝 %s members" + % ( + format_number(twitter_list.follower_count), + format_number(twitter_list.member_count), + ) + ) + table.add_row(str(i + 1), list_text, description, stats) + + console.print(table) + + +def print_list_detail(twitter_list: TwitterList, console: Optional[Console] = None) -> None: + """Print a single Twitter list in detail using a rich panel.""" + if console is None: + console = _make_console() + + privacy = "🔒 Private" if twitter_list.private else "🌐 Public" + header = "%s (%s)" % (twitter_list.name, privacy) + lines = [] + if twitter_list.owner_screen_name: + lines.append("👤 @%s" % twitter_list.owner_screen_name) + if twitter_list.description: + lines.extend(["", twitter_list.description]) + lines.extend( + [ + "", + "👥 %s followers · 📝 %s members" + % (format_number(twitter_list.follower_count), format_number(twitter_list.member_count)), + "🆔 %s" % twitter_list.id, + ] + ) + if twitter_list.created_at: + lines.append("📅 Created %s" % twitter_list.created_at) + + console.print(Panel("\n".join(lines), title=header, border_style="magenta", expand=True)) diff --git a/twitter_cli/models.py b/twitter_cli/models.py index 6a4c660..661b1df 100644 --- a/twitter_cli/models.py +++ b/twitter_cli/models.py @@ -54,6 +54,18 @@ class Tweet: article_text: Optional[str] = None +@dataclass +class TwitterList: + id: str + name: str + owner_screen_name: str = "" + description: str = "" + follower_count: int = 0 + member_count: int = 0 + private: bool = False + created_at: str = "" + + @dataclass class UserProfile: id: str diff --git a/twitter_cli/serialization.py b/twitter_cli/serialization.py index fcfb709..a8cbfde 100644 --- a/twitter_cli/serialization.py +++ b/twitter_cli/serialization.py @@ -1,11 +1,11 @@ -"""Serialization helpers for Tweet and UserProfile models.""" +"""Serialization helpers for Tweet, TwitterList, and UserProfile models.""" from __future__ import annotations import json from typing import Any, Dict, Iterable, 
List, Optional -from .models import Author, Metrics, Tweet, TweetMedia, UserProfile +from .models import Author, Metrics, Tweet, TweetMedia, TwitterList, UserProfile from .timeutil import format_iso8601, format_local_time @@ -195,6 +195,21 @@ def user_profile_to_dict(user: UserProfile) -> Dict[str, Any]: } +def twitter_list_to_dict(twitter_list: TwitterList) -> Dict[str, Any]: + """Convert a TwitterList dataclass into a JSON-safe dict.""" + return { + "id": twitter_list.id, + "name": twitter_list.name, + "ownerScreenName": twitter_list.owner_screen_name, + "description": twitter_list.description, + "followers": twitter_list.follower_count, + "members": twitter_list.member_count, + "private": twitter_list.private, + "createdAt": twitter_list.created_at, + "createdAtISO": format_iso8601(twitter_list.created_at), + } + + def users_to_json(users: Iterable[UserProfile]) -> str: """Serialize UserProfile objects to pretty JSON.""" return json.dumps( @@ -209,6 +224,11 @@ def users_to_data(users: Iterable[UserProfile]) -> List[Dict[str, Any]]: return [user_profile_to_dict(user) for user in users] +def twitter_lists_to_data(twitter_lists: Iterable[TwitterList]) -> List[Dict[str, Any]]: + """Serialize TwitterList objects to Python dicts.""" + return [twitter_list_to_dict(twitter_list) for twitter_list in twitter_lists] + + def _optional_int(value: Any) -> Optional[int]: """Parse an optional integer value.""" if value is None: From 1d94fbd6b658f8435a81b0be0275683b899c2470 Mon Sep 17 00:00:00 2001 From: aaajiao Date: Mon, 16 Mar 2026 23:58:19 +0100 Subject: [PATCH 6/6] Add official API mode video uploads --- README.md | 14 +++- SKILL.md | 20 ++++- tests/test_api_client.py | 42 ++++++++++- tests/test_cli.py | 51 ++++++++++++- twitter_cli/api_client.py | 155 ++++++++++++++++++++++++++++++++++---- twitter_cli/cli.py | 64 ++++++++++++---- twitter_cli/client.py | 4 +- 7 files changed, 311 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 9c6576d..c48f867 100644 
--- a/README.md +++ b/README.md @@ -163,9 +163,13 @@ twitter following elonmusk --max 50 twitter post "Hello from twitter-cli!" twitter post "Hello!" --image photo.jpg # Post with image twitter post "Gallery" -i a.png -i b.jpg -i c.webp # Up to 4 images +twitter post "Watch this" --video clip.mp4 # API mode: post with video +twitter post "Watch this" --file clip.mp4 --alt-text "Demo clip" twitter post "reply text" --reply-to 1234567890 twitter reply 1234567890 "Nice!" -i screenshot.png # Reply with image +twitter reply 1234567890 "Nice!" --video clip.mp4 # API mode: reply with video twitter quote 1234567890 "Look" -i chart.png # Quote with image +twitter quote 1234567890 "Look" --video clip.mp4 # API mode: quote with video twitter post "Hello from twitter-cli!" --json twitter delete 1234567890 twitter like 1234567890 @@ -205,12 +209,13 @@ export TWITTER_API_USER_ID=... **twitter-cli API mode currently supports:** - Read: `feed`, `bookmarks`, `tweet`, `show`, `article`, `list`, `list-info`, `likes`, `mentions`, `search`, `user`, `user-posts`, `owned-lists`, `followed-lists`, `followers`, `following`, `status`, `whoami` - Write: `post`, `reply`, `quote`, `delete`, `like`, `unlike`, `retweet`, `unretweet`, `follow`, `unfollow`, `bookmark`, `unbookmark` -- Media: image upload in `post` / `reply` / `quote` +- Media: image and video upload in `post` / `reply` / `quote` **API mode notes:** - `feed` and `feed -t following` both use the official reverse-chronological home timeline endpoint exposed by the authenticated user's API access. - `search --scope all` uses the official full-archive search endpoint when your API access permits it. - `tweet` / `show` support `--reply-scope auto|recent|all`; `auto` prefers full-archive search for older posts and falls back when needed. +- `--video` / `--file` and `--alt-text` are currently intended for official API mode. 
- `article` uses the official tweet lookup response and renders article metadata/content when the API returns article fields for that post. **Chrome multi-profile**: All Chrome profiles are scanned automatically. To specify a profile: @@ -521,9 +526,13 @@ twitter following elonmusk twitter post "你好,世界!" twitter post "发图" --image photo.jpg # 带图发推 twitter post "多图" -i a.png -i b.jpg -i c.webp # 最多 4 张图片 +twitter post "看这个" --video clip.mp4 # API 模式:带视频发推 +twitter post "看这个" --file clip.mp4 --alt-text "演示视频" twitter post "回复内容" --reply-to 1234567890 twitter reply 1234567890 "回复" -i screenshot.png # 带图回复 +twitter reply 1234567890 "回复" --video clip.mp4 # API 模式:带视频回复 twitter quote 1234567890 "评论" -i chart.png # 带图引用 +twitter quote 1234567890 "评论" --video clip.mp4 # API 模式:带视频引用 twitter post "你好,世界!" --json twitter delete 1234567890 twitter like 1234567890 @@ -563,12 +572,13 @@ export TWITTER_API_USER_ID=... **twitter-cli API 模式当前支持:** - 读取:`feed`、`bookmarks`、`tweet`、`show`、`article`、`list`、`list-info`、`likes`、`mentions`、`search`、`user`、`user-posts`、`owned-lists`、`followed-lists`、`followers`、`following`、`status`、`whoami` - 写入:`post`、`reply`、`quote`、`delete`、`like`、`unlike`、`retweet`、`unretweet`、`follow`、`unfollow`、`bookmark`、`unbookmark` -- 媒体:`post` / `reply` / `quote` 支持图片上传 +- 媒体:`post` / `reply` / `quote` 支持图片和视频上传 **API 模式说明:** - `feed` 和 `feed -t following` 当前都映射到官方 reverse-chronological home timeline。 - `search --scope all` 会在权限允许时使用官方 full-archive search endpoint。 - `tweet` / `show` 支持 `--reply-scope auto|recent|all`;`auto` 会优先对较老的推文使用 full-archive 搜索,并在必要时自动回退。 +- `--video` / `--file` 和 `--alt-text` 当前主要面向官方 API 模式。 - `article` 基于官方 tweet lookup 返回的 article 字段做 best-effort 渲染;是否有正文取决于 API 返回内容。 **Chrome 多 Profile 支持**:会自动遍历所有 Chrome profile。也可以通过环境变量指定: diff --git a/SKILL.md b/SKILL.md index f397b1c..194802b 100644 --- a/SKILL.md +++ b/SKILL.md @@ -211,11 +211,15 @@ twitter following elonmusk --max 50 # Following twitter post "Hello from twitter-cli!" 
# Post tweet twitter post "Hello!" --image photo.jpg # Post with image twitter post "Gallery" -i a.png -i b.jpg # Up to 4 images +twitter post "Watch this" --video clip.mp4 # API mode: post with video +twitter post "Watch this" --file clip.mp4 --alt-text "Demo clip" twitter reply 1234567890 "Great tweet!" # Reply (standalone) twitter reply 1234567890 "Nice!" -i pic.png # Reply with image +twitter reply 1234567890 "Nice!" --video clip.mp4 # API mode: reply with video twitter post "reply text" --reply-to 1234567890 # Reply (via post) twitter quote 1234567890 "Interesting take" # Quote-tweet twitter quote 1234567890 "Look" -i chart.png # Quote with image +twitter quote 1234567890 "Look" --video clip.mp4 # API mode: quote with video twitter delete 1234567890 # Delete tweet twitter like 1234567890 # Like twitter unlike 1234567890 # Unlike @@ -227,11 +231,15 @@ twitter follow elonmusk # Follow user twitter unfollow elonmusk # Unfollow user ``` -**Image upload notes:** +**Media upload notes:** - Supported formats: JPEG, PNG, GIF, WebP - Max file size: 5 MB per image - Max 4 images per tweet - Use `--image` / `-i` (repeatable) +- In official API mode, you can also upload one video with `--video` / `--file` +- Supported video formats in API mode: MP4, MOV, WebM +- Max video size in API mode: 512 MB +- `--alt-text` currently applies to one uploaded media item ## Official API Mode Notes @@ -241,6 +249,7 @@ twitter unfollow elonmusk # Unfollow user - `auto` prefers full-archive search for older posts and falls back when `/search/all` is unavailable. - `article` is best-effort in API mode and depends on the article fields returned by official tweet lookup. - `mentions`, `list-info`, `owned-lists`, and `followed-lists` are available in official API mode. +- `--video` / `--file` and `--alt-text` are currently intended for official API mode. ## Agent Workflows @@ -261,6 +270,13 @@ twitter post "Check this out!" 
--image /path/to/photo.jpg twitter post "Photo gallery" -i img1.png -i img2.jpg -i img3.webp ``` +### Post with video (official API mode) + +```bash +twitter --auth-mode api post "Watch this" --video /path/to/clip.mp4 +twitter --auth-mode api post "Watch this" --file /path/to/clip.mp4 --alt-text "Demo clip" +``` + ### Reply to someone's latest tweet ```bash @@ -365,7 +381,7 @@ twitter bookmarks --filter ## Limitations -- **Images only** — video/GIF animation upload not yet supported (image upload supports JPEG/PNG/GIF/WebP) +- **Video upload is API-mode oriented** — `--video` / `--file` currently targets the official API backend - **No DMs** — no direct messaging - **No notifications** — can't read notifications - **No polls** — can't create polls diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 5402e7c..8945bc2 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -21,14 +21,15 @@ def json(self) -> dict: class DummySession: def __init__(self, responses: list[DummyResponse]) -> None: self._responses = responses - self.calls: list[tuple[str, str, dict | None, str | None, dict]] = [] + self.calls: list[tuple[str, str, dict | None, object, dict]] = [] def get(self, url: str, headers=None, params=None, timeout=None): self.calls.append(("GET", url, params, None, headers or {})) return self._responses.pop(0) - def post(self, url: str, headers=None, params=None, data=None, timeout=None): - self.calls.append(("POST", url, params, data, headers or {})) + def post(self, url: str, headers=None, params=None, data=None, files=None, timeout=None): + payload = {"data": data, "files": files} if files is not None else data + self.calls.append(("POST", url, params, payload, headers or {})) return self._responses.pop(0) def delete(self, url: str, headers=None, params=None, timeout=None): @@ -670,3 +671,38 @@ def test_api_client_upload_media_uses_v2_media_endpoint(monkeypatch, tmp_path) - payload = json.loads(session.calls[0][3]) assert 
payload["media_category"] == "tweet_image" assert payload["media_type"] == "image/png" + + +def test_api_client_upload_video_uses_chunked_flow_and_metadata(monkeypatch, tmp_path) -> None: + monkeypatch.setenv("TWITTER_API_ACCESS_TOKEN", "access-token") + monkeypatch.delenv("TWITTER_API_BEARER_TOKEN", raising=False) + video_path = tmp_path / "clip.mp4" + video_path.write_bytes(b"\x00" * 32) + session = DummySession( + [ + DummyResponse(200, {"data": {"id": "m1"}}), + DummyResponse(200, {}), + DummyResponse(200, {"data": {"processing_info": {"state": "pending", "check_after_secs": 0}}}), + DummyResponse(200, {"data": {"processing_info": {"state": "succeeded"}}}), + DummyResponse(200, {"data": {"updated": True}}), + ] + ) + monkeypatch.setattr("twitter_cli.api_client._get_api_session", lambda: session) + + client = TwitterAPIv2Client({"requestDelay": 0, "maxRetries": 1}) + media_id = client.upload_media(str(video_path), alt_text="demo clip") + + assert media_id == "m1" + assert session.calls[0][1].endswith("/media/upload/initialize") + init_payload = json.loads(session.calls[0][3]) + assert init_payload["media_category"] == "tweet_video" + assert session.calls[1][1].endswith("/media/upload/m1/append") + append_payload = session.calls[1][3] + assert isinstance(append_payload, dict) + assert append_payload["data"]["segment_index"] == "0" + assert session.calls[2][1].endswith("/media/upload/m1/finalize") + assert session.calls[3][1].endswith("/media/upload") + assert session.calls[3][2] == {"command": "STATUS", "media_id": "m1"} + assert session.calls[4][1].endswith("/media/metadata") + metadata_payload = json.loads(session.calls[4][3]) + assert metadata_payload["metadata"]["alt_text"]["text"] == "demo clip" diff --git a/tests/test_cli.py b/tests/test_cli.py index 487c9fe..045ab07 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -493,8 +493,9 @@ def test_cli_post_with_images_passes_media_ids(monkeypatch, tmp_path) -> None: calls = [] class FakeClient: - def 
upload_media(self, path: str) -> str: + def upload_media(self, path: str, alt_text=None) -> str: assert path == str(image_path) + assert alt_text is None return "m1" def create_tweet(self, text: str, reply_to_id=None, media_ids=None) -> str: @@ -510,6 +511,54 @@ def create_tweet(self, text: str, reply_to_id=None, media_ids=None) -> str: assert calls == [{"text": "hello", "reply_to_id": None, "media_ids": ["m1"]}] +def test_cli_post_with_video_passes_media_id_and_alt_text(monkeypatch, tmp_path) -> None: + video_path = tmp_path / "clip.mp4" + video_path.write_bytes(b"mp4") + calls = [] + + class FakeClient: + def upload_media(self, path: str, alt_text=None) -> str: + assert path == str(video_path) + assert alt_text == "demo clip" + return "v1" + + def create_tweet(self, text: str, reply_to_id=None, media_ids=None) -> str: + calls.append({"text": text, "reply_to_id": reply_to_id, "media_ids": media_ids}) + return "999" + + monkeypatch.setattr("twitter_cli.cli.TwitterAPIv2Client", FakeClient) + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + runner = CliRunner() + + result = runner.invoke( + cli, + ["post", "hello", "--video", str(video_path), "--alt-text", "demo clip", "--json"], + ) + + assert result.exit_code == 0 + assert calls == [{"text": "hello", "reply_to_id": None, "media_ids": ["v1"]}] + + +def test_cli_post_alt_text_requires_api_mode(monkeypatch, tmp_path) -> None: + image_path = tmp_path / "photo.png" + image_path.write_bytes(b"png") + + class FakeClient: + def upload_media(self, path: str, alt_text=None) -> str: + raise AssertionError("upload_media should not be called") + + monkeypatch.setattr("twitter_cli.cli._get_client", lambda config=None, quiet=False: FakeClient()) + runner = CliRunner() + + result = runner.invoke( + cli, + ["post", "hello", "--image", str(image_path), "--alt-text", "demo", "--json"], + ) + + assert result.exit_code != 0 + assert "--alt-text currently requires --auth-mode api." 
in result.output + + def test_cli_like_yaml_output(monkeypatch) -> None: class FakeClient: def like_tweet(self, tweet_id: str) -> bool: diff --git a/twitter_cli/api_client.py b/twitter_cli/api_client.py index 603ebbd..17c0e10 100644 --- a/twitter_cli/api_client.py +++ b/twitter_cli/api_client.py @@ -10,7 +10,7 @@ import os import time import urllib.parse -from typing import Any, Dict, List, Optional, cast +from typing import Any, Dict, Iterator, List, Optional, cast from curl_cffi import requests as _cffi_requests @@ -73,7 +73,17 @@ class TwitterAPIv2Client: "image/tiff", "image/webp", } + _SUPPORTED_GIF_TYPES = {"image/gif"} + _SUPPORTED_VIDEO_TYPES = { + "video/mp4", + "video/quicktime", + "video/webm", + "video/mp2t", + } _MAX_IMAGE_SIZE = 5 * 1024 * 1024 + _MAX_GIF_SIZE = 15 * 1024 * 1024 + _MAX_VIDEO_SIZE = 512 * 1024 * 1024 + _UPLOAD_CHUNK_SIZE = 4 * 1024 * 1024 def __init__(self, rate_limit_config: Optional[Dict[str, Any]] = None) -> None: self._access_token = os.environ.get("TWITTER_API_ACCESS_TOKEN", "").strip() @@ -431,28 +441,71 @@ def unbookmark_tweet(self, tweet_id: str) -> bool: self._write_delay() return True - def upload_media(self, path: str) -> str: + def upload_media(self, path: str, alt_text: Optional[str] = None) -> str: if not self._access_token: raise AuthenticationError("Official API media upload requires TWITTER_API_ACCESS_TOKEN.") if not os.path.isfile(path): raise MediaUploadError("File not found: %s" % path) file_size = os.path.getsize(path) - if file_size > self._MAX_IMAGE_SIZE: + media_type = mimetypes.guess_type(path)[0] or "" + media_category: Optional[str] = None + if media_type in self._SUPPORTED_IMAGE_TYPES: + max_size = self._MAX_IMAGE_SIZE + elif media_type in self._SUPPORTED_GIF_TYPES: + max_size = self._MAX_GIF_SIZE + media_category = "tweet_gif" + elif media_type in self._SUPPORTED_VIDEO_TYPES: + max_size = self._MAX_VIDEO_SIZE + media_category = "tweet_video" + else: raise MediaUploadError( - "File too large: %.1f MB (max 
%.0f MB)" - % (file_size / (1024 * 1024), self._MAX_IMAGE_SIZE / (1024 * 1024)), + "Unsupported media format: %s (supported: bmp, jpeg, png, tiff, webp, gif, mp4, mov, webm)" % media_type, ) - - media_type = mimetypes.guess_type(path)[0] or "" - if media_type not in self._SUPPORTED_IMAGE_TYPES: + if file_size > max_size: raise MediaUploadError( - "Unsupported image format: %s (supported: bmp, jpeg, png, tiff, webp)" % media_type, + "File too large: %.1f MB (max %.0f MB)" + % (file_size / (1024 * 1024), max_size / (1024 * 1024)), + ) + if media_category is None: + media_id = self._upload_simple_media(path, media_type) + else: + media_id = self._upload_chunked_media( + path, + media_type, + file_size, + media_category=media_category, ) + if alt_text: + self._apply_media_alt_text(media_id, alt_text) + return media_id + + # ── Internals ──────────────────────────────────────────────────── - with open(path, "rb") as image_file: - media = base64.b64encode(image_file.read()).decode("ascii") + def _authenticated_user_id(self) -> str: + if self._configured_user_id: + return self._configured_user_id + return self.fetch_me().id + def _api_headers(self, *, require_user_context: bool) -> Dict[str, str]: + token = self._access_token if require_user_context else (self._access_token or self._bearer_token) + if require_user_context and not token: + raise AuthenticationError( + "Official API user-context commands require TWITTER_API_ACCESS_TOKEN." + ) + if not token: + raise AuthenticationError( + "Official API mode requires TWITTER_API_ACCESS_TOKEN or TWITTER_API_BEARER_TOKEN." 
+ ) + return { + "Authorization": "Bearer %s" % token, + "Accept": "application/json", + "User-Agent": "twitter-cli", + } + + def _upload_simple_media(self, path: str, media_type: str) -> str: + with open(path, "rb") as media_file: + media = base64.b64encode(media_file.read()).decode("ascii") data = self._api_request( "POST", "/media/upload", @@ -472,12 +525,82 @@ def upload_media(self, path: str) -> str: self._wait_for_media(media_id, media_payload.get("processing_info")) return media_id - # ── Internals ──────────────────────────────────────────────────── + def _upload_chunked_media( + self, + path: str, + media_type: str, + file_size: int, + *, + media_category: str, + ) -> str: + initialized = self._api_request( + "POST", + "/media/upload/initialize", + json_body={ + "media_category": media_category, + "media_type": media_type, + "shared": False, + "total_bytes": file_size, + }, + require_user_context=True, + ) + raw_init_data = initialized.get("data") + init_data: Dict[str, Any] = raw_init_data if isinstance(raw_init_data, dict) else {} + media_id = str(init_data.get("id") or "") + if not media_id: + raise MediaUploadError("Media upload initialize did not return an id") - def _authenticated_user_id(self) -> str: - if self._configured_user_id: - return self._configured_user_id - return self.fetch_me().id + session = _get_api_session() + headers = self._api_headers(require_user_context=True) + for segment_index, chunk in enumerate(self._iter_file_chunks(path)): + response = session.post( + "%s/media/upload/%s/append" % (_API_BASE_URL, media_id), + headers=headers, + data={"segment_index": str(segment_index)}, + files={"media": ("chunk", chunk)}, + timeout=60, + ) + payload = self._safe_json(response) + if response.status_code >= 400: + raise MediaUploadError(self._extract_error_message(payload, response.text)) + + finalized = self._api_request( + "POST", + "/media/upload/%s/finalize" % media_id, + require_user_context=True, + ) + raw_finalize_data = 
finalized.get("data") + finalize_data: Dict[str, Any] = raw_finalize_data if isinstance(raw_finalize_data, dict) else {} + self._wait_for_media(media_id, finalize_data.get("processing_info")) + return media_id + + def _iter_file_chunks(self, path: str) -> Iterator[bytes]: + with open(path, "rb") as media_file: + while True: + chunk = media_file.read(self._UPLOAD_CHUNK_SIZE) + if not chunk: + break + yield chunk + + def _apply_media_alt_text(self, media_id: str, alt_text: str) -> None: + text = alt_text.strip() + if not text: + return + if len(text) > 1000: + raise MediaUploadError("Alt text must be 1000 characters or fewer.") + self._api_request( + "POST", + "/media/metadata", + json_body={ + "id": media_id, + "metadata": { + "alt_text": { + "text": text, + } + }, + }, + require_user_context=True, + ) def _search_path(self, scope: str) -> str: normalized = (scope or "recent").strip().lower() diff --git a/twitter_cli/cli.py b/twitter_cli/cli.py index c689a92..ef973ae 100644 --- a/twitter_cli/cli.py +++ b/twitter_cli/cli.py @@ -1088,9 +1088,23 @@ def following(screen_name, max_count, as_json, as_yaml): _MAX_IMAGES = 4 # Twitter allows up to 4 images per tweet -def _upload_images(client, image_paths, rich_output=True): - # type: (Any, tuple, bool) -> list - """Upload images and return list of media_id strings.""" +def _upload_media(client, image_paths, video_path=None, alt_text=None, rich_output=True): + # type: (Any, tuple, Optional[str], Optional[str], bool) -> list + """Upload selected media and return list of media_id strings.""" + if image_paths and video_path: + raise click.UsageError("Use either --image or --video/--file, not both.") + if alt_text and not isinstance(client, TwitterAPIv2Client): + raise click.UsageError("--alt-text currently requires --auth-mode api.") + if video_path and not isinstance(client, TwitterAPIv2Client): + raise click.UsageError("Video upload currently requires --auth-mode api.") + if alt_text and not (video_path or len(image_paths) 
== 1): + raise click.UsageError("--alt-text requires exactly one uploaded image or one uploaded video.") + + if video_path: + if rich_output: + console.print("📤 Uploading video: %s" % video_path) + return [client.upload_media(video_path, alt_text=alt_text)] + if not image_paths: return [] if len(image_paths) > _MAX_IMAGES: @@ -1099,7 +1113,7 @@ def _upload_images(client, image_paths, rich_output=True): for i, path in enumerate(image_paths, 1): if rich_output: console.print("📤 Uploading image %d/%d: %s" % (i, len(image_paths), path)) - media_ids.append(client.upload_media(path)) + media_ids.append(client.upload_media(path, alt_text=alt_text if len(image_paths) == 1 else None)) return media_ids @@ -1129,9 +1143,11 @@ def operation(client: Any) -> WritePayload: @click.argument("text") @click.option("--reply-to", "-r", default=None, help="Reply to this tweet ID.") @click.option("--image", "-i", "images", multiple=True, type=click.Path(exists=True), help="Attach image (up to 4). Repeatable.") +@click.option("--video", "--file", "video_path", type=click.Path(exists=True), default=None, help="Attach one video file in API mode.") +@click.option("--alt-text", type=str, default=None, help="Optional alt text for a single uploaded media file.") @structured_output_options -def post(text, reply_to, images, as_json, as_yaml): - # type: (str, Optional[str], tuple, bool, bool) -> None +def post(text, reply_to, images, video_path, alt_text, as_json, as_yaml): + # type: (str, Optional[str], tuple, Optional[str], Optional[str], bool, bool) -> None """Post a new tweet. TEXT is the tweet content. 
Attach images with --image / -i (up to 4): @@ -1144,7 +1160,13 @@ def post(text, reply_to, images, as_json, as_yaml): rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) def operation(client: Any) -> WritePayload: - media_ids = _upload_images(client, images, rich_output=rich_output) + media_ids = _upload_media( + client, + images, + video_path=video_path, + alt_text=alt_text, + rich_output=rich_output, + ) tweet_id = client.create_tweet(text, reply_to_id=reply_to, media_ids=media_ids or None) return {"success": True, "action": "post", "id": tweet_id, "url": "https://x.com/i/status/%s" % tweet_id} @@ -1164,14 +1186,22 @@ def operation(client: Any) -> WritePayload: @click.argument("tweet_id") @click.argument("text") @click.option("--image", "-i", "images", multiple=True, type=click.Path(exists=True), help="Attach image (up to 4). Repeatable.") +@click.option("--video", "--file", "video_path", type=click.Path(exists=True), default=None, help="Attach one video file in API mode.") +@click.option("--alt-text", type=str, default=None, help="Optional alt text for a single uploaded media file.") @structured_output_options -def reply_tweet(tweet_id, text, images, as_json, as_yaml): - # type: (str, str, tuple, bool, bool) -> None +def reply_tweet(tweet_id, text, images, video_path, alt_text, as_json, as_yaml): + # type: (str, str, tuple, Optional[str], Optional[str], bool, bool) -> None """Reply to a tweet. 
TWEET_ID is the tweet to reply to, TEXT is the reply content.""" tweet_id = _normalize_tweet_id(tweet_id) rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) def operation(client: Any) -> WritePayload: - media_ids = _upload_images(client, images, rich_output=rich_output) + media_ids = _upload_media( + client, + images, + video_path=video_path, + alt_text=alt_text, + rich_output=rich_output, + ) new_id = client.create_tweet(text, reply_to_id=tweet_id, media_ids=media_ids or None) return { "success": True, @@ -1197,14 +1227,22 @@ def operation(client: Any) -> WritePayload: @click.argument("tweet_id") @click.argument("text") @click.option("--image", "-i", "images", multiple=True, type=click.Path(exists=True), help="Attach image (up to 4). Repeatable.") +@click.option("--video", "--file", "video_path", type=click.Path(exists=True), default=None, help="Attach one video file in API mode.") +@click.option("--alt-text", type=str, default=None, help="Optional alt text for a single uploaded media file.") @structured_output_options -def quote_tweet(tweet_id, text, images, as_json, as_yaml): - # type: (str, str, tuple, bool, bool) -> None +def quote_tweet(tweet_id, text, images, video_path, alt_text, as_json, as_yaml): + # type: (str, str, tuple, Optional[str], Optional[str], bool, bool) -> None """Quote-tweet a tweet. 
TWEET_ID is the tweet to quote, TEXT is the commentary.""" tweet_id = _normalize_tweet_id(tweet_id) rich_output = not _structured_mode(as_json=as_json, as_yaml=as_yaml) def operation(client: Any) -> WritePayload: - media_ids = _upload_images(client, images, rich_output=rich_output) + media_ids = _upload_media( + client, + images, + video_path=video_path, + alt_text=alt_text, + rich_output=rich_output, + ) new_id = client.quote_tweet(tweet_id, text, media_ids=media_ids or None) return { "success": True, diff --git a/twitter_cli/client.py b/twitter_cli/client.py index 38e895e..742a4dc 100644 --- a/twitter_cli/client.py +++ b/twitter_cli/client.py @@ -426,8 +426,8 @@ def _write_delay(self): logger.debug("Write operation delay: %.1fs", delay) time.sleep(delay) - def upload_media(self, file_path): - # type: (str) -> str + def upload_media(self, file_path, alt_text=None): + # type: (str, Any) -> str """Upload an image file to Twitter. Returns the media_id string. Uses Twitter's chunked upload API (INIT → APPEND → FINALIZE).