diff --git a/aichat/.env.example b/aichat/.env.example new file mode 100644 index 0000000..09dad5e --- /dev/null +++ b/aichat/.env.example @@ -0,0 +1,16 @@ +# Environment template - copy to .env and fill in your values +# Get your API token from https://platform.acedata.cloud + +# AceDataCloud API Configuration (REQUIRED) +ACEDATACLOUD_API_TOKEN=your_api_token_here +ACEDATACLOUD_API_BASE_URL=https://api.acedata.cloud + +# AIChat Configuration (Optional) +AICHAT_REQUEST_TIMEOUT=60 + +# MCP Server Configuration (Optional) +MCP_SERVER_NAME=aichat +MCP_TRANSPORT=stdio + +# Logging (Optional) +LOG_LEVEL=INFO diff --git a/aichat/.gitignore b/aichat/.gitignore new file mode 100644 index 0000000..aa347c5 --- /dev/null +++ b/aichat/.gitignore @@ -0,0 +1,82 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Ruff +.ruff_cache/ + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# OS +.DS_Store +Thumbs.db + +# Project specific +*.log diff --git a/aichat/.python-version b/aichat/.python-version new file mode 100644 index 0000000..c8cfe39 --- /dev/null +++ b/aichat/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/aichat/.ruff.toml b/aichat/.ruff.toml new file mode 100644 index 0000000..b78284b --- /dev/null +++ b/aichat/.ruff.toml @@ -0,0 +1,38 @@ +target-version = "py310" +line-length = 100 +exclude = [ + ".git", + ".ruff_cache", + 
".venv", + "__pycache__", + "build", + "dist", +] + +[format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" + +[lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # Pyflakes + "I", # isort + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "UP", # pyupgrade + "ARG", # flake8-unused-arguments + "SIM", # flake8-simplify +] +ignore = [ + "E501", # line too long (handled by formatter) + "B008", # do not perform function calls in argument defaults + "E402", # module level import not at top of file (needed for dotenv loading) +] + +[lint.isort] +known-first-party = ["core", "tools", "prompts"] + diff --git a/aichat/CHANGELOG.md b/aichat/CHANGELOG.md new file mode 100644 index 0000000..0c9621a --- /dev/null +++ b/aichat/CHANGELOG.md @@ -0,0 +1,18 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [2026.1.0.1] - 2026-04-25 + +### Added +- Initial release of MCP AIChat server +- AI conversation tool supporting 70+ models +- Support for GPT-5, GPT-4, o-series, DeepSeek, Grok, and GLM models +- Stateful conversation mode for multi-turn dialogues +- Reference URLs support for context-aware responses +- Model listing tool with detailed descriptions +- Usage guide tool +- OAuth 2.1 support for remote HTTP mode diff --git a/aichat/CLAUDE.md b/aichat/CLAUDE.md new file mode 100644 index 0000000..d4beac3 --- /dev/null +++ b/aichat/CLAUDE.md @@ -0,0 +1,40 @@ +# AIChatMCP + +MCP (Model Context Protocol) server for AI Dialogue via AceDataCloud API. + +## Project Structure + +``` +core/ + config.py — Settings dataclass (API token, base URL) + server.py — FastMCP server singleton + client.py — httpx async HTTP client + types.py — Literal types (ChatModel, etc.) 
+ exceptions.py — Error classes (AuthError, APIError, TimeoutError) +tools/ + chat_tools.py — aichat conversation + info_tools.py — list models, usage guide +prompts/ — LLM guidance prompts +tests/ — pytest-asyncio tests +``` + +## Sync from Docs + +When invoked by the sync workflow, the Docs repo is checked out at `_docs/`. Your job: + +1. **Source of truth** — `_docs/openapi/aichat.json` is the OpenAPI spec for the AIChat API. +2. **Compare models** — The Literal types in `core/types.py` must match the spec's model enum. Add/remove as needed. +3. **Compare parameters** — Each `@mcp.tool()` function's parameters should match the corresponding OpenAPI endpoint. +4. **Update defaults** — If a new model becomes the recommended default, update the default in `core/types.py`. +5. **Update README** — Keep the model table and feature list current. +6. **Add tests** — For new tools or parameters, add test cases in `tests/`. +7. **PR title** — Use format: `sync: [auto-sync]` + +## Development + +```bash +pip install -e ".[dev]" +pytest --cov=core --cov=tools +ruff check . +``` + diff --git a/aichat/Dockerfile b/aichat/Dockerfile new file mode 100644 index 0000000..cd36d6c --- /dev/null +++ b/aichat/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . . + +RUN pip install --no-cache-dir . 
+ +CMD ["mcp-aichat", "--transport", "http", "--port", "8000"] diff --git a/aichat/LICENSE b/aichat/LICENSE new file mode 100644 index 0000000..5100e6c --- /dev/null +++ b/aichat/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 AceDataCloud + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/aichat/README.md b/aichat/README.md new file mode 100644 index 0000000..54a56d1 --- /dev/null +++ b/aichat/README.md @@ -0,0 +1,93 @@ +# AIChatMCP + + + +[![PyPI version](https://img.shields.io/pypi/v/mcp-aichat.svg)](https://pypi.org/project/mcp-aichat/) +[![PyPI downloads](https://img.shields.io/pypi/dm/mcp-aichat.svg)](https://pypi.org/project/mcp-aichat/) +[![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![MCP](https://img.shields.io/badge/MCP-Compatible-green.svg)](https://modelcontextprotocol.io) + +A [Model Context Protocol (MCP)](https://modelcontextprotocol.io) server for AI dialogue using the [AIChat API](https://platform.acedata.cloud) through the [AceDataCloud API](https://platform.acedata.cloud). + +Access 70+ AI models including GPT-5, GPT-4, o-series, DeepSeek, Grok, and GLM directly from Claude, VS Code, or any MCP-compatible client. + +## Features + +- **Multi-Model Support** - Access GPT-5, GPT-4, o-series, DeepSeek, Grok, GLM, and more +- **Stateful Conversations** - Multi-turn dialogue with conversation memory +- **Reference Support** - Provide URLs as context for model responses +- **Preset Configurations** - Use preset model configurations +- **Model Listing** - Browse all available models with descriptions + +## Tool Reference + +| Tool | Description | +|------|-------------| +| `aichat_conversation` | Send a question to an AI model and get an answer. | +| `aichat_list_models` | List all available AI chat models. | +| `aichat_get_usage_guide` | Get a comprehensive guide for using the AIChat tools. | + +## Quick Start + +### 1. Get Your API Token + +1. Sign up at [AceDataCloud Platform](https://platform.acedata.cloud) +2. Go to the API documentation page +3. Click **"Acquire"** to get your API token +4. Copy the token for use below + +### 2. 
Install and Configure + +```bash +# Install with pip +pip install mcp-aichat + +# Or use with uvx (no installation needed) +uvx mcp-aichat +``` + +### 3. Add to Claude Desktop + +Add to `~/Library/Application Support/Claude/claude_desktop_config.json`: + +```json +{ + "mcpServers": { + "aichat": { + "command": "uvx", + "args": ["mcp-aichat"], + "env": { + "ACEDATACLOUD_API_TOKEN": "your_token_here" + } + } + } +} +``` + +## Environment Variables + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `ACEDATACLOUD_API_TOKEN` | Yes | - | API token from AceDataCloud | +| `ACEDATACLOUD_API_BASE_URL` | No | `https://api.acedata.cloud` | API base URL | +| `AICHAT_REQUEST_TIMEOUT` | No | `60` | Request timeout in seconds | +| `MCP_SERVER_NAME` | No | `aichat` | MCP server name | +| `LOG_LEVEL` | No | `INFO` | Logging level | + +## Available Models + +The AIChat API supports 70+ models including: + +- **GPT-5 Series**: gpt-5.5, gpt-5.5-pro, gpt-5.4, gpt-5.2, gpt-5.1, gpt-5, gpt-5-mini, gpt-5-nano +- **GPT-4 Series**: gpt-4.1, gpt-4o, gpt-4o-mini, gpt-4, gpt-4-turbo, and dated variants +- **o-series**: o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini, and variants +- **DeepSeek**: deepseek-r1, deepseek-r1-0528, deepseek-v3, deepseek-v3-250324 +- **Grok**: grok-3 +- **GLM**: glm-5.1, glm-4.7, glm-4.6, glm-4.5-air, glm-3-turbo + +Use `aichat_list_models` to see the full list. + +## License + +MIT License - see [LICENSE](LICENSE) file for details. 
diff --git a/aichat/core/__init__.py b/aichat/core/__init__.py new file mode 100644 index 0000000..b3359ee --- /dev/null +++ b/aichat/core/__init__.py @@ -0,0 +1,15 @@ +"""Core module for MCP AIChat server.""" + +from core.client import AIChatClient +from core.config import settings +from core.exceptions import AIChatAPIError, AIChatAuthError, AIChatValidationError +from core.server import mcp + +__all__ = [ + "AIChatClient", + "settings", + "mcp", + "AIChatAPIError", + "AIChatAuthError", + "AIChatValidationError", +] diff --git a/aichat/core/client.py b/aichat/core/client.py new file mode 100644 index 0000000..28739b6 --- /dev/null +++ b/aichat/core/client.py @@ -0,0 +1,151 @@ +"""HTTP client for AIChat API via AceDataCloud.""" + +import contextvars +import json +from typing import Any + +import httpx +from loguru import logger + +from core.config import settings +from core.exceptions import AIChatAPIError, AIChatAuthError, AIChatError, AIChatTimeoutError + +# Context variable for per-request API token (used in HTTP/remote mode) +_request_api_token: contextvars.ContextVar[str | None] = contextvars.ContextVar( + "_request_api_token", default=None +) + + +def set_request_api_token(token: str | None) -> None: + """Set the API token for the current request context (HTTP mode).""" + _request_api_token.set(token) + + +def get_request_api_token() -> str | None: + """Get the API token from the current request context.""" + return _request_api_token.get() + + +class AIChatClient: + """Async HTTP client for AceDataCloud AIChat API.""" + + def __init__(self, api_token: str | None = None, base_url: str | None = None): + """Initialize the AIChat API client. + + Args: + api_token: API token for authentication. If not provided, uses settings. + base_url: Base URL for the API. If not provided, uses settings. 
+ """ + self.api_token = api_token if api_token is not None else settings.api_token + self.base_url = base_url or settings.api_base_url + self.timeout = settings.request_timeout + + logger.info(f"AIChatClient initialized with base_url: {self.base_url}") + logger.debug(f"API token configured: {'Yes' if self.api_token else 'No'}") + logger.debug(f"Request timeout: {self.timeout}s") + + def _get_headers(self) -> dict[str, str]: + """Get request headers with authentication.""" + token = get_request_api_token() or self.api_token + if not token: + logger.error("API token not configured!") + raise AIChatAuthError("API token not configured") + + return { + "accept": "application/json", + "authorization": f"Bearer {token}", + "content-type": "application/json", + } + + def _handle_error_response(self, response: httpx.Response) -> None: + """Parse API error response and raise the appropriate exception. + + The AceDataCloud API returns errors in the format: + {"error": {"code": "...", "message": "..."}} + """ + status = response.status_code + try: + body = response.json() + except Exception: + body = {} + + error_obj = body.get("error", {}) + code = error_obj.get("code", f"http_{status}") + message = ( + error_obj.get("message") or body.get("detail") or response.text or f"HTTP {status}" + ) + + logger.error(f"API error {status} [{code}]: {message}") + + if status in (401, 403): + raise AIChatAuthError(message) + raise AIChatAPIError(message=message, code=code, status_code=status) + + async def request( + self, + endpoint: str, + payload: dict[str, Any], + timeout: float | None = None, + ) -> dict[str, Any]: + """Make a POST request to the AIChat API. 
+ + Args: + endpoint: API endpoint path (e.g., "/aichat/conversations") + payload: Request body as dictionary + timeout: Optional timeout override + + Returns: + API response as dictionary + + Raises: + AIChatAuthError: If authentication fails + AIChatAPIError: If the API request fails + AIChatTimeoutError: If the request times out + """ + url = f"{self.base_url}{endpoint}" + request_timeout = timeout or self.timeout + + logger.info(f"POST {url}") + logger.debug(f"Request payload: {json.dumps(payload, ensure_ascii=False, indent=2)}") + logger.debug(f"Timeout: {request_timeout}s") + + async with httpx.AsyncClient() as http_client: + try: + response = await http_client.post( + url, + json=payload, + headers=self._get_headers(), + timeout=request_timeout, + ) + + logger.info(f"Response status: {response.status_code}") + + if response.status_code >= 400: + self._handle_error_response(response) + + result = response.json() + logger.success("Request successful!") + + return result # type: ignore[no-any-return] + + except httpx.TimeoutException as e: + logger.error(f"Request timeout after {request_timeout}s: {e}") + raise AIChatTimeoutError( + f"Request to {endpoint} timed out after {request_timeout}s" + ) from e + + except AIChatError: + raise + + except Exception as e: + logger.error(f"Request error: {e}") + raise AIChatAPIError(message=str(e)) from e + + async def conversations(self, **kwargs: Any) -> dict[str, Any]: + """Send a conversation request to the AIChat API.""" + logger.info(f"AIChat conversation with model: {kwargs.get('model', 'unknown')}") + return await self.request("/aichat/conversations", kwargs) + + +# Global client instance +client = AIChatClient() diff --git a/aichat/core/config.py b/aichat/core/config.py new file mode 100644 index 0000000..2875e20 --- /dev/null +++ b/aichat/core/config.py @@ -0,0 +1,65 @@ +"""Configuration management for MCP AIChat server.""" + +import os +from dataclasses import dataclass, field +from pathlib import Path + +from 
dotenv import load_dotenv + +# Load .env file from project root +_env_path = Path(__file__).parent.parent / ".env" +load_dotenv(dotenv_path=_env_path) + + +@dataclass +class Settings: + """Application settings loaded from environment variables.""" + + # API Configuration + api_base_url: str = field( + default_factory=lambda: os.getenv("ACEDATACLOUD_API_BASE_URL", "https://api.acedata.cloud") + ) + api_token: str = field(default_factory=lambda: os.getenv("ACEDATACLOUD_API_TOKEN", "")) + + # Request Configuration + request_timeout: float = field( + default_factory=lambda: float(os.getenv("AICHAT_REQUEST_TIMEOUT", "60")) + ) + + # Server Configuration + server_name: str = field(default_factory=lambda: os.getenv("MCP_SERVER_NAME", "aichat")) + transport: str = field(default_factory=lambda: os.getenv("MCP_TRANSPORT", "stdio")) + log_level: str = field(default_factory=lambda: os.getenv("LOG_LEVEL", "INFO")) + + # OAuth / Remote Auth Configuration + server_url: str = field(default_factory=lambda: os.getenv("MCP_SERVER_URL", "")) + auth_base_url: str = field( + default_factory=lambda: os.getenv( + "ACEDATACLOUD_AUTH_BASE_URL", "https://auth.acedata.cloud" + ) + ) + platform_base_url: str = field( + default_factory=lambda: os.getenv( + "ACEDATACLOUD_PLATFORM_BASE_URL", "https://platform.acedata.cloud" + ) + ) + oauth_client_id: str = field( + default_factory=lambda: os.getenv("ACEDATACLOUD_OAUTH_CLIENT_ID", "") + ) + + def validate(self) -> None: + """Validate required settings.""" + if not self.api_token: + raise ValueError( + "ACEDATACLOUD_API_TOKEN environment variable is required. 
" + "Get your token from https://platform.acedata.cloud" + ) + + @property + def is_configured(self) -> bool: + """Check if the API token is configured.""" + return bool(self.api_token) + + +# Global settings instance +settings = Settings() diff --git a/aichat/core/exceptions.py b/aichat/core/exceptions.py new file mode 100644 index 0000000..d673031 --- /dev/null +++ b/aichat/core/exceptions.py @@ -0,0 +1,39 @@ +"""Custom exceptions for MCP AIChat server.""" + + +class AIChatError(Exception): + """Base exception for AIChat API errors.""" + + def __init__(self, message: str, code: str = "unknown"): + self.message = message + self.code = code + super().__init__(self.message) + + +class AIChatAuthError(AIChatError): + """Authentication error.""" + + def __init__(self, message: str = "Authentication failed"): + super().__init__(message, code="auth_error") + + +class AIChatAPIError(AIChatError): + """API request error.""" + + def __init__(self, message: str, code: str = "api_error", status_code: int | None = None): + self.status_code = status_code + super().__init__(message, code) + + +class AIChatValidationError(AIChatError): + """Validation error for request parameters.""" + + def __init__(self, message: str): + super().__init__(message, code="validation_error") + + +class AIChatTimeoutError(AIChatError): + """Request timeout error.""" + + def __init__(self, message: str = "Request timed out"): + super().__init__(message, code="timeout_error") diff --git a/aichat/core/oauth.py b/aichat/core/oauth.py new file mode 100644 index 0000000..07e2bcd --- /dev/null +++ b/aichat/core/oauth.py @@ -0,0 +1,584 @@ +"""OAuth 2.1 provider for AceDataCloud MCP servers. + +Implements the MCP SDK's OAuthAuthorizationServerProvider interface, +delegating user authentication to AceDataCloud's OAuth 2.0 Authorization Server. + +Flow: +1. Claude.ai redirects user to /authorize +2. MCP server redirects to auth.acedata.cloud/oauth2/authorize (consent page) +3. 
User logs in (if needed), sees consent page, approves +4. auth.acedata.cloud issues an authorization code, redirects to /oauth/callback +5. MCP server exchanges code for JWT via POST /oauth2/token (with PKCE) +6. MCP server uses JWT to fetch/create user's API credential +7. Issues the credential token as the OAuth access_token +8. Claude uses this token for all subsequent MCP requests +""" + +import base64 +import hashlib +import json +import secrets +import time +from urllib.parse import urlencode + +import httpx +from loguru import logger +from mcp.server.auth.provider import ( + AccessToken, + AuthorizationCode, + AuthorizationParams, + OAuthClientInformationFull, + OAuthToken, + RefreshToken, +) +from starlette.requests import Request +from starlette.responses import JSONResponse, RedirectResponse + +from core.client import set_request_api_token +from core.config import settings + + +class AceDataCloudOAuthProvider: + """OAuth provider that delegates authentication to AceDataCloud platform. + + In-memory storage is used for auth state (suitable for single-replica K8s deployment). 
+ """ + + def __init__(self) -> None: + self._clients: dict[str, OAuthClientInformationFull] = {} + self._auth_codes: dict[ + str, tuple[AuthorizationCode, str] + ] = {} # code → (AuthCode, api_token) + self._access_tokens: dict[str, AccessToken] = {} + self._refresh_tokens: dict[str, RefreshToken] = {} + self._pending_auth: dict[str, dict] = {} # mcp_state → {client_id, params} + + async def get_client(self, client_id: str) -> OAuthClientInformationFull | None: + return self._clients.get(client_id) + + async def register_client(self, client_info: OAuthClientInformationFull) -> None: + client_id = client_info.client_id + assert client_id is not None + self._clients[client_id] = client_info + logger.info(f"Registered OAuth client: {client_id}") + + async def authorize( + self, client: OAuthClientInformationFull, params: AuthorizationParams + ) -> str: + """Redirect user to AceDataCloud OAuth 2.0 consent page.""" + # Generate state key for tracking this auth flow + mcp_state = secrets.token_urlsafe(32) + + # Generate PKCE pair for auth.acedata.cloud token exchange + code_verifier = secrets.token_urlsafe(48) + digest = hashlib.sha256(code_verifier.encode("ascii")).digest() + auth_code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii") + + self._pending_auth[mcp_state] = { + "client_id": client.client_id, + "redirect_uri": str(params.redirect_uri), + "state": params.state, + "code_challenge": params.code_challenge, + "redirect_uri_provided_explicitly": params.redirect_uri_provided_explicitly, + "scopes": params.scopes, + "resource": params.resource, + "auth_code_verifier": code_verifier, + } + + # Build callback URL + callback_url = f"{settings.server_url}/oauth/callback" + + # Build OAuth 2.0 authorize URL + auth_params = { + "client_id": settings.oauth_client_id, + "redirect_uri": callback_url, + "response_type": "code", + "scope": "profile platform", + "state": mcp_state, + "code_challenge": auth_code_challenge, + "code_challenge_method": 
"S256", + } + auth_url = f"{settings.auth_base_url}/oauth2/authorize?{urlencode(auth_params)}" + logger.info(f"OAuth authorize: redirecting to consent page (mcp_state={mcp_state})") + return auth_url + + async def handle_callback(self, request: Request) -> RedirectResponse | JSONResponse: + """Handle the callback from AceDataCloud OAuth 2.0 after user consent. + + This is called as a Starlette route handler, not part of the SDK interface. + """ + mcp_state = request.query_params.get("state") + adc_code = request.query_params.get("code") + + logger.debug( + f"handle_callback: state={mcp_state}, code={adc_code[:16] if adc_code else None}, " + f"pending_auth_keys={list(self._pending_auth.keys())}" + ) + + if not mcp_state or not adc_code: + logger.error(f"handle_callback: missing state={mcp_state} or code={adc_code}") + return JSONResponse({"error": "Missing state or code parameter"}, status_code=400) + + pending = self._pending_auth.pop(mcp_state, None) + if not pending: + logger.error( + f"handle_callback: state {mcp_state} not found in pending_auth. " + f"Available states: {list(self._pending_auth.keys())}" + ) + return JSONResponse({"error": "Invalid or expired state"}, status_code=400) + + try: + # Exchange AceDataCloud OAuth 2.0 code for JWT (with PKCE) + code_verifier = pending.get("auth_code_verifier", "") + logger.debug( + f"handle_callback: exchanging code for JWT, pending_keys={list(pending.keys())}" + ) + jwt_token = await self._exchange_code_for_jwt(adc_code, code_verifier) + logger.debug( + f"handle_callback: JWT exchange returned " + f"{'token=' + jwt_token[:32] + '...' 
if jwt_token else 'None'}" + ) + if not jwt_token: + logger.error("handle_callback: JWT exchange failed, returning 502") + return JSONResponse( + {"error": "Failed to exchange authorization code"}, status_code=502 + ) + + # Fetch user's API credential token from PlatformBackend + logger.debug("handle_callback: fetching user credential...") + api_token = await self._get_user_credential(jwt_token) + logger.debug( + f"handle_callback: _get_user_credential returned " + f"{'token=' + api_token[:12] + '...' if api_token else 'None'}" + ) + if not api_token: + logger.error("handle_callback: credential fetch returned None, returning 403") + return JSONResponse( + { + "error": "No API credential found. Please create an API key at " + "https://platform.acedata.cloud first." + }, + status_code=403, + ) + + # Create MCP authorization code + auth_code_str = secrets.token_urlsafe(48) + auth_code = AuthorizationCode( + code=auth_code_str, + scopes=pending.get("scopes") or [], + expires_at=time.time() + 600, # 10 minutes + client_id=pending["client_id"], + code_challenge=pending["code_challenge"], + redirect_uri=pending["redirect_uri"], + redirect_uri_provided_explicitly=pending["redirect_uri_provided_explicitly"], + resource=pending.get("resource"), + ) + self._auth_codes[auth_code_str] = (auth_code, api_token) + + # Redirect back to Claude with the MCP auth code + redirect_uri = pending["redirect_uri"] + params = {"code": auth_code_str} + if pending.get("state"): + params["state"] = pending["state"] + + separator = "&" if "?" in redirect_uri else "?" 
+ redirect_url = f"{redirect_uri}{separator}{urlencode(params)}" + logger.info("OAuth callback: issuing auth code, redirecting to client") + return RedirectResponse(url=redirect_url, status_code=302) + + except Exception: + logger.exception("OAuth callback error") + return JSONResponse({"error": "Internal server error"}, status_code=500) + + async def load_authorization_code( + self, + client: OAuthClientInformationFull, # noqa: ARG002 + authorization_code: str, + ) -> AuthorizationCode | None: + data = self._auth_codes.get(authorization_code) + if not data: + return None + auth_code, _ = data + if auth_code.expires_at < time.time(): + self._auth_codes.pop(authorization_code, None) + return None + return auth_code + + async def exchange_authorization_code( + self, client: OAuthClientInformationFull, authorization_code: AuthorizationCode + ) -> OAuthToken: + data = self._auth_codes.pop(authorization_code.code, None) + if not data: + raise ValueError("Authorization code not found or already used") + _, api_token = data + + client_id = client.client_id or "" + + # Store access token mapping + self._access_tokens[api_token] = AccessToken( + token=api_token, + client_id=client_id, + scopes=authorization_code.scopes, + expires_at=None, # API credential tokens don't expire by time + ) + + # Generate refresh token + refresh_token_str = secrets.token_urlsafe(48) + self._refresh_tokens[refresh_token_str] = RefreshToken( + token=refresh_token_str, + client_id=client_id, + scopes=authorization_code.scopes, + ) + + logger.info(f"OAuth token exchange: issued access token for client {client_id}") + return OAuthToken( + access_token=api_token, + token_type="Bearer", + refresh_token=refresh_token_str, + ) + + async def load_refresh_token( + self, + client: OAuthClientInformationFull, # noqa: ARG002 + refresh_token: str, + ) -> RefreshToken | None: + return self._refresh_tokens.get(refresh_token) + + async def exchange_refresh_token( + self, + client: OAuthClientInformationFull, + 
refresh_token: RefreshToken, + scopes: list[str], + ) -> OAuthToken: + # For refresh, we reuse the same API credential token + # Find the associated access token + self._refresh_tokens.pop(refresh_token.token, None) + + # The original access_token (API credential) is still valid + # Just issue a new refresh token + client_id = client.client_id or "" + new_refresh = secrets.token_urlsafe(48) + self._refresh_tokens[new_refresh] = RefreshToken( + token=new_refresh, + client_id=client_id, + scopes=scopes or refresh_token.scopes, + ) + + # Find the access token for this client + for token, at in self._access_tokens.items(): + if at.client_id == client.client_id: + return OAuthToken( + access_token=token, + token_type="Bearer", + refresh_token=new_refresh, + ) + + raise ValueError("No access token found for refresh") + + async def load_access_token(self, token: str) -> AccessToken | None: + """Validate an access token. + + Accepts both OAuth-issued tokens and direct API credential tokens. + Direct tokens are accepted since the real validation happens at api.acedata.cloud. + """ + # Check OAuth-issued tokens first + if token in self._access_tokens: + access_token = self._access_tokens[token] + if access_token.expires_at and time.time() > access_token.expires_at: + self._access_tokens.pop(token, None) + return None + set_request_api_token(token) + return access_token + + # Accept direct API credential tokens (for VS Code, Cursor, etc.) 
+ set_request_api_token(token) + return AccessToken(token=token, client_id="direct", scopes=[]) + + async def revoke_token(self, token: AccessToken | RefreshToken) -> None: + if isinstance(token, AccessToken): + self._access_tokens.pop(token.token, None) + elif isinstance(token, RefreshToken): + self._refresh_tokens.pop(token.token, None) + logger.info(f"Revoked token: {token.token[:8]}...") + + # --- Internal helpers --- + + @staticmethod + def _decode_jwt_payload(token: str) -> dict | None: + """Decode JWT payload without verification (for debug logging only).""" + try: + parts = token.split(".") + if len(parts) != 3: + logger.debug(f"JWT has {len(parts)} parts, expected 3") + return None + payload_b64 = parts[1] + # Add padding + padding = 4 - len(payload_b64) % 4 + if padding != 4: + payload_b64 += "=" * padding + payload_bytes = base64.urlsafe_b64decode(payload_b64) + payload: dict = json.loads(payload_bytes) + return payload + except Exception as e: + logger.debug(f"Failed to decode JWT payload: {e}") + return None + + async def _exchange_code_for_jwt(self, code: str, code_verifier: str) -> str | None: + """Exchange AceDataCloud OAuth 2.0 authorization code for JWT.""" + callback_url = f"{settings.server_url}/oauth/callback" + token_url = f"{settings.auth_base_url}/oauth2/token" + logger.debug( + f"Exchanging code for JWT: token_url={token_url}, " + f"client_id={settings.oauth_client_id}, " + f"redirect_uri={callback_url}, " + f"code={code[:16]}..., " + f"code_verifier={code_verifier[:16]}..." 
+ ) + try: + async with httpx.AsyncClient(timeout=30) as client: + response = await client.post( + token_url, + data={ + "grant_type": "authorization_code", + "code": code, + "client_id": settings.oauth_client_id, + "redirect_uri": callback_url, + "code_verifier": code_verifier, + }, + ) + logger.debug( + f"Token exchange response: status={response.status_code}, " + f"body={response.text[:500]}" + ) + if response.status_code == 200: + data = response.json() + access_token: str | None = data.get("access_token") + if access_token: + # Decode and log JWT claims for debugging + claims = self._decode_jwt_payload(access_token) + if claims: + logger.debug( + f"JWT claims: user_id={claims.get('user_id')}, " + f"scope={claims.get('scope')}, " + f"permissions={claims.get('permissions')}, " + f"is_superuser={claims.get('is_superuser')}, " + f"is_verified={claims.get('is_verified')}, " + f"exp={claims.get('exp')}, " + f"iat={claims.get('iat')}, " + f"token_type={claims.get('token_type')}, " + f"all_keys={list(claims.keys())}" + ) + else: + logger.warning("Could not decode JWT payload for debug") + else: + logger.error( + f"Token exchange 200 but no access_token in response. " + f"Keys: {list(data.keys())}" + ) + return access_token + logger.error(f"OAuth token exchange failed: {response.status_code} {response.text}") + except Exception: + logger.exception("OAuth token exchange error") + return None + + async def _get_user_credential(self, jwt_token: str) -> str | None: + """Fetch or auto-create user's API credential token from PlatformBackend. + + Flow: + 1. List existing credentials → return first token if found + 2. List Global Usage applications → use first if found + 3. If no application, create one (POST /api/v1/applications/) + 4. 
Create credential under that application (POST /api/v1/credentials/) + """ + headers = {"Authorization": f"Bearer {jwt_token}"} + logger.debug( + f"_get_user_credential: platform_base_url={settings.platform_base_url}, " + f"jwt_token={jwt_token[:32]}..." + ) + + # Decode JWT to extract user_id (needed for filtering API queries) + claims = self._decode_jwt_payload(jwt_token) + user_id: str | None = None + if claims: + user_id = claims.get("user_id") + logger.debug( + f"_get_user_credential JWT: user_id={user_id}, " + f"scope={claims.get('scope')}, " + f"permissions={claims.get('permissions')}, " + f"token_type={claims.get('token_type')}, " + f"exp={claims.get('exp')}" + ) + else: + logger.warning("_get_user_credential: could not decode JWT for debug") + + try: + async with httpx.AsyncClient(timeout=30) as client: + # Step 1: Check for existing credentials + creds_url = f"{settings.platform_base_url}/api/v1/credentials/" + creds_params: dict[str, str] = {} + if user_id: + creds_params["user_id"] = user_id + logger.debug(f"Step 1: GET {creds_url} params={creds_params}") + response = await client.get(creds_url, headers=headers, params=creds_params) + logger.debug( + f"Step 1 response: status={response.status_code}, body={response.text[:1000]}" + ) + + if response.status_code == 200: + data = response.json() + results = data.get("results", data) if isinstance(data, dict) else data + logger.debug( + f"Step 1 parsed: type(data)={type(data).__name__}, " + f"type(results)={type(results).__name__}, " + f"count={len(results) if isinstance(results, list) else 'N/A'}" + ) + if isinstance(results, list): + for i, cred in enumerate(results): + logger.debug( + f"Step 1 credential[{i}]: " + f"id={cred.get('id')}, " + f"token={'present' if cred.get('token') else 'MISSING'}, " + f"type={cred.get('type')}, " + f"keys={list(cred.keys())}" + ) + cred_token: str | None = cred.get("token") + if cred_token: + logger.info( + f"Found existing credential token " + f"(id={cred.get('id')}, 
token={cred_token[:12]}...)" + ) + return cred_token + logger.debug( + f"Step 1: iterated {len(results)} credentials, none had a token" + ) + else: + logger.warning(f"Step 1: results is not a list: {type(results).__name__}") + else: + logger.error( + f"Step 1 FAILED: credentials list returned " + f"status={response.status_code}, body={response.text[:500]}" + ) + + # Step 2: No credentials found — auto-provision + logger.info("No credentials found, auto-provisioning Application + Credential") + + # Step 2a: Find or create a Global Usage application + apps_url = f"{settings.platform_base_url}/api/v1/applications/" + apps_params: dict[str, str] = { + "limit": "10", + "ordering": "-created_at", + "type": "Usage", + "scope": "Global", + } + if user_id: + apps_params["user_id"] = user_id + logger.debug(f"Step 2a: GET {apps_url} params={apps_params}") + app_resp = await client.get(apps_url, params=apps_params, headers=headers) + logger.debug( + f"Step 2a response: status={app_resp.status_code}, body={app_resp.text[:1000]}" + ) + + application_id: str | None = None + if app_resp.status_code == 200: + app_data = app_resp.json() + items = app_data.get("items", app_data.get("results", [])) + logger.debug( + f"Step 2a parsed: " + f"data_keys={list(app_data.keys()) if isinstance(app_data, dict) else 'not-dict'}, " + f"items_count={len(items) if isinstance(items, list) else 'N/A'}" + ) + if isinstance(items, list) and items: + app = items[0] + application_id = app.get("id") + logger.debug( + f"Step 2a: using app id={application_id}, " + f"type={app.get('type')}, " + f"scope={app.get('scope')}, " + f"remaining_amount={app.get('remaining_amount')}, " + f"keys={list(app.keys())}" + ) + # Check if the app already has a credential + app_creds = app.get("credentials", []) + logger.debug( + f"Step 2a: app.credentials count=" + f"{len(app_creds) if isinstance(app_creds, list) else 'not-list'}" + ) + if isinstance(app_creds, list) and app_creds: + logger.debug(f"Step 2a: first credential 
in app: {app_creds[0]}") + existing_token: str | None = app_creds[0].get("token") + if isinstance(existing_token, str) and existing_token: + logger.info( + f"Found credential in existing application " + f"(app_id={application_id}, token={existing_token[:12]}...)" + ) + return existing_token + logger.debug("Step 2a: credential in app has no token field or empty") + else: + logger.debug("Step 2a: no Global Usage applications found") + else: + logger.error( + f"Step 2a FAILED: applications list returned " + f"status={app_resp.status_code}, body={app_resp.text[:500]}" + ) + + if not application_id: + # Create a new Global Usage application + create_payload = {"type": "Usage", "scope": "Global"} + logger.debug(f"Step 2a-create: POST {apps_url} json={create_payload}") + create_app_resp = await client.post( + apps_url, + headers={**headers, "Content-Type": "application/json"}, + json=create_payload, + ) + logger.debug( + f"Step 2a-create response: status={create_app_resp.status_code}, " + f"body={create_app_resp.text[:1000]}" + ) + if create_app_resp.status_code in (200, 201): + new_app = create_app_resp.json() + application_id = new_app.get("id") + logger.info(f"Created Global Application: {application_id}") + else: + logger.error( + f"Failed to create application: " + f"{create_app_resp.status_code} {create_app_resp.text}" + ) + return None + + # Step 2b: Create a credential under the application + cred_create_url = f"{settings.platform_base_url}/api/v1/credentials/" + cred_create_payload = {"application_id": application_id} + logger.debug(f"Step 2b: POST {cred_create_url} json={cred_create_payload}") + cred_resp = await client.post( + cred_create_url, + headers={**headers, "Content-Type": "application/json"}, + json=cred_create_payload, + ) + logger.debug( + f"Step 2b response: status={cred_resp.status_code}, " + f"body={cred_resp.text[:1000]}" + ) + if cred_resp.status_code in (200, 201): + cred_data = cred_resp.json() + logger.debug( + f"Step 2b parsed: 
type={type(cred_data).__name__}, " + f"keys={list(cred_data.keys()) if isinstance(cred_data, dict) else 'not-dict'}" + ) + new_token: str | None = ( + cred_data.get("token") if isinstance(cred_data, dict) else None + ) + if isinstance(new_token, str) and new_token: + logger.info( + f"Auto-provisioned new credential token (token={new_token[:12]}...)" + ) + return new_token + logger.error(f"Credential created but no token in response: {cred_data}") + else: + logger.error( + f"Failed to create credential: {cred_resp.status_code} {cred_resp.text}" + ) + except Exception: + logger.exception("Credential fetch/provision error") + + logger.error("_get_user_credential: returning None — all steps failed") + return None diff --git a/aichat/core/server.py b/aichat/core/server.py new file mode 100644 index 0000000..ae624d4 --- /dev/null +++ b/aichat/core/server.py @@ -0,0 +1,49 @@ +"""MCP Server initialization.""" + +import logging + +from mcp.server.fastmcp import FastMCP +from mcp.types import Icon + +from core.config import settings + +# Configure logging +logging.basicConfig( + level=getattr(logging, settings.log_level.upper(), logging.INFO), + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) + +# Suppress noisy loggers +logging.getLogger("httpx").setLevel(logging.WARNING) +logging.getLogger("httpcore").setLevel(logging.WARNING) + +logger = logging.getLogger(__name__) + +# Build FastMCP kwargs, enabling OAuth when MCP_SERVER_URL is configured +mcp_kwargs: dict = {"host": "0.0.0.0"} +oauth_provider = None + +if settings.server_url: + from mcp.server.auth.settings import AuthSettings, ClientRegistrationOptions, RevocationOptions + from pydantic import AnyHttpUrl + + from core.oauth import AceDataCloudOAuthProvider + + oauth_provider = AceDataCloudOAuthProvider() + mcp_kwargs["auth_server_provider"] = oauth_provider + mcp_kwargs["auth"] = AuthSettings( + issuer_url=AnyHttpUrl(settings.server_url), + resource_server_url=AnyHttpUrl(settings.server_url), + 
client_registration_options=ClientRegistrationOptions(enabled=True), + revocation_options=RevocationOptions(enabled=True), + ) + logger.info(f"OAuth enabled: issuer_url={settings.server_url}") + +# Initialize FastMCP server +mcp = FastMCP( + settings.server_name, + icons=[Icon(src="", mimeType="image/png")], + **mcp_kwargs, +) + +logger.info(f"Initialized MCP server: {settings.server_name}") diff --git a/aichat/core/types.py b/aichat/core/types.py new file mode 100644 index 0000000..8355d65 --- /dev/null +++ b/aichat/core/types.py @@ -0,0 +1,88 @@ +"""Type definitions for AIChat MCP server.""" + +from typing import Literal + +# Chat model options — keep in sync with openapi/aichat.json +ChatModel = Literal[ + "gpt-5.5", + "gpt-5.5-pro", + "gpt-5.4", + "gpt-5.4-pro", + "gpt-5.2", + "gpt-5.1", + "gpt-5.1-all", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-all", + "gpt-4", + "gpt-4-all", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4-vision-preview", + "gpt-4.1", + "gpt-4.1-2025-04-14", + "gpt-4.1-mini", + "gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano", + "gpt-4.1-nano-2025-04-14", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-2024-11-20", + "gpt-4o-all", + "gpt-4o-image", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4o-mini-search-preview", + "gpt-4o-mini-search-preview-2025-03-11", + "gpt-4o-search-preview", + "gpt-4o-search-preview-2025-03-11", + "o1", + "o1-2024-12-17", + "o1-all", + "o1-mini", + "o1-mini-2024-09-12", + "o1-mini-all", + "o1-preview", + "o1-preview-2024-09-12", + "o1-preview-all", + "o1-pro", + "o1-pro-2025-03-19", + "o1-pro-all", + "o3", + "o3-2025-04-16", + "o3-all", + "o3-mini", + "o3-mini-2025-01-31", + "o3-mini-2025-01-31-high", + "o3-mini-2025-01-31-low", + "o3-mini-2025-01-31-medium", + "o3-mini-all", + "o3-mini-high", + "o3-mini-high-all", + "o3-mini-low", + "o3-mini-medium", + "o3-pro", + "o3-pro-2025-06-10", + "o4-mini", + "o4-mini-2025-04-16", + 
"o4-mini-all", + "o4-mini-high-all", + "deepseek-r1", + "deepseek-r1-0528", + "deepseek-v3", + "deepseek-v3-250324", + "grok-3", + "glm-5.1", + "glm-4.7", + "glm-4.6", + "glm-4.5-air", + "glm-3-turbo", +] + +# === Default values === + +DEFAULT_CHAT_MODEL: ChatModel = "gpt-4.1" diff --git a/aichat/docker-compose.yaml b/aichat/docker-compose.yaml new file mode 100644 index 0000000..31e40ae --- /dev/null +++ b/aichat/docker-compose.yaml @@ -0,0 +1,9 @@ +version: "3" +services: + mcp-aichat: + platform: linux/amd64 + container_name: "mcp-aichat" + build: ./ + image: "ghcr.io/acedatacloud/mcp-aichat:${BUILD_NUMBER}" + ports: + - "8000:8000" diff --git a/aichat/glama.json b/aichat/glama.json new file mode 100644 index 0000000..109274e --- /dev/null +++ b/aichat/glama.json @@ -0,0 +1,6 @@ +{ + "$schema": "https://glama.ai/mcp/schemas/server.json", + "maintainers": [ + "Germey" + ] +} diff --git a/aichat/main.py b/aichat/main.py new file mode 100644 index 0000000..9ceb963 --- /dev/null +++ b/aichat/main.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 +""" +MCP AIChat Server - AI Dialogue via AceDataCloud API. + +A Model Context Protocol (MCP) server that provides tools for conversing with +large language models using the AIChat API through the AceDataCloud platform. 
+""" + +import argparse +import logging +import sys +from importlib import metadata + +from dotenv import load_dotenv + +# Load environment variables before importing other modules +load_dotenv() + +from core.config import settings +from core.server import mcp + +# Configure logging +logging.basicConfig( + level=getattr(logging, settings.log_level.upper(), logging.INFO), + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + + +def safe_print(text: str) -> None: + """Print to stderr safely, handling encoding issues.""" + if not sys.stderr.isatty(): + logger.debug(f"[MCP AIChat] {text}") + return + + try: + print(text, file=sys.stderr) + except UnicodeEncodeError: + print(text.encode("ascii", errors="replace").decode(), file=sys.stderr) + + +def get_version() -> str: + """Get the package version.""" + try: + return metadata.version("mcp-aichat") + except metadata.PackageNotFoundError: + return "dev" + + +def main() -> None: + """Run the MCP AIChat server.""" + parser = argparse.ArgumentParser( + description="MCP AIChat Server - AI Dialogue via AceDataCloud", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + mcp-aichat # Run with stdio transport (default) + mcp-aichat --transport http # Run with HTTP transport + mcp-aichat --version # Show version + +Environment Variables: + ACEDATACLOUD_API_TOKEN API token from AceDataCloud (required) + AICHAT_REQUEST_TIMEOUT Request timeout in seconds (default: 60) + LOG_LEVEL Logging level (default: INFO) + """, + ) + parser.add_argument( + "--version", + action="version", + version=f"mcp-aichat {get_version()}", + ) + parser.add_argument( + "--transport", + choices=["stdio", "http"], + default="stdio", + help="Transport mode (default: stdio)", + ) + parser.add_argument( + "--port", + type=int, + default=8000, + help="Port for HTTP transport (default: 8000)", + ) + args = parser.parse_args() + + # Print startup banner + safe_print("") + 
safe_print("=" * 50) + safe_print(" MCP AIChat Server - AI Dialogue") + safe_print("=" * 50) + safe_print("") + safe_print(f" Version: {get_version()}") + safe_print(f" Transport: {args.transport}") + safe_print(f" Log Level: {settings.log_level}") + safe_print("") + + # Validate configuration + if not settings.is_configured and args.transport != "http": + safe_print(" [ERROR] ACEDATACLOUD_API_TOKEN not configured!") + safe_print(" Get your token from https://platform.acedata.cloud") + safe_print("") + sys.exit(1) + + if args.transport == "http": + safe_print(" [OK] HTTP mode - tokens from request headers") + else: + safe_print(" [OK] API token configured") + safe_print("") + + # Import tools and prompts to register them + safe_print(" Loading tools and prompts...") + import prompts # noqa: F401, I001 + import tools # noqa: F401 + + safe_print(" [OK] Tools and prompts loaded") + safe_print("") + safe_print(" Available tools:") + safe_print(" - aichat_conversation") + safe_print(" - aichat_list_models") + safe_print(" - aichat_get_usage_guide") + safe_print("") + safe_print(" Available prompts:") + safe_print(" - aichat_usage_guide") + safe_print(" - aichat_model_selection_guide") + safe_print("") + safe_print("=" * 50) + safe_print(" Ready for MCP connections") + safe_print("=" * 50) + safe_print("") + + # Run the server + try: + if args.transport == "http": + import contextlib + + import uvicorn + from starlette.applications import Starlette + from starlette.requests import Request + from starlette.responses import JSONResponse, RedirectResponse + from starlette.routing import Mount, Route + + from core.server import oauth_provider + + async def health(_request: Request) -> JSONResponse: + return JSONResponse({"status": "ok"}) + + async def favicon(_request: Request) -> RedirectResponse: + return RedirectResponse("https://cdn.acedata.cloud/2870040497.png", status_code=301) + + async def server_card(_request: Request) -> JSONResponse: + """MCP Server Card for 
Smithery and other registries.""" + return JSONResponse( + { + "serverInfo": {"name": "MCP AIChat"}, + "authentication": {"required": True, "schemes": ["bearer"]}, + "tools": [ + { + "name": "aichat_conversation", + "description": "Send a question to an AI model and get an answer", + }, + { + "name": "aichat_list_models", + "description": "List available AI chat models", + }, + { + "name": "aichat_get_usage_guide", + "description": "Get usage guide for AIChat tools", + }, + ], + "prompts": [ + { + "name": "aichat_usage_guide", + "description": "Guide for using AIChat conversation tool", + }, + { + "name": "aichat_model_selection_guide", + "description": "Guide for selecting the right AI model", + }, + ], + "resources": [], + } + ) + + @contextlib.asynccontextmanager + async def lifespan(_app: Starlette): # type: ignore[no-untyped-def] + async with mcp.session_manager.run(): + yield + + mcp.settings.stateless_http = True + mcp.settings.json_response = True + mcp.settings.streamable_http_path = "/mcp" + + # Build routes + routes: list[Route | Mount] = [ + Route("/health", health), + Route("/favicon.ico", favicon), + Route("/.well-known/mcp/server-card.json", server_card), + ] + + # Add OAuth callback route if OAuth is enabled + if oauth_provider: + routes.append(Route("/oauth/callback", oauth_provider.handle_callback)) + + routes.append(Mount("/", app=mcp.streamable_http_app())) + + app = Starlette(routes=routes, lifespan=lifespan) + uvicorn.run(app, host="0.0.0.0", port=args.port) + else: + mcp.run(transport="stdio") + except KeyboardInterrupt: + safe_print("\nShutdown requested") + sys.exit(0) + except Exception as e: + logger.error(f"Server error: {e}", exc_info=True) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/aichat/prompts/__init__.py b/aichat/prompts/__init__.py new file mode 100644 index 0000000..8f12484 --- /dev/null +++ b/aichat/prompts/__init__.py @@ -0,0 +1,72 @@ +"""Prompt templates for AIChat MCP server. 
+
+MCP Prompts provide guidance to LLMs on when and how to use the available tools.
+These are exposed via the MCP protocol and help LLMs make better decisions.
+"""
+
+from core.server import mcp
+
+
+@mcp.prompt()
+def aichat_usage_guide() -> str:
+    """Guide for using the AIChat conversation tool effectively."""
+    return """# AIChat Usage Guide
+
+When the user wants to ask a question or have a conversation with an AI model, use the
+`aichat_conversation` tool.
+
+## Single-Turn Question
+**Use when:** User asks a one-off question
+```
+aichat_conversation(question="What is the capital of France?")
+```
+
+## Multi-Turn Conversation
+**Use when:** User wants to continue a conversation
+1. First call: `aichat_conversation(question="...", stateful=True)`
+2. Extract `id` from the response
+3. Follow-up: `aichat_conversation(question="...", stateful=True, conversation_id="<id-from-step-2>")`
+
+## Model Selection
+- **General tasks**: gpt-4.1 (default)
+- **Reasoning/math**: deepseek-r1, o3, o4-mini
+- **Cost efficiency**: gpt-4.1-mini, gpt-4o-mini
+- **Chinese language**: glm-4.6, glm-4.7
+- **Latest capabilities**: gpt-5.5, gpt-5.4
+
+## Available Models
+Call `aichat_list_models` to see the full list of available models.
+"""
+
+
+@mcp.prompt()
+def aichat_model_selection_guide() -> str:
+    """Guide for selecting the right AI model for a task."""
+    return """# AIChat Model Selection Guide
+
+## For General Conversational Tasks
+Use **gpt-4.1** (default) - balanced capability and cost.
+
+## For Complex Reasoning and Math
+Use **deepseek-r1**, **o3**, or **o4-mini** - these models excel at step-by-step reasoning.
+
+## For Code Generation
+Use **gpt-4.1**, **gpt-4o**, or **deepseek-v3** - strong coding capabilities.
+
+## For Cost-Efficient Tasks
+Use **gpt-4.1-mini** or **gpt-4o-mini** - faster and cheaper for simpler tasks.
+
+## For Cutting-Edge Performance
+Use **gpt-5.5**, **gpt-5.4**, or **gpt-5** - latest generation models.
+ +## For Chinese Language Tasks +Use **glm-4.6**, **glm-4.7**, or **glm-5.1** - optimized for Chinese content. + +## For Browsing/Search Tasks +Use **gpt-4o-search-preview** or **gpt-4o-mini-search-preview** - have web search access. + +## Tips +- Always use `aichat_list_models` to get the current full list of available models +- For multi-turn conversations, set `stateful=True` and pass the conversation `id` +- Use `references` to provide URLs as context for the model +""" diff --git a/aichat/pyproject.toml b/aichat/pyproject.toml new file mode 100644 index 0000000..0c457b4 --- /dev/null +++ b/aichat/pyproject.toml @@ -0,0 +1,131 @@ +[project] +name = "mcp-aichat" +version = "2026.1.0.1" +description = "MCP Server for AI Dialogue via AceDataCloud API" +readme = "README.md" +license = { text = "MIT" } +requires-python = ">=3.10" +authors = [ + { name = "AceDataCloud", email = "support@acedata.cloud" } +] +maintainers = [ + { name = "AceDataCloud", email = "support@acedata.cloud" } +] +keywords = [ + "mcp", + "model-context-protocol", + "aichat", + "ai", + "dialogue", + "chat", + "gpt", + "llm", + "acedata", + "claude", +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "mcp>=1.2.0", + "httpx>=0.27.0", + "python-dotenv>=1.0.0", + "pydantic>=2.0.0", + "loguru>=0.7.0", +] + +[project.optional-dependencies] +dev = [ + "ruff>=0.4.0", + "mypy>=1.10.0", + "pre-commit>=3.7.0", +] +test = [ + "pytest>=8.0.0", + "pytest-asyncio>=0.23.0", + "pytest-cov>=5.0.0", + "respx>=0.21.0", +] +release = [ + "build>=1.2.0", + "twine>=5.0.0", +] +all = [ + "mcp-aichat[dev,test,release]", +] + 
+[project.scripts] +mcp-aichat = "main:main" + +[project.urls] +Homepage = "https://github.com/AceDataCloud/AIChatMCP" +Documentation = "https://platform.acedata.cloud/documents/aichat" +Repository = "https://github.com/AceDataCloud/AIChatMCP" +Issues = "https://github.com/AceDataCloud/AIChatMCP/issues" +Changelog = "https://github.com/AceDataCloud/AIChatMCP/blob/main/CHANGELOG.md" + +[build-system] +requires = ["hatchling>=1.21.0,<1.30.0"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["core", "tools", "prompts", "main.py"] + +[tool.hatch.build.targets.sdist] +include = [ + "core/", + "tools/", + "prompts/", + "tests/", + "main.py", + "README.md", + "LICENSE", + "CHANGELOG.md", + ".env.example", +] + +# Mypy Configuration +[tool.mypy] +python_version = "3.10" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +ignore_missing_imports = true + +# Pytest Configuration +[tool.pytest.ini_options] +testpaths = ["tests"] +asyncio_mode = "auto" +addopts = [ + "-v", + "--tb=short", + "--strict-markers", +] +markers = [ + "integration: marks tests as integration tests (deselect with '-m \"not integration\"')", + "slow: marks tests as slow (deselect with '-m \"not slow\"')", +] + +# Coverage Configuration +[tool.coverage.run] +source = ["core", "tools"] +branch = true +omit = ["tests/*"] + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "def __repr__", + "raise NotImplementedError", + "if TYPE_CHECKING:", +] diff --git a/aichat/server.json b/aichat/server.json new file mode 100644 index 0000000..9df11b0 --- /dev/null +++ b/aichat/server.json @@ -0,0 +1,36 @@ +{ + "$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json", + "name": "io.github.AceDataCloud/mcp-aichat", + "description": "MCP server for AI Dialogue via AceDataCloud AIChat API", + "version": "2026.1.0.1", + "repository": { + "url": 
"https://github.com/AceDataCloud/AIChatMCP", + "source": "github" + }, + "packages": [ + { + "identifier": "mcp-aichat", + "version": "2026.1.0.1", + "transport": { + "type": "stdio" + }, + "registryType": "pypi", + "runtimeHint": "uvx", + "environmentVariables": [ + { + "name": "ACEDATACLOUD_API_TOKEN", + "description": "API token from Ace Data Cloud (https://platform.acedata.cloud)", + "isRequired": true, + "isSecret": true + } + ] + } + ], + "websiteUrl": "https://aichat.mcp.acedata.cloud", + "remotes": [ + { + "type": "streamable-http", + "url": "https://aichat.mcp.acedata.cloud/mcp" + } + ] +} diff --git a/aichat/smithery.yaml b/aichat/smithery.yaml new file mode 100644 index 0000000..583c6ad --- /dev/null +++ b/aichat/smithery.yaml @@ -0,0 +1,23 @@ +# Smithery configuration file: https://smithery.ai/docs/build/publish +# Defines how this MCP server is built and configured on Smithery + +startCommand: + type: stdio + configSchema: + type: object + properties: + ACEDATACLOUD_API_TOKEN: + type: string + description: "API token from Ace Data Cloud (https://platform.acedata.cloud)" + required: + - ACEDATACLOUD_API_TOKEN + commandFunction: + # Installs from PyPI and runs via uvx + - |- + (config) => ({{ + command: 'uvx', + args: ['mcp-aichat'], + env: {{ + ACEDATACLOUD_API_TOKEN: config.ACEDATACLOUD_API_TOKEN + }} + }}) diff --git a/aichat/tests/__init__.py b/aichat/tests/__init__.py new file mode 100644 index 0000000..d735197 --- /dev/null +++ b/aichat/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for MCP AIChat server.""" diff --git a/aichat/tests/conftest.py b/aichat/tests/conftest.py new file mode 100644 index 0000000..17a0d97 --- /dev/null +++ b/aichat/tests/conftest.py @@ -0,0 +1,50 @@ +"""Pytest configuration and fixtures.""" + +import os +import sys +from pathlib import Path + +import pytest + +# Add project root to path +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +# Load .env file BEFORE any other imports +from dotenv 
import load_dotenv + +load_dotenv(dotenv_path=project_root / ".env") + +# Set default log level for tests +os.environ.setdefault("LOG_LEVEL", "DEBUG") + + +@pytest.fixture +def api_token(): + """Get API token from environment for integration tests.""" + token = os.environ.get("ACEDATACLOUD_API_TOKEN", "") + if not token: + pytest.skip("ACEDATACLOUD_API_TOKEN not configured for integration tests") + return token + + +@pytest.fixture +def mock_conversation_response(): + """Mock successful conversation response.""" + return { + "id": "64a67fff-61dc-4801-8339-2c69334c61d6", + "answer": "I am a highly intelligent question answering AI. If you ask me a question, " + "I will do my best to give you a helpful answer.", + } + + +@pytest.fixture +def mock_error_response(): + """Mock error response.""" + return { + "error": { + "code": "invalid_request", + "message": "Invalid parameters provided", + }, + "trace_id": "2efa9340-b21b-4e26-9e14-4aac95f343ab", + } diff --git a/aichat/tests/test_client.py b/aichat/tests/test_client.py new file mode 100644 index 0000000..92d1159 --- /dev/null +++ b/aichat/tests/test_client.py @@ -0,0 +1,126 @@ +"""Unit tests for HTTP client.""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import httpx +import pytest + +from core.client import AIChatClient +from core.exceptions import AIChatAPIError, AIChatAuthError, AIChatTimeoutError + + +@pytest.fixture +def client(): + """Create a client instance for testing.""" + return AIChatClient(api_token="test-token", base_url="https://api.test.com") + + +class TestAIChatClient: + """Tests for AIChatClient class.""" + + def test_init_with_params(self): + """Test client initialization with explicit parameters.""" + client = AIChatClient(api_token="my-token", base_url="https://custom.api.com") + assert client.api_token == "my-token" + assert client.base_url == "https://custom.api.com" + + def test_get_headers(self, client): + """Test that headers are correctly generated.""" + headers = 
client._get_headers() + assert headers["accept"] == "application/json" + assert headers["authorization"] == "Bearer test-token" + assert headers["content-type"] == "application/json" + + def test_get_headers_no_token(self): + """Test that missing token raises auth error.""" + client = AIChatClient(api_token="", base_url="https://api.test.com") + with pytest.raises(AIChatAuthError, match="not configured"): + client._get_headers() + + @pytest.mark.asyncio + async def test_request_success(self, client, mock_conversation_response): + """Test successful API request.""" + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_conversation_response + + with patch("httpx.AsyncClient") as mock_http: + mock_instance = AsyncMock() + mock_instance.post.return_value = mock_response + mock_http.return_value.__aenter__.return_value = mock_instance + + result = await client.request( + "/aichat/conversations", + {"model": "gpt-4.1", "question": "Hello!"}, + ) + assert result == mock_conversation_response + + @pytest.mark.asyncio + async def test_request_auth_error_401(self, client): + """Test 401 response raises auth error.""" + mock_response = MagicMock() + mock_response.status_code = 401 + mock_response.json.return_value = { + "error": {"code": "invalid_token", "message": "Invalid API token"} + } + mock_response.text = "Invalid API token" + + with patch("httpx.AsyncClient") as mock_http: + mock_instance = AsyncMock() + mock_instance.post.return_value = mock_response + mock_http.return_value.__aenter__.return_value = mock_instance + + with pytest.raises(AIChatAuthError, match="Invalid API token"): + await client.request("/aichat/conversations", {}) + + @pytest.mark.asyncio + async def test_request_timeout(self, client): + """Test timeout raises timeout error.""" + with patch("httpx.AsyncClient") as mock_http: + mock_instance = AsyncMock() + mock_instance.post.side_effect = httpx.TimeoutException("Timeout") + 
mock_http.return_value.__aenter__.return_value = mock_instance + + with pytest.raises(AIChatTimeoutError, match="timed out"): + await client.request("/aichat/conversations", {}) + + @pytest.mark.asyncio + async def test_request_http_error(self, client): + """Test HTTP error raises API error.""" + mock_response = MagicMock() + mock_response.status_code = 500 + mock_response.json.return_value = { + "error": {"code": "api_error", "message": "Internal Server Error"} + } + mock_response.text = "Internal Server Error" + + with patch("httpx.AsyncClient") as mock_http: + mock_instance = AsyncMock() + mock_instance.post.return_value = mock_response + mock_http.return_value.__aenter__.return_value = mock_instance + + with pytest.raises(AIChatAPIError, match="Internal Server Error") as exc_info: + await client.request("/aichat/conversations", {}) + + assert exc_info.value.status_code == 500 + + @pytest.mark.asyncio + async def test_conversations_method(self, client, mock_conversation_response): + """Test the conversations convenience method.""" + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = mock_conversation_response + + with patch("httpx.AsyncClient") as mock_http: + mock_instance = AsyncMock() + mock_instance.post.return_value = mock_response + mock_http.return_value.__aenter__.return_value = mock_instance + + result = await client.conversations( + model="gpt-4.1", + question="What is machine learning?", + ) + assert result == mock_conversation_response + + call_args = mock_instance.post.call_args + assert "/aichat/conversations" in call_args[0][0] diff --git a/aichat/tests/test_config.py b/aichat/tests/test_config.py new file mode 100644 index 0000000..aca6beb --- /dev/null +++ b/aichat/tests/test_config.py @@ -0,0 +1,64 @@ +"""Unit tests for configuration.""" + +import os +from unittest.mock import patch + +import pytest + +from core.config import Settings + + +class TestSettings: + """Tests for Settings configuration 
class.""" + + def test_default_values(self): + """Test default settings values.""" + with patch.dict(os.environ, {}, clear=True): + settings = Settings() + assert settings.api_base_url == "https://api.acedata.cloud" + assert settings.api_token == "" + assert settings.request_timeout == 60.0 + assert settings.server_name == "aichat" + assert settings.log_level == "INFO" + + def test_custom_values_from_env(self): + """Test settings loaded from environment variables.""" + with patch.dict( + os.environ, + { + "ACEDATACLOUD_API_TOKEN": "test-token-123", + "ACEDATACLOUD_API_BASE_URL": "https://custom.api.com", + "AICHAT_REQUEST_TIMEOUT": "120", + "LOG_LEVEL": "DEBUG", + }, + ): + settings = Settings() + assert settings.api_token == "test-token-123" + assert settings.api_base_url == "https://custom.api.com" + assert settings.request_timeout == 120.0 + assert settings.log_level == "DEBUG" + + def test_is_configured_with_token(self): + """Test is_configured returns True when token is set.""" + with patch.dict(os.environ, {"ACEDATACLOUD_API_TOKEN": "my-token"}): + settings = Settings() + assert settings.is_configured is True + + def test_is_configured_without_token(self): + """Test is_configured returns False when token is not set.""" + with patch.dict(os.environ, {}, clear=True): + settings = Settings() + assert settings.is_configured is False + + def test_validate_raises_without_token(self): + """Test validate raises ValueError when token is missing.""" + with patch.dict(os.environ, {}, clear=True): + settings = Settings() + with pytest.raises(ValueError, match="ACEDATACLOUD_API_TOKEN"): + settings.validate() + + def test_validate_passes_with_token(self): + """Test validate passes when token is set.""" + with patch.dict(os.environ, {"ACEDATACLOUD_API_TOKEN": "valid-token"}): + settings = Settings() + settings.validate() # Should not raise diff --git a/aichat/tools/__init__.py b/aichat/tools/__init__.py new file mode 100644 index 0000000..0193082 --- /dev/null +++ 
b/aichat/tools/__init__.py @@ -0,0 +1,9 @@ +"""Tools module for MCP AIChat server.""" + +# Import all tools to register them with the MCP server +from tools import chat_tools, info_tools + +__all__ = [ + "chat_tools", + "info_tools", +] diff --git a/aichat/tools/chat_tools.py b/aichat/tools/chat_tools.py new file mode 100644 index 0000000..754a9a2 --- /dev/null +++ b/aichat/tools/chat_tools.py @@ -0,0 +1,105 @@ +"""Chat tools for AIChat API.""" + +import json +from typing import Annotated + +from pydantic import Field + +from core.client import client +from core.exceptions import AIChatAPIError, AIChatAuthError +from core.server import mcp +from core.types import ( + DEFAULT_CHAT_MODEL, + ChatModel, +) + + +@mcp.tool() +async def aichat_conversation( + question: Annotated[ + str, + Field(description="The prompt or question to be answered. Required."), + ], + model: Annotated[ + ChatModel, + Field( + description=( + "The model to use for answering the prompt. Options include gpt-4.1, gpt-4o, " + "gpt-5, o1, o3, o4-mini, deepseek-r1, deepseek-v3, grok-3, glm-4.6, and many " + "more. Default is gpt-4.1." + ) + ), + ] = DEFAULT_CHAT_MODEL, + conversation_id: Annotated[ + str | None, + Field( + description=( + "The unique identifier of an existing conversation. Provide this to continue " + "a previous conversation when stateful is True." + ) + ), + ] = None, + stateful: Annotated[ + bool | None, + Field( + description=( + "Whether to use stateful conversation mode. When True, the conversation " + "history is maintained across requests using the conversation id." + ) + ), + ] = None, + preset: Annotated[ + str | None, + Field(description="The preset model configuration to use for answering the prompt."), + ] = None, + references: Annotated[ + list[str] | None, + Field( + description=( + "A list of reference URLs or sources to be used as context when " + "answering the prompt." 
+ ) + ), + ] = None, +) -> str: + """Send a question to an AI model and get an answer via AceDataCloud AIChat API. + + Supports a wide range of models including GPT, o-series, DeepSeek, Grok, and GLM. + Optionally supports stateful conversations to maintain context across multiple turns. + + Use this when: + - You need to ask a question to an AI model + - You want to have a multi-turn conversation (use stateful=True and pass conversation_id) + - You need to use a specific model like DeepSeek, Grok, or GLM + + Returns: + JSON response containing the answer and conversation id. + """ + try: + payload: dict = { + "question": question, + "model": model, + } + + if conversation_id is not None: + payload["id"] = conversation_id + if stateful is not None: + payload["stateful"] = stateful + if preset is not None: + payload["preset"] = preset + if references is not None: + payload["references"] = references + + result = await client.conversations(**payload) + + if not result: + return json.dumps({"error": "No response received."}) + + return json.dumps(result, ensure_ascii=False, indent=2) + + except AIChatAuthError as e: + return json.dumps({"error": "Authentication Error", "message": e.message}) + except AIChatAPIError as e: + return json.dumps({"error": "API Error", "message": e.message}) + except Exception as e: + return json.dumps({"error": "Error creating conversation", "message": str(e)}) diff --git a/aichat/tools/info_tools.py b/aichat/tools/info_tools.py new file mode 100644 index 0000000..86be40b --- /dev/null +++ b/aichat/tools/info_tools.py @@ -0,0 +1,195 @@ +"""Informational tools for AIChat API.""" + +from core.server import mcp + + +@mcp.tool() +async def aichat_list_models() -> str: + """List all available AI chat models. + + Shows all models available for the aichat conversations endpoint, + including GPT, o-series, DeepSeek, Grok, and GLM models. + + Returns: + Table of all available models with descriptions. 
+ """ + # Last updated: 2026-04-25 + return """Available AIChat Models: + +## GPT-5 Series +| Model | Description | +|---------------|------------------------------------------| +| gpt-5.5 | Latest GPT-5.5 model | +| gpt-5.5-pro | GPT-5.5 Pro variant | +| gpt-5.4 | GPT-5.4 model | +| gpt-5.4-pro | GPT-5.4 Pro variant | +| gpt-5.2 | GPT-5.2 model | +| gpt-5.1 | GPT-5.1 model | +| gpt-5.1-all | GPT-5.1 all-access variant | +| gpt-5 | GPT-5 standard | +| gpt-5-mini | GPT-5 Mini - cost efficient | +| gpt-5-nano | GPT-5 Nano - ultra cost efficient | +| gpt-5-all | GPT-5 all-access variant | + +## GPT-4 Series +| Model | Description | +|--------------------------------|------------------------------------------| +| gpt-4 | GPT-4 standard | +| gpt-4-all | GPT-4 all-access | +| gpt-4-turbo | GPT-4 Turbo | +| gpt-4-turbo-preview | GPT-4 Turbo Preview | +| gpt-4-vision-preview | GPT-4 Vision Preview | +| gpt-4.1 | GPT-4.1 (recommended default) | +| gpt-4.1-2025-04-14 | GPT-4.1 dated variant | +| gpt-4.1-mini | GPT-4.1 Mini - cost efficient | +| gpt-4.1-mini-2025-04-14 | GPT-4.1 Mini dated variant | +| gpt-4.1-nano | GPT-4.1 Nano - ultra cost efficient | +| gpt-4.1-nano-2025-04-14 | GPT-4.1 Nano dated variant | +| gpt-4.5-preview | GPT-4.5 Preview | +| gpt-4.5-preview-2025-02-27 | GPT-4.5 Preview dated variant | +| gpt-4o | GPT-4o multimodal | +| gpt-4o-2024-05-13 | GPT-4o dated variant | +| gpt-4o-2024-08-06 | GPT-4o dated variant | +| gpt-4o-2024-11-20 | GPT-4o dated variant | +| gpt-4o-all | GPT-4o all-access | +| gpt-4o-image | GPT-4o with image capabilities | +| gpt-4o-mini | GPT-4o Mini | +| gpt-4o-mini-2024-07-18 | GPT-4o Mini dated variant | +| gpt-4o-mini-search-preview | GPT-4o Mini with search | +| gpt-4o-search-preview | GPT-4o with search | + +## Reasoning (o-series) +| Model | Description | +|--------------------------|-------------------------------------------| +| o1 | o1 - original reasoning model | +| o1-2024-12-17 | o1 dated variant | +| o1-all | o1 
all-access | +| o1-mini | o1 Mini | +| o1-mini-2024-09-12 | o1 Mini dated variant | +| o1-mini-all | o1 Mini all-access | +| o1-preview | o1 Preview | +| o1-pro | o1 Pro | +| o1-pro-2025-03-19 | o1 Pro dated variant | +| o1-pro-all | o1 Pro all-access | +| o3 | o3 - advanced reasoning | +| o3-2025-04-16 | o3 dated variant | +| o3-all | o3 all-access | +| o3-mini | o3 Mini | +| o3-mini-high | o3 Mini (high effort) | +| o3-mini-low | o3 Mini (low effort) | +| o3-mini-medium | o3 Mini (medium effort) | +| o3-pro | o3 Pro | +| o3-pro-2025-06-10 | o3 Pro dated variant | +| o4-mini | o4 Mini - fast reasoning | +| o4-mini-2025-04-16 | o4 Mini dated variant | +| o4-mini-all | o4 Mini all-access | +| o4-mini-high-all | o4 Mini high-access | + +## DeepSeek Models +| Model | Description | +|--------------------|------------------------------------------| +| deepseek-r1 | DeepSeek R1 reasoning model | +| deepseek-r1-0528 | DeepSeek R1 dated variant | +| deepseek-v3 | DeepSeek V3 general model | +| deepseek-v3-250324 | DeepSeek V3 dated variant | + +## Other Models +| Model | Description | +|--------------|------------------------------------------| +| grok-3 | Grok 3 by xAI | +| glm-5.1 | GLM-5.1 by Zhipu AI | +| glm-4.7 | GLM-4.7 by Zhipu AI | +| glm-4.6 | GLM-4.6 by Zhipu AI | +| glm-4.5-air | GLM-4.5 Air by Zhipu AI | +| glm-3-turbo | GLM-3 Turbo by Zhipu AI | +""" + + +@mcp.tool() +async def aichat_get_usage_guide() -> str: + """Get a comprehensive guide for using the AIChat tools. + + Provides detailed information on how to use all available AIChat tools + effectively, including examples and best practices. + + Returns: + Complete usage guide for AIChat tools. 
+ """ + # Last updated: 2026-04-25 + return """# AIChat Tools Usage Guide + + ## Available Tools + + ### Conversation + **aichat_conversation** - Send a question to an AI model and get an answer + - question: The prompt or question (required) + - model: AI model to use (default: gpt-4.1) + - conversation_id: Existing conversation ID for stateful continuation + - stateful: Enable conversation memory across turns + - preset: Preset model configuration + - references: List of reference URLs for context + + ### Information Tools + - **aichat_list_models** - List all available AI models + - **aichat_get_usage_guide** - This guide + + ## Example Usage + + ### Basic Question + ``` + aichat_conversation( + question="What is machine learning?" + ) + ``` + + ### Using a Specific Model + ``` + aichat_conversation( + question="Explain quantum entanglement", + model="gpt-4o" + ) + ``` + + ### Multi-Turn Stateful Conversation + ``` + # First turn - start a conversation + result = aichat_conversation( + question="What is Python?", + model="gpt-4.1", + stateful=True + ) + # Extract conversation id from result["id"] + + # Second turn - continue the conversation + aichat_conversation( + question="Show me a simple example", + model="gpt-4.1", + stateful=True, + conversation_id="<conversation-id-from-first-turn>" + ) + ``` + + ### Using DeepSeek for Reasoning + ``` + aichat_conversation( + question="Solve this step by step: A train travels 120km in 2 hours...", + model="deepseek-r1" + ) + ``` + + ### With References + ``` + aichat_conversation( + question="Summarize the content at this URL", + model="gpt-4.1", + references=["https://example.com/article"] + ) + ``` + + ## Best Practices + + 1. **Model selection**: Use gpt-4.1 for general tasks, deepseek-r1 for complex reasoning + 2. **Stateful conversations**: Use stateful=True and pass conversation_id for multi-turn chats + 3. **References**: Provide URLs as references when you want the model to consider external content + 4. 
**Preset**: Use preset to configure the model with specific behavior settings +""" diff --git a/sync.yaml b/sync.yaml index 9537811..af39021 100644 --- a/sync.yaml +++ b/sync.yaml @@ -63,3 +63,7 @@ mappings: repo: AceDataCloud/OpenAIMCP exclude: - .gitbooks + aichat: + repo: AceDataCloud/AIChatMCP + exclude: + - .gitbooks