app.ai_providers.kimi_provider
Moonshot Kimi API provider implementation.
Uses the openai Python SDK pointed at the Moonshot Kimi API base URL.
Supports synchronous and streaming generation, CSV file attachments via
the Responses API, and automatic model-alias mapping for deprecated Kimi
model identifiers.
Attributes:
- logger: Module-level logger for Kimi provider operations.
"""Moonshot Kimi API provider implementation.

Uses the ``openai`` Python SDK pointed at the Moonshot Kimi API base URL.
Supports synchronous and streaming generation, CSV file attachments via
the Responses API, and automatic model-alias mapping for deprecated Kimi
model identifiers.

Attributes:
    logger: Module-level logger for Kimi provider operations.
"""

from __future__ import annotations

import logging
from typing import Any, Callable, Iterator, Mapping

from .base import (
    AIProvider,
    AIProviderError,
    DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS,
    DEFAULT_KIMI_BASE_URL,
    DEFAULT_KIMI_FILE_UPLOAD_PURPOSE,
    DEFAULT_KIMI_MODEL,
    DEFAULT_MAX_TOKENS,
    _is_attachment_unsupported_error,
    _is_context_length_error,
    _is_kimi_model_not_available_error,
    _normalize_api_key_value,
    _normalize_kimi_model_name,
    _normalize_openai_compatible_base_url,
    _resolve_timeout_seconds,
    _run_with_rate_limit_retries,
    _T,
)
from .utils import (
    _extract_openai_delta_text,
    _extract_openai_text,
    _inline_attachment_data_into_prompt,
    upload_and_request_via_responses_api,
)

logger = logging.getLogger(__name__)


class KimiProvider(AIProvider):
    """Moonshot Kimi API provider implementation.

    Attributes:
        api_key (str): The Moonshot/Kimi API key.
        model (str): The Kimi model identifier.
        base_url (str): The normalized Kimi API base URL.
        attach_csv_as_file (bool): Whether to upload CSV artifacts as
            file attachments.
        request_timeout_seconds (float): HTTP timeout in seconds.
        client: The ``openai.OpenAI`` SDK client instance configured for Kimi.
    """

    def __init__(
        self,
        api_key: str,
        model: str = DEFAULT_KIMI_MODEL,
        base_url: str = DEFAULT_KIMI_BASE_URL,
        attach_csv_as_file: bool = True,
        request_timeout_seconds: float = DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS,
    ) -> None:
        """Initialize the Kimi provider.

        Args:
            api_key: Moonshot/Kimi API key. Must be non-empty.
            model: Kimi model identifier. Deprecated aliases are mapped.
            base_url: Kimi API base URL.
            attach_csv_as_file: If ``True``, attempt file uploads.
            request_timeout_seconds: HTTP timeout in seconds.

        Raises:
            AIProviderError: If the ``openai`` SDK is not installed or
                the API key is empty.
        """
        # Import lazily so the module stays importable without the SDK;
        # only constructing a provider requires it.
        try:
            import openai
        except ImportError as error:
            raise AIProviderError(
                "openai SDK is not installed. Install it with `pip install openai`."
            ) from error

        normalized_api_key = _normalize_api_key_value(api_key)
        if not normalized_api_key:
            raise AIProviderError(
                "Kimi API key is not configured. "
                "Set `ai.kimi.api_key` in config.yaml or the MOONSHOT_API_KEY environment variable."
            )

        self._openai = openai
        self.api_key = normalized_api_key
        # Map deprecated Kimi model aliases to their current identifiers.
        self.model = _normalize_kimi_model_name(model)
        self.base_url = _normalize_openai_compatible_base_url(
            base_url=base_url,
            default_base_url=DEFAULT_KIMI_BASE_URL,
        )
        self.attach_csv_as_file = bool(attach_csv_as_file)
        # None = unknown; set True/False after the first attachment attempt.
        self._csv_attachment_supported: bool | None = None
        self.request_timeout_seconds = _resolve_timeout_seconds(
            request_timeout_seconds,
            DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS,
        )
        self.client = openai.OpenAI(
            api_key=normalized_api_key,
            base_url=self.base_url,
            timeout=self.request_timeout_seconds,
        )
        logger.info(
            "Initialized Kimi provider at %s with model %s (timeout %.1fs)",
            self.base_url,
            self.model,
            self.request_timeout_seconds,
        )

    def analyze(
        self,
        system_prompt: str,
        user_prompt: str,
        max_tokens: int = DEFAULT_MAX_TOKENS,
    ) -> str:
        """Send a prompt to Kimi and return the generated text.

        Args:
            system_prompt: The system-level instruction text.
            user_prompt: The user-facing prompt with investigation context.
            max_tokens: Maximum completion tokens.

        Returns:
            The generated analysis text.

        Raises:
            AIProviderError: On any API or network failure.
        """
        # Delegate to the attachment-aware path with no attachments.
        return self.analyze_with_attachments(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            attachments=None,
            max_tokens=max_tokens,
        )

    def analyze_stream(
        self,
        system_prompt: str,
        user_prompt: str,
        max_tokens: int = DEFAULT_MAX_TOKENS,
    ) -> Iterator[str]:
        """Stream generated text chunks from Kimi.

        Args:
            system_prompt: The system-level instruction text.
            user_prompt: The user-facing prompt with investigation context.
            max_tokens: Maximum completion tokens.

        Yields:
            Text chunk strings as they are generated.

        Raises:
            AIProviderError: On empty response or API failure.
        """
        def _stream() -> Iterator[str]:
            # Stream creation goes through the shared retry/error wrapper;
            # iteration errors are mapped below because they surface lazily,
            # after _run_kimi_request has already returned.
            stream = self._run_kimi_request(
                lambda: self.client.chat.completions.create(
                    model=self.model,
                    max_tokens=max_tokens,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": user_prompt},
                    ],
                    stream=True,
                )
            )
            emitted = False
            try:
                for chunk in stream:
                    choices = getattr(chunk, "choices", None)
                    if not choices:
                        continue
                    choice = choices[0]
                    delta = getattr(choice, "delta", None)
                    # Some endpoints return plain dicts instead of SDK objects.
                    if delta is None and isinstance(choice, dict):
                        delta = choice.get("delta")
                    chunk_text = _extract_openai_delta_text(
                        delta,
                        ("content", "reasoning_content", "reasoning", "refusal"),
                    )
                    if not chunk_text:
                        continue
                    emitted = True
                    yield chunk_text
            except AIProviderError:
                raise
            except Exception as error:
                raise self._translate_kimi_error(error) from error

            if not emitted:
                raise AIProviderError("Kimi returned an empty response.")

        return _stream()

    def analyze_with_attachments(
        self,
        system_prompt: str,
        user_prompt: str,
        attachments: list[Mapping[str, str]] | None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
    ) -> str:
        """Analyze with optional CSV file attachments via the Kimi Responses API.

        Args:
            system_prompt: The system-level instruction text.
            user_prompt: The user-facing prompt with investigation context.
            attachments: Optional list of attachment descriptors.
            max_tokens: Maximum completion tokens.

        Returns:
            The generated analysis text.

        Raises:
            AIProviderError: On any API or network failure.
        """
        def _request() -> str:
            return self._request_non_stream(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                max_tokens=max_tokens,
                attachments=attachments,
            )

        return self._run_kimi_request(_request)

    def _run_kimi_request(self, request_fn: Callable[[], _T]) -> _T:
        """Execute a Kimi request with rate-limit retries and error mapping.

        Args:
            request_fn: A zero-argument callable that performs the request.

        Returns:
            The return value of ``request_fn`` on success.

        Raises:
            AIProviderError: On any OpenAI SDK error (with Kimi messages).
        """
        try:
            return _run_with_rate_limit_retries(
                request_fn=request_fn,
                rate_limit_error_type=self._openai.RateLimitError,
                provider_name="Kimi",
            )
        except AIProviderError:
            raise
        except Exception as error:
            raise self._translate_kimi_error(error) from error

    def _translate_kimi_error(self, error: Exception) -> AIProviderError:
        """Map an OpenAI SDK exception to an ``AIProviderError`` with a Kimi message.

        Shared by the non-streaming request path and the streaming iterator,
        which must map errors separately because stream iteration raises
        lazily. Check order matters: ``BadRequestError`` is a subclass of
        ``APIError`` in the openai SDK, so it is tested first.

        Args:
            error: The exception raised by the openai SDK (or anything else).

        Returns:
            An ``AIProviderError`` describing the failure; callers re-raise
            it with ``raise ... from error`` to preserve the cause chain.
        """
        openai = self._openai
        if isinstance(error, openai.APIConnectionError):
            return AIProviderError(
                "Unable to connect to Kimi API. Check `ai.kimi.base_url` and network access."
            )
        if isinstance(error, openai.AuthenticationError):
            return AIProviderError(
                "Kimi authentication failed. Check `ai.kimi.api_key`, MOONSHOT_API_KEY, or KIMI_API_KEY."
            )
        if isinstance(error, openai.BadRequestError):
            if _is_context_length_error(error):
                return AIProviderError(
                    "Kimi request exceeded the model context length. Reduce prompt size and retry."
                )
            return AIProviderError(f"Kimi request was rejected: {error}")
        if isinstance(error, openai.APIError):
            if _is_kimi_model_not_available_error(error):
                return AIProviderError(
                    "Kimi rejected the configured model. "
                    f"Current model: `{self.model}`. "
                    "Set `ai.kimi.model` to a model enabled for your Moonshot account "
                    "(for example `kimi-k2-turbo-preview`) and retry."
                )
            return AIProviderError(f"Kimi API error: {error}")
        return AIProviderError(f"Unexpected Kimi provider error: {error}")

    def _request_non_stream(
        self,
        system_prompt: str,
        user_prompt: str,
        max_tokens: int,
        attachments: list[Mapping[str, str]] | None = None,
    ) -> str:
        """Perform a non-streaming Kimi request with attachment handling.

        Args:
            system_prompt: The system-level instruction text.
            user_prompt: The user-facing prompt text.
            max_tokens: Maximum completion tokens.
            attachments: Optional list of attachment descriptors.

        Returns:
            The generated analysis text.

        Raises:
            AIProviderError: If the response is empty.
        """
        # First try true file attachments via the Responses API.
        attachment_response = self._request_with_csv_attachments(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            max_tokens=max_tokens,
            attachments=attachments,
        )
        if attachment_response:
            return attachment_response

        # Fallback: inline attachment contents directly into the prompt text.
        prompt_for_completion = user_prompt
        if attachments:
            prompt_for_completion, inlined = _inline_attachment_data_into_prompt(
                user_prompt=user_prompt,
                attachments=attachments,
            )
            if inlined:
                logger.info("Kimi attachment fallback inlined attachment data into prompt.")

        response = self.client.chat.completions.create(
            model=self.model,
            max_tokens=max_tokens,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt_for_completion},
            ],
        )
        text = _extract_openai_text(response)
        if not text:
            raise AIProviderError("Kimi returned an empty response.")
        return text

    def _request_with_csv_attachments(
        self,
        system_prompt: str,
        user_prompt: str,
        max_tokens: int,
        attachments: list[Mapping[str, str]] | None,
    ) -> str | None:
        """Attempt to send a request with CSV files via the Kimi Responses API.

        Args:
            system_prompt: The system-level instruction text.
            user_prompt: The user-facing prompt text.
            max_tokens: Maximum completion tokens.
            attachments: Optional list of attachment descriptors.

        Returns:
            The generated text if succeeded, or ``None`` if skipped.
        """
        normalized_attachments = self._prepare_csv_attachments(
            attachments,
            # Older SDKs may lack the files/responses surfaces entirely.
            supports_file_attachments=hasattr(self.client, "files") and hasattr(self.client, "responses"),
        )
        if not normalized_attachments:
            return None

        try:
            text = upload_and_request_via_responses_api(
                client=self.client,
                openai_module=self._openai,
                model=self.model,
                normalized_attachments=normalized_attachments,
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                max_tokens=max_tokens,
                provider_name="Kimi",
                upload_purpose=DEFAULT_KIMI_FILE_UPLOAD_PURPOSE,
                convert_csv_to_txt=False,
            )
            self._csv_attachment_supported = True
            return text
        except Exception as error:
            if _is_attachment_unsupported_error(error):
                # Remember the capability so future calls can skip the upload.
                self._csv_attachment_supported = False
                logger.info(
                    "Kimi endpoint does not support CSV attachments via /files + /responses; "
                    "falling back to chat.completions text mode."
                )
                return None
            raise

    def get_model_info(self) -> dict[str, str]:
        """Return Kimi provider and model metadata.

        Returns:
            A dictionary with ``"provider"`` and ``"model"`` keys.
        """
        return {"provider": "kimi", "model": self.model}
logger = logging.getLogger(__name__)  # module-level logger for Kimi provider operations
46class KimiProvider(AIProvider): 47 """Moonshot Kimi API provider implementation. 48 49 Attributes: 50 api_key (str): The Moonshot/Kimi API key. 51 model (str): The Kimi model identifier. 52 base_url (str): The normalized Kimi API base URL. 53 attach_csv_as_file (bool): Whether to upload CSV artifacts as 54 file attachments. 55 request_timeout_seconds (float): HTTP timeout in seconds. 56 client: The ``openai.OpenAI`` SDK client instance configured for Kimi. 57 """ 58 59 def __init__( 60 self, 61 api_key: str, 62 model: str = DEFAULT_KIMI_MODEL, 63 base_url: str = DEFAULT_KIMI_BASE_URL, 64 attach_csv_as_file: bool = True, 65 request_timeout_seconds: float = DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS, 66 ) -> None: 67 """Initialize the Kimi provider. 68 69 Args: 70 api_key: Moonshot/Kimi API key. Must be non-empty. 71 model: Kimi model identifier. Deprecated aliases are mapped. 72 base_url: Kimi API base URL. 73 attach_csv_as_file: If ``True``, attempt file uploads. 74 request_timeout_seconds: HTTP timeout in seconds. 75 76 Raises: 77 AIProviderError: If the ``openai`` SDK is not installed or 78 the API key is empty. 79 """ 80 try: 81 import openai 82 except ImportError as error: 83 raise AIProviderError( 84 "openai SDK is not installed. Install it with `pip install openai`." 85 ) from error 86 87 normalized_api_key = _normalize_api_key_value(api_key) 88 if not normalized_api_key: 89 raise AIProviderError( 90 "Kimi API key is not configured. " 91 "Set `ai.kimi.api_key` in config.yaml or the MOONSHOT_API_KEY environment variable." 
92 ) 93 94 self._openai = openai 95 self.api_key = normalized_api_key 96 self.model = _normalize_kimi_model_name(model) 97 self.base_url = _normalize_openai_compatible_base_url( 98 base_url=base_url, 99 default_base_url=DEFAULT_KIMI_BASE_URL, 100 ) 101 self.attach_csv_as_file = bool(attach_csv_as_file) 102 self._csv_attachment_supported: bool | None = None 103 self.request_timeout_seconds = _resolve_timeout_seconds( 104 request_timeout_seconds, 105 DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS, 106 ) 107 self.client = openai.OpenAI( 108 api_key=normalized_api_key, 109 base_url=self.base_url, 110 timeout=self.request_timeout_seconds, 111 ) 112 logger.info("Initialized Kimi provider at %s with model %s (timeout %.1fs)", self.base_url, self.model, self.request_timeout_seconds) 113 114 def analyze( 115 self, 116 system_prompt: str, 117 user_prompt: str, 118 max_tokens: int = DEFAULT_MAX_TOKENS, 119 ) -> str: 120 """Send a prompt to Kimi and return the generated text. 121 122 Args: 123 system_prompt: The system-level instruction text. 124 user_prompt: The user-facing prompt with investigation context. 125 max_tokens: Maximum completion tokens. 126 127 Returns: 128 The generated analysis text. 129 130 Raises: 131 AIProviderError: On any API or network failure. 132 """ 133 return self.analyze_with_attachments( 134 system_prompt=system_prompt, 135 user_prompt=user_prompt, 136 attachments=None, 137 max_tokens=max_tokens, 138 ) 139 140 def analyze_stream( 141 self, 142 system_prompt: str, 143 user_prompt: str, 144 max_tokens: int = DEFAULT_MAX_TOKENS, 145 ) -> Iterator[str]: 146 """Stream generated text chunks from Kimi. 147 148 Args: 149 system_prompt: The system-level instruction text. 150 user_prompt: The user-facing prompt with investigation context. 151 max_tokens: Maximum completion tokens. 152 153 Yields: 154 Text chunk strings as they are generated. 155 156 Raises: 157 AIProviderError: On empty response or API failure. 
158 """ 159 def _stream() -> Iterator[str]: 160 stream = self._run_kimi_request( 161 lambda: self.client.chat.completions.create( 162 model=self.model, 163 max_tokens=max_tokens, 164 messages=[ 165 {"role": "system", "content": system_prompt}, 166 {"role": "user", "content": user_prompt}, 167 ], 168 stream=True, 169 ) 170 ) 171 emitted = False 172 try: 173 for chunk in stream: 174 choices = getattr(chunk, "choices", None) 175 if not choices: 176 continue 177 choice = choices[0] 178 delta = getattr(choice, "delta", None) 179 if delta is None and isinstance(choice, dict): 180 delta = choice.get("delta") 181 chunk_text = _extract_openai_delta_text( 182 delta, 183 ("content", "reasoning_content", "reasoning", "refusal"), 184 ) 185 if not chunk_text: 186 continue 187 emitted = True 188 yield chunk_text 189 except AIProviderError: 190 raise 191 except self._openai.APIConnectionError as error: 192 raise AIProviderError( 193 "Unable to connect to Kimi API. Check `ai.kimi.base_url` and network access." 194 ) from error 195 except self._openai.AuthenticationError as error: 196 raise AIProviderError( 197 "Kimi authentication failed. Check `ai.kimi.api_key`, MOONSHOT_API_KEY, or KIMI_API_KEY." 198 ) from error 199 except self._openai.BadRequestError as error: 200 if _is_context_length_error(error): 201 raise AIProviderError( 202 "Kimi request exceeded the model context length. Reduce prompt size and retry." 203 ) from error 204 raise AIProviderError(f"Kimi request was rejected: {error}") from error 205 except self._openai.APIError as error: 206 if _is_kimi_model_not_available_error(error): 207 raise AIProviderError( 208 "Kimi rejected the configured model. " 209 f"Current model: `{self.model}`. " 210 "Set `ai.kimi.model` to a model enabled for your Moonshot account " 211 "(for example `kimi-k2-turbo-preview`) and retry." 
212 ) from error 213 raise AIProviderError(f"Kimi API error: {error}") from error 214 except Exception as error: 215 raise AIProviderError(f"Unexpected Kimi provider error: {error}") from error 216 217 if not emitted: 218 raise AIProviderError("Kimi returned an empty response.") 219 220 return _stream() 221 222 def analyze_with_attachments( 223 self, 224 system_prompt: str, 225 user_prompt: str, 226 attachments: list[Mapping[str, str]] | None, 227 max_tokens: int = DEFAULT_MAX_TOKENS, 228 ) -> str: 229 """Analyze with optional CSV file attachments via the Kimi Responses API. 230 231 Args: 232 system_prompt: The system-level instruction text. 233 user_prompt: The user-facing prompt with investigation context. 234 attachments: Optional list of attachment descriptors. 235 max_tokens: Maximum completion tokens. 236 237 Returns: 238 The generated analysis text. 239 240 Raises: 241 AIProviderError: On any API or network failure. 242 """ 243 def _request() -> str: 244 return self._request_non_stream( 245 system_prompt=system_prompt, 246 user_prompt=user_prompt, 247 max_tokens=max_tokens, 248 attachments=attachments, 249 ) 250 251 return self._run_kimi_request(_request) 252 253 def _run_kimi_request(self, request_fn: Callable[[], _T]) -> _T: 254 """Execute a Kimi request with rate-limit retries and error mapping. 255 256 Args: 257 request_fn: A zero-argument callable that performs the request. 258 259 Returns: 260 The return value of ``request_fn`` on success. 261 262 Raises: 263 AIProviderError: On any OpenAI SDK error (with Kimi messages). 264 """ 265 try: 266 return _run_with_rate_limit_retries( 267 request_fn=request_fn, 268 rate_limit_error_type=self._openai.RateLimitError, 269 provider_name="Kimi", 270 ) 271 except AIProviderError: 272 raise 273 except self._openai.APIConnectionError as error: 274 raise AIProviderError( 275 "Unable to connect to Kimi API. Check `ai.kimi.base_url` and network access." 
276 ) from error 277 except self._openai.AuthenticationError as error: 278 raise AIProviderError( 279 "Kimi authentication failed. Check `ai.kimi.api_key`, MOONSHOT_API_KEY, or KIMI_API_KEY." 280 ) from error 281 except self._openai.BadRequestError as error: 282 if _is_context_length_error(error): 283 raise AIProviderError( 284 "Kimi request exceeded the model context length. Reduce prompt size and retry." 285 ) from error 286 raise AIProviderError(f"Kimi request was rejected: {error}") from error 287 except self._openai.APIError as error: 288 if _is_kimi_model_not_available_error(error): 289 raise AIProviderError( 290 "Kimi rejected the configured model. " 291 f"Current model: `{self.model}`. " 292 "Set `ai.kimi.model` to a model enabled for your Moonshot account " 293 "(for example `kimi-k2-turbo-preview`) and retry." 294 ) from error 295 raise AIProviderError(f"Kimi API error: {error}") from error 296 except Exception as error: 297 raise AIProviderError(f"Unexpected Kimi provider error: {error}") from error 298 299 def _request_non_stream( 300 self, 301 system_prompt: str, 302 user_prompt: str, 303 max_tokens: int, 304 attachments: list[Mapping[str, str]] | None = None, 305 ) -> str: 306 """Perform a non-streaming Kimi request with attachment handling. 307 308 Args: 309 system_prompt: The system-level instruction text. 310 user_prompt: The user-facing prompt text. 311 max_tokens: Maximum completion tokens. 312 attachments: Optional list of attachment descriptors. 313 314 Returns: 315 The generated analysis text. 316 317 Raises: 318 AIProviderError: If the response is empty. 
319 """ 320 attachment_response = self._request_with_csv_attachments( 321 system_prompt=system_prompt, 322 user_prompt=user_prompt, 323 max_tokens=max_tokens, 324 attachments=attachments, 325 ) 326 if attachment_response: 327 return attachment_response 328 329 prompt_for_completion = user_prompt 330 if attachments: 331 prompt_for_completion, inlined = _inline_attachment_data_into_prompt( 332 user_prompt=user_prompt, 333 attachments=attachments, 334 ) 335 if inlined: 336 logger.info("Kimi attachment fallback inlined attachment data into prompt.") 337 338 response = self.client.chat.completions.create( 339 model=self.model, 340 max_tokens=max_tokens, 341 messages=[ 342 {"role": "system", "content": system_prompt}, 343 {"role": "user", "content": prompt_for_completion}, 344 ], 345 ) 346 text = _extract_openai_text(response) 347 if not text: 348 raise AIProviderError("Kimi returned an empty response.") 349 return text 350 351 def _request_with_csv_attachments( 352 self, 353 system_prompt: str, 354 user_prompt: str, 355 max_tokens: int, 356 attachments: list[Mapping[str, str]] | None, 357 ) -> str | None: 358 """Attempt to send a request with CSV files via the Kimi Responses API. 359 360 Args: 361 system_prompt: The system-level instruction text. 362 user_prompt: The user-facing prompt text. 363 max_tokens: Maximum completion tokens. 364 attachments: Optional list of attachment descriptors. 365 366 Returns: 367 The generated text if succeeded, or ``None`` if skipped. 
368 """ 369 normalized_attachments = self._prepare_csv_attachments( 370 attachments, 371 supports_file_attachments=hasattr(self.client, "files") and hasattr(self.client, "responses"), 372 ) 373 if not normalized_attachments: 374 return None 375 376 try: 377 text = upload_and_request_via_responses_api( 378 client=self.client, 379 openai_module=self._openai, 380 model=self.model, 381 normalized_attachments=normalized_attachments, 382 system_prompt=system_prompt, 383 user_prompt=user_prompt, 384 max_tokens=max_tokens, 385 provider_name="Kimi", 386 upload_purpose=DEFAULT_KIMI_FILE_UPLOAD_PURPOSE, 387 convert_csv_to_txt=False, 388 ) 389 self._csv_attachment_supported = True 390 return text 391 except Exception as error: 392 if _is_attachment_unsupported_error(error): 393 self._csv_attachment_supported = False 394 logger.info( 395 "Kimi endpoint does not support CSV attachments via /files + /responses; " 396 "falling back to chat.completions text mode." 397 ) 398 return None 399 raise 400 401 def get_model_info(self) -> dict[str, str]: 402 """Return Kimi provider and model metadata. 403 404 Returns: 405 A dictionary with ``"provider"`` and ``"model"`` keys. 406 """ 407 return {"provider": "kimi", "model": self.model}
Moonshot Kimi API provider implementation.
Attributes:
- api_key (str): The Moonshot/Kimi API key.
- model (str): The Kimi model identifier.
- base_url (str): The normalized Kimi API base URL.
- attach_csv_as_file (bool): Whether to upload CSV artifacts as file attachments.
- request_timeout_seconds (float): HTTP timeout in seconds.
- client: The `openai.OpenAI` SDK client instance configured for Kimi.
KimiProvider( api_key: str, model: str = 'kimi-k2-turbo-preview', base_url: str = 'https://api.moonshot.ai/v1', attach_csv_as_file: bool = True, request_timeout_seconds: float = 600.0)
59 def __init__( 60 self, 61 api_key: str, 62 model: str = DEFAULT_KIMI_MODEL, 63 base_url: str = DEFAULT_KIMI_BASE_URL, 64 attach_csv_as_file: bool = True, 65 request_timeout_seconds: float = DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS, 66 ) -> None: 67 """Initialize the Kimi provider. 68 69 Args: 70 api_key: Moonshot/Kimi API key. Must be non-empty. 71 model: Kimi model identifier. Deprecated aliases are mapped. 72 base_url: Kimi API base URL. 73 attach_csv_as_file: If ``True``, attempt file uploads. 74 request_timeout_seconds: HTTP timeout in seconds. 75 76 Raises: 77 AIProviderError: If the ``openai`` SDK is not installed or 78 the API key is empty. 79 """ 80 try: 81 import openai 82 except ImportError as error: 83 raise AIProviderError( 84 "openai SDK is not installed. Install it with `pip install openai`." 85 ) from error 86 87 normalized_api_key = _normalize_api_key_value(api_key) 88 if not normalized_api_key: 89 raise AIProviderError( 90 "Kimi API key is not configured. " 91 "Set `ai.kimi.api_key` in config.yaml or the MOONSHOT_API_KEY environment variable." 92 ) 93 94 self._openai = openai 95 self.api_key = normalized_api_key 96 self.model = _normalize_kimi_model_name(model) 97 self.base_url = _normalize_openai_compatible_base_url( 98 base_url=base_url, 99 default_base_url=DEFAULT_KIMI_BASE_URL, 100 ) 101 self.attach_csv_as_file = bool(attach_csv_as_file) 102 self._csv_attachment_supported: bool | None = None 103 self.request_timeout_seconds = _resolve_timeout_seconds( 104 request_timeout_seconds, 105 DEFAULT_CLOUD_REQUEST_TIMEOUT_SECONDS, 106 ) 107 self.client = openai.OpenAI( 108 api_key=normalized_api_key, 109 base_url=self.base_url, 110 timeout=self.request_timeout_seconds, 111 ) 112 logger.info("Initialized Kimi provider at %s with model %s (timeout %.1fs)", self.base_url, self.model, self.request_timeout_seconds)
Initialize the Kimi provider.
Arguments:
- api_key: Moonshot/Kimi API key. Must be non-empty.
- model: Kimi model identifier. Deprecated aliases are mapped.
- base_url: Kimi API base URL.
- attach_csv_as_file: If `True`, attempt file uploads.
- request_timeout_seconds: HTTP timeout in seconds.
Raises:
- AIProviderError: If the `openai` SDK is not installed or the API key is empty.
def analyze(self, system_prompt: str, user_prompt: str, max_tokens: int = 256000) -> str:
114 def analyze( 115 self, 116 system_prompt: str, 117 user_prompt: str, 118 max_tokens: int = DEFAULT_MAX_TOKENS, 119 ) -> str: 120 """Send a prompt to Kimi and return the generated text. 121 122 Args: 123 system_prompt: The system-level instruction text. 124 user_prompt: The user-facing prompt with investigation context. 125 max_tokens: Maximum completion tokens. 126 127 Returns: 128 The generated analysis text. 129 130 Raises: 131 AIProviderError: On any API or network failure. 132 """ 133 return self.analyze_with_attachments( 134 system_prompt=system_prompt, 135 user_prompt=user_prompt, 136 attachments=None, 137 max_tokens=max_tokens, 138 )
Send a prompt to Kimi and return the generated text.
Arguments:
- system_prompt: The system-level instruction text.
- user_prompt: The user-facing prompt with investigation context.
- max_tokens: Maximum completion tokens.
Returns:
The generated analysis text.
Raises:
- AIProviderError: On any API or network failure.
def analyze_stream(self, system_prompt: str, user_prompt: str, max_tokens: int = 256000) -> Iterator[str]:
140 def analyze_stream( 141 self, 142 system_prompt: str, 143 user_prompt: str, 144 max_tokens: int = DEFAULT_MAX_TOKENS, 145 ) -> Iterator[str]: 146 """Stream generated text chunks from Kimi. 147 148 Args: 149 system_prompt: The system-level instruction text. 150 user_prompt: The user-facing prompt with investigation context. 151 max_tokens: Maximum completion tokens. 152 153 Yields: 154 Text chunk strings as they are generated. 155 156 Raises: 157 AIProviderError: On empty response or API failure. 158 """ 159 def _stream() -> Iterator[str]: 160 stream = self._run_kimi_request( 161 lambda: self.client.chat.completions.create( 162 model=self.model, 163 max_tokens=max_tokens, 164 messages=[ 165 {"role": "system", "content": system_prompt}, 166 {"role": "user", "content": user_prompt}, 167 ], 168 stream=True, 169 ) 170 ) 171 emitted = False 172 try: 173 for chunk in stream: 174 choices = getattr(chunk, "choices", None) 175 if not choices: 176 continue 177 choice = choices[0] 178 delta = getattr(choice, "delta", None) 179 if delta is None and isinstance(choice, dict): 180 delta = choice.get("delta") 181 chunk_text = _extract_openai_delta_text( 182 delta, 183 ("content", "reasoning_content", "reasoning", "refusal"), 184 ) 185 if not chunk_text: 186 continue 187 emitted = True 188 yield chunk_text 189 except AIProviderError: 190 raise 191 except self._openai.APIConnectionError as error: 192 raise AIProviderError( 193 "Unable to connect to Kimi API. Check `ai.kimi.base_url` and network access." 194 ) from error 195 except self._openai.AuthenticationError as error: 196 raise AIProviderError( 197 "Kimi authentication failed. Check `ai.kimi.api_key`, MOONSHOT_API_KEY, or KIMI_API_KEY." 198 ) from error 199 except self._openai.BadRequestError as error: 200 if _is_context_length_error(error): 201 raise AIProviderError( 202 "Kimi request exceeded the model context length. Reduce prompt size and retry." 
203 ) from error 204 raise AIProviderError(f"Kimi request was rejected: {error}") from error 205 except self._openai.APIError as error: 206 if _is_kimi_model_not_available_error(error): 207 raise AIProviderError( 208 "Kimi rejected the configured model. " 209 f"Current model: `{self.model}`. " 210 "Set `ai.kimi.model` to a model enabled for your Moonshot account " 211 "(for example `kimi-k2-turbo-preview`) and retry." 212 ) from error 213 raise AIProviderError(f"Kimi API error: {error}") from error 214 except Exception as error: 215 raise AIProviderError(f"Unexpected Kimi provider error: {error}") from error 216 217 if not emitted: 218 raise AIProviderError("Kimi returned an empty response.") 219 220 return _stream()
Stream generated text chunks from Kimi.
Arguments:
- system_prompt: The system-level instruction text.
- user_prompt: The user-facing prompt with investigation context.
- max_tokens: Maximum completion tokens.
Yields:
Text chunk strings as they are generated.
Raises:
- AIProviderError: On empty response or API failure.
def analyze_with_attachments(self, system_prompt: str, user_prompt: str, attachments: list[typing.Mapping[str, str]] | None, max_tokens: int = 256000) -> str:
222 def analyze_with_attachments( 223 self, 224 system_prompt: str, 225 user_prompt: str, 226 attachments: list[Mapping[str, str]] | None, 227 max_tokens: int = DEFAULT_MAX_TOKENS, 228 ) -> str: 229 """Analyze with optional CSV file attachments via the Kimi Responses API. 230 231 Args: 232 system_prompt: The system-level instruction text. 233 user_prompt: The user-facing prompt with investigation context. 234 attachments: Optional list of attachment descriptors. 235 max_tokens: Maximum completion tokens. 236 237 Returns: 238 The generated analysis text. 239 240 Raises: 241 AIProviderError: On any API or network failure. 242 """ 243 def _request() -> str: 244 return self._request_non_stream( 245 system_prompt=system_prompt, 246 user_prompt=user_prompt, 247 max_tokens=max_tokens, 248 attachments=attachments, 249 ) 250 251 return self._run_kimi_request(_request)
Analyze with optional CSV file attachments via the Kimi Responses API.
Arguments:
- system_prompt: The system-level instruction text.
- user_prompt: The user-facing prompt with investigation context.
- attachments: Optional list of attachment descriptors.
- max_tokens: Maximum completion tokens.
Returns:
The generated analysis text.
Raises:
- AIProviderError: On any API or network failure.
def get_model_info(self) -> dict[str, str]:
401 def get_model_info(self) -> dict[str, str]: 402 """Return Kimi provider and model metadata. 403 404 Returns: 405 A dictionary with ``"provider"`` and ``"model"`` keys. 406 """ 407 return {"provider": "kimi", "model": self.model}
Return Kimi provider and model metadata.
Returns:
A dictionary with `"provider"` and `"model"` keys.