import os
from typing import Any, Dict, Generator, List, Optional

import requests


class LaasApiClient:
    """
    A client for interacting with the LAAS API.

    Usage:
        client = LaasApiClient()
        response = client.call_llm_preset(
            params={},
            model="your_model",
            messages=[],
            service_type="AZURE",
            max_tokens=0,
            function_call={},
            response_format={"type": "string"},
            source_count=0,
        )
        print(response)

        # For chat completions:
        chat_response = client.chat_completions(
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(chat_response)

        # For streaming:
        for chunk in client.stream(messages=[{"role": "user", "content": "Tell me a story"}]):
            print(chunk, end="", flush=True)
    """

    BASE_URL = "https://api-laas.wanted.co.kr"

    def __init__(
        self,
        base_url: str = BASE_URL,
        api_key: Optional[str] = None,
        project: Optional[str] = None,
        hash: Optional[str] = None,
    ):
        self.base_url = base_url
        # Credentials and the preset hash fall back to environment variables
        # when not passed explicitly. ("hash" mirrors the LAAS parameter name
        # and intentionally shadows the builtin within this scope.)
        self.hash = hash or os.environ.get("LAAS_HASH")
        self.headers = {
            "Content-Type": "application/json",
            "apiKey": api_key or os.environ.get("LAAS_API_KEY"),
            "project": project or os.environ.get("LAAS_PROJECT"),
        }
        if not self.headers["apiKey"]:
            raise ValueError("API key is required to use the LAAS API.")
        if not self.headers["project"]:
            raise ValueError("Project is required to use the LAAS API.")
        if not self.hash:
            raise ValueError("Hash is required to use the LAAS API.")

    def _make_api_call(self, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Make an API call to the specified endpoint with the given payload."""
        url = f"{self.base_url}/api/{endpoint}"
        response = requests.post(
            url, headers=self.headers, json={"hash": self.hash, **payload}
        )
        response.raise_for_status()  # Raise an exception for HTTP errors
        return response.json()
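
    # Note (illustrative, not from the source): every request merges the
    # preset hash into the JSON body, so a chat_completions(...) call posts
    # roughly:
    #   {"hash": "<LAAS_HASH>", "messages": [{"role": "user", "content": "..."}]}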

    def call_llm_preset(self, complete: bool = False, **kwargs) -> Dict[str, Any]:
        """
        Call the LLM preset API.

        Args:
            complete (bool): Whether to use the 'complete' endpoint.
            **kwargs: Keyword arguments for the API call.

        Returns:
            Dict[str, Any]: The JSON response from the API.
        """
        endpoint = "preset/complete" if complete else "preset"
        return self._make_api_call(endpoint, kwargs)
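
    # Sketch (hypothetical values): the same kwargs work against either
    # endpoint; complete=True targets "preset/complete" instead of "preset".
    #
    #   draft = client.call_llm_preset(params={"topic": "onboarding"})
    #   final = client.call_llm_preset(complete=True, params={"topic": "onboarding"})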

    def chat_completions(
        self, messages: List[Dict[str, str]], **kwargs
    ) -> Dict[str, Any]:
        """
        Call the chat completions API.

        Args:
            messages (List[Dict[str, str]]): List of message dictionaries.
            **kwargs: Additional keyword arguments for the API call.

        Returns:
            Dict[str, Any]: The JSON response from the API.
        """
        payload = {
            "messages": messages,
            **kwargs,
        }
        return self._make_api_call("preset/chat/completions", payload)

    def stream(
        self, messages: List[Dict[str, str]], **kwargs
    ) -> Generator[str, None, None]:
        """
        Stream the chat completions API response.

        Args:
            messages (List[Dict[str, str]]): List of message dictionaries.
            **kwargs: Additional keyword arguments for the API call.

        Yields:
            str: Chunks of the streaming response.
        """
        url = f"{self.base_url}/api/preset/chat/completions"
        payload = {
            "hash": self.hash,
            "messages": messages,
            "stream": True,
            **kwargs,
        }
        with requests.post(
            url, headers=self.headers, json=payload, stream=True
        ) as response:
            response.raise_for_status()
            for line in response.iter_lines():
                # Non-empty lines are yielded verbatim; depending on the
                # server, they may carry SSE framing (e.g. "data: " prefixes).
                if line:
                    yield line.decode("utf-8")
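

# Minimal end-to-end sketch, assuming LAAS_API_KEY, LAAS_PROJECT, and
# LAAS_HASH are set in the environment; the prompt text is illustrative only.
if __name__ == "__main__":
    client = LaasApiClient()

    # Blocking call: returns the full JSON response in one shot.
    print(client.chat_completions(messages=[{"role": "user", "content": "Hello!"}]))

    # Streaming call: print chunks as they arrive.
    for chunk in client.stream(messages=[{"role": "user", "content": "Tell me a story"}]):
        print(chunk, end="", flush=True)
    print()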