"""
Helper functions to access LLMs using LiteLLM.
"""
import logging
import re
import urllib3
from typing import Tuple, Union, Iterator, Optional


from ..global_config import GlobalConfig

try:
    import litellm
    from litellm import completion

    litellm.drop_params = True

    # Ask LiteLLM to suppress debug information if possible
    try:
        litellm.suppress_debug_info = True
    except AttributeError:
        # Attribute not available in this version of LiteLLM
        pass

except ImportError:
    litellm = None
    completion = None


LLM_PROVIDER_MODEL_REGEX = re.compile(r'\[(.*?)\](.*)')
OLLAMA_MODEL_REGEX = re.compile(r'[a-zA-Z0-9._:-]+$')
# 6 to 200 characters long, containing only alphanumeric characters, hyphens, and underscores
API_KEY_REGEX = re.compile(r'^[a-zA-Z0-9_-]{6,200}$')


logger = logging.getLogger(__name__)


def get_provider_model(provider_model: str, use_ollama: bool) -> Tuple[str, str]:
    """
    Parse and get LLM provider and model name from strings like `[provider]model/name-version`.

    :param provider_model: The provider, model name string from `GlobalConfig`.
    :param use_ollama: Whether Ollama is used (i.e., running in offline mode).
    :return: The provider and the model name; empty strings in case no matching pattern found.
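
    Example (illustrative; assumes `hf` is listed in `GlobalConfig.VALID_PROVIDERS`):

    >>> get_provider_model('[hf]mistralai/Mistral-7B-Instruct-v0.2', use_ollama=False)
    ('hf', 'mistralai/Mistral-7B-Instruct-v0.2')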
    """
    provider_model = provider_model.strip()

    if use_ollama:
        match = OLLAMA_MODEL_REGEX.match(provider_model)
        if match:
            return GlobalConfig.PROVIDER_OLLAMA, match.group(0)
    else:
        match = LLM_PROVIDER_MODEL_REGEX.match(provider_model)

        if match:
            inside_brackets = match.group(1)
            outside_brackets = match.group(2)
            
            # Validate that the provider is in the valid providers list
            if inside_brackets not in GlobalConfig.VALID_PROVIDERS:
                logger.warning(
                    "Provider '%s' not in VALID_PROVIDERS: %s",
                    inside_brackets, GlobalConfig.VALID_PROVIDERS
                )
                return '', ''
            
            # Validate that the model name is not empty
            if not outside_brackets.strip():
                logger.warning("Empty model name for provider '%s'", inside_brackets)
                return '', ''
            
            return inside_brackets, outside_brackets

    logger.warning(
        "Could not parse provider_model: '%s' (use_ollama=%s)",
        provider_model, use_ollama
    )
    return '', ''


def is_valid_llm_provider_model(
        provider: str,
        model: str,
        api_key: str,
        azure_endpoint_url: str = '',
        azure_deployment_name: str = '',
        azure_api_version: str = '',
) -> bool:
    """
    Verify whether LLM settings are proper.
    This function does not verify whether `api_key` is correct. It only confirms that the key
    matches the expected format (6 to 200 characters). Key verification is done when the LLM
    is created.

    :param provider: Name of the LLM provider.
    :param model: Name of the model.
    :param api_key: The API key or access token.
    :param azure_endpoint_url: Azure OpenAI endpoint URL.
    :param azure_deployment_name: Azure OpenAI deployment name.
    :param azure_api_version: Azure OpenAI API version.
    :return: `True` if the settings "look" OK; `False` otherwise.
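
    Example (an empty provider name always fails the check):

    >>> is_valid_llm_provider_model(provider='', model='some-model', api_key='some-api-key')
    False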
    """
    if not provider or not model or provider not in GlobalConfig.VALID_PROVIDERS:
        return False

    if provider != GlobalConfig.PROVIDER_OLLAMA:
        # No API key is required for offline Ollama models
        if not api_key or API_KEY_REGEX.match(api_key) is None:
            return False

    if provider == GlobalConfig.PROVIDER_AZURE_OPENAI:
        try:
            parsed_url = urllib3.util.parse_url(azure_endpoint_url)
        except urllib3.exceptions.LocationParseError:
            return False

        # `str(parse_url(...))` is truthy for any non-empty string, so check that the
        # endpoint URL actually has a scheme and a host
        return all(
            [azure_api_version, azure_deployment_name, parsed_url.scheme, parsed_url.host]
        )

    return True


def get_litellm_model_name(provider: str, model: str) -> Optional[str]:
    """
    Convert provider and model to LiteLLM model name format.
    
    Note: Azure OpenAI models are handled separately in stream_litellm_completion()
    and should not be passed to this function.
    
    :param provider: The LLM provider.
    :param model: The model name.
    :return: LiteLLM-compatible model name, or None if provider is not supported.
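
    Example (illustrative; assumes `GlobalConfig.LITELLM_PROVIDER_MAPPING` maps `hf` to
    `huggingface`):

    >>> get_litellm_model_name('hf', 'mistralai/Mistral-7B-Instruct-v0.2')
    'huggingface/mistralai/Mistral-7B-Instruct-v0.2'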
    """
    prefix = GlobalConfig.LITELLM_PROVIDER_MAPPING.get(provider)
    if prefix:
        return f'{prefix}/{model}'
    # LiteLLM always expects a prefix for model names; if not found, return None
    return None


def stream_litellm_completion(
        provider: str,
        model: str,
        messages: list,
        max_tokens: int,
        api_key: str = '',
        azure_endpoint_url: str = '',
        azure_deployment_name: str = '',
        azure_api_version: str = '',
) -> Iterator[str]:
    """
    Stream completion from LiteLLM.

    :param provider: The LLM provider.
    :param model: The name of the LLM.
    :param messages: List of messages for the chat completion.
    :param max_tokens: The maximum number of tokens to generate.
    :param api_key: API key or access token to use.
    :param azure_endpoint_url: Azure OpenAI endpoint URL.
    :param azure_deployment_name: Azure OpenAI deployment name.
    :param azure_api_version: Azure OpenAI API version.
    :return: Iterator of response chunks.
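
    Example (sketch only; the provider key, model name, and API key below are placeholders):

        messages = [{'role': 'user', 'content': 'Hello!'}]
        for chunk in stream_litellm_completion(
                'hf', 'mistralai/Mistral-7B-Instruct-v0.2', messages,
                max_tokens=256, api_key='hf_xxxx',
        ):
            print(chunk, end='')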
    """
    if litellm is None:
        raise ImportError("LiteLLM is not installed. Please install it with: pip install litellm")
    
    # Convert to LiteLLM model name
    if provider == GlobalConfig.PROVIDER_AZURE_OPENAI:
        # For Azure OpenAI, use the deployment name as the model
        # This is consistent with Azure OpenAI's requirement to use deployment names
        if not azure_deployment_name:
            raise ValueError("Azure deployment name is required for Azure OpenAI provider")
        litellm_model = f'azure/{azure_deployment_name}'
    else:
        litellm_model = get_litellm_model_name(provider, model)
        if not litellm_model:
            raise ValueError(f"Invalid model name: {model} for provider: {provider}")
    
    # Prepare the request parameters
    request_params = {
        'model': litellm_model,
        'messages': messages,
        'max_tokens': max_tokens,
        'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
        'stream': True,
    }
    
    # Set the API key and any provider-specific params; offline Ollama needs no key
    if provider != GlobalConfig.PROVIDER_OLLAMA:
        request_params['api_key'] = api_key

        if provider == GlobalConfig.PROVIDER_AZURE_OPENAI:
            # Azure OpenAI additionally needs the endpoint URL and the API version
            request_params['api_base'] = azure_endpoint_url
            request_params['api_version'] = azure_api_version
    
    logger.debug('Streaming completion via LiteLLM: %s', litellm_model)
    
    try:
        response = litellm.completion(**request_params)
        
        for chunk in response:
            if hasattr(chunk, 'choices') and chunk.choices:
                choice = chunk.choices[0]
                if hasattr(choice, 'delta') and hasattr(choice.delta, 'content'):
                    if choice.delta.content:
                        yield choice.delta.content
                elif hasattr(choice, 'message') and hasattr(choice.message, 'content'):
                    if choice.message.content:
                        yield choice.message.content
                        
    except Exception as e:
        logger.exception('Error in LiteLLM completion: %s', e)
        raise


def get_litellm_llm(
        provider: str,
        model: str,
        max_new_tokens: int,
        api_key: str = '',
        azure_endpoint_url: str = '',
        azure_deployment_name: str = '',
        azure_api_version: str = '',
) -> Union[object, None]:
    """
    Get a LiteLLM-compatible object for streaming.

    :param provider: The LLM provider.
    :param model: The name of the LLM.
    :param max_new_tokens: The maximum number of tokens to generate.
    :param api_key: API key or access token to use.
    :param azure_endpoint_url: Azure OpenAI endpoint URL.
    :param azure_deployment_name: Azure OpenAI deployment name.
    :param azure_api_version: Azure OpenAI API version.
    :return: A LiteLLM-compatible object for streaming; `None` in case of any error.
    """
    if litellm is None:
        # Return `None` on error, as promised by the docstring and the return type
        logger.error('LiteLLM is not installed. Please install it with: pip install litellm')
        return None
    
    # Create a simple wrapper object that mimics the LangChain streaming interface
    class LiteLLMWrapper:
        def __init__(
                self, provider, model, max_tokens, api_key, azure_endpoint_url,
                azure_deployment_name, azure_api_version
        ):
            self.provider = provider
            self.model = model
            self.max_tokens = max_tokens
            self.api_key = api_key
            self.azure_endpoint_url = azure_endpoint_url
            self.azure_deployment_name = azure_deployment_name
            self.azure_api_version = azure_api_version
        
        def stream(self, prompt: str):
            messages = [{'role': 'user', 'content': prompt}]
            return stream_litellm_completion(
                provider=self.provider,
                model=self.model,
                messages=messages,
                max_tokens=self.max_tokens,
                api_key=self.api_key,
                azure_endpoint_url=self.azure_endpoint_url,
                azure_deployment_name=self.azure_deployment_name,
                azure_api_version=self.azure_api_version,
            )
    
    logger.debug('Creating LiteLLM wrapper for: %s', model)
    return LiteLLMWrapper(
        provider=provider,
        model=model,
        max_tokens=max_new_tokens,
        api_key=api_key,
        azure_endpoint_url=azure_endpoint_url,
        azure_deployment_name=azure_deployment_name,
        azure_api_version=azure_api_version,
    )


# Keep the old function name for backward compatibility
get_langchain_llm = get_litellm_llm


if __name__ == '__main__':
    inputs = [
        '[co]Cohere',
        '[hf]mistralai/Mistral-7B-Instruct-v0.2',
        '[gg]gemini-1.5-flash-002'
    ]

    for text in inputs:
        print(get_provider_model(text, use_ollama=False))
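
    # Offline (Ollama) parsing path; the model tag below is illustrative
    print(get_provider_model('mistral:instruct', use_ollama=True))

    # A minimal streaming sketch (not executed here; the provider key, model
    # name, and API key are placeholders, not working credentials):
    #
    # llm = get_litellm_llm(
    #     provider='hf',
    #     model='mistralai/Mistral-7B-Instruct-v0.2',
    #     max_new_tokens=512,
    #     api_key='hf_xxxxxxxxxxxxxxxxxxxx',
    # )
    # if llm:
    #     for chunk in llm.stream('Write a haiku about slide decks.'):
    #         print(chunk, end='', flush=True)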