import os
import requests
import logging
import openai
from contextlib import contextmanager
from tenacity import retry, wait_random_exponential, stop_after_attempt
## Proxy handling:
http_proxy = ""
https_proxy = ""
http_proxy = os.environ.get("HTTP_PROXY", http_proxy)
https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)
# Reset the environment variables so no proxy is applied when one is not needed,
# which would otherwise cause global-proxy errors.
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""
# GPT_MODEL = "gpt-3.5-turbo-0613"
GPT_MODEL = "gpt-4"
@contextmanager
def retrieve_proxy(proxy=None):
    """
    1. If proxy is None, apply the saved proxy settings to the environment and yield them.
    2. If proxy is not None, temporarily use the given proxy without updating the saved settings.
    The previous environment variables are restored when the context exits.
    """
    global http_proxy, https_proxy
    if proxy is not None:
        old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
        os.environ["HTTP_PROXY"] = proxy
        os.environ["HTTPS_PROXY"] = proxy
        yield proxy, proxy
        # restore the old proxy
        os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
    else:
        old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
        os.environ["HTTP_PROXY"] = http_proxy
        os.environ["HTTPS_PROXY"] = https_proxy
        yield http_proxy, https_proxy  # yield the new proxy
        # restore the old proxy
        os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
def get_geoip(proxy=None):
    try:
        with retrieve_proxy(proxy):
            response = requests.get("https://ipapi.co/json/", timeout=5)
            data = response.json()
    except Exception as e:
        # Keep the return shape consistent with the success paths below.
        logging.error(f"Failed to look up IP geolocation: {e}")
        return -1, f"Failed to look up IP geolocation: {e}"
    country = data["country_name"]
    if country == "China":
        text = "**Your IP region: China. Please check your proxy settings immediately; using the API in an unsupported region may lead to your account being banned.**"
        logging.error(text)
        return -1, text
    else:
        text = f"Your IP region: {country}."
        logging.info(text)
        return 1, text
def chat_completion_request(
    messages,
    functions=None,
    function_call=None,
    model=GPT_MODEL,
    proxy=None,
    **kwargs,
):
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + openai.api_key,
    }
    json_data = {"model": model, "messages": messages}
    if functions is not None:
        json_data.update({"functions": functions})
    if function_call is not None:
        json_data.update({"function_call": function_call})
    if kwargs:
        json_data.update(kwargs)
    with retrieve_proxy(proxy):
        try:
            response = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers=headers,
                json=json_data,
            )
            return response
        except Exception as e:
            print("Unable to generate ChatCompletion response")
            print(f"Exception: {e}")
            return e
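# Sketch only: the tenacity helpers imported above are otherwise unused. One way to
# apply them is a retrying wrapper around chat_completion_request; the backoff
# parameters below are illustrative assumptions, not values from the original code.
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request_with_retry(*args, **kwargs):
    response = chat_completion_request(*args, **kwargs)
    # chat_completion_request returns the exception object on failure, so re-raise
    # it here to let tenacity trigger a retry.
    if isinstance(response, Exception):
        raise response
    return response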
def generate_messages(prompt):
    messages = []
    messages.append({"role": "system", "content": "You are a robot auto-control code generator."})
    messages.append({"role": "user", "content": f"{prompt}"})
    return messages
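# Minimal usage sketch (assumptions: openai.api_key is supplied via the
# OPENAI_API_KEY environment variable, and the prompt text is only an example).
if __name__ == "__main__":
    openai.api_key = os.environ.get("OPENAI_API_KEY", "")

    status, region_text = get_geoip()
    print(region_text)

    messages = generate_messages("Generate code that moves the robot arm to its home position.")
    response = chat_completion_request(messages)
    if isinstance(response, requests.Response):
        # A successful call returns the raw HTTP response; the generated text is
        # in the first choice of the JSON body.
        print(response.json()["choices"][0]["message"]["content"])
    else:
        print(f"Request failed: {response}")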