# LLM backend configurations: server/client/HF deployments of InternLM2.5
# and OpenAI-compatible API endpoints (GPT-4, Qwen, SiliconFlow, InternLM API).
import os

from dotenv import load_dotenv

from lagent.llms import (
    GPTAPI,
    INTERNLM2_META,
    HFTransformerCasualLM,
    LMDeployClient,
    LMDeployServer,
)

# Bug fix: load_dotenv was imported but never invoked, so API keys placed in a
# local .env file were ignored by every os.environ.get(...) lookup below.
# Calling it here populates os.environ before any config dict is built.
load_dotenv()
# InternLM2.5-7B-chat served locally through lagent's LMDeployServer wrapper.
# top_k=1 with temperature=0 makes decoding effectively deterministic.
internlm_server = {
    "type": LMDeployServer,
    "path": "internlm/internlm2_5-7b-chat",
    "model_name": "internlm2_5-7b-chat",
    "meta_template": INTERNLM2_META,
    "top_p": 0.8,
    "top_k": 1,
    "temperature": 0,
    "max_new_tokens": 8192,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}
# Client-side counterpart of internlm_server: talks to an already-running
# LMDeploy api_server on localhost (same sampling settings as the server config).
internlm_client = {
    "type": LMDeployClient,
    "model_name": "internlm2_5-7b-chat",
    "url": "http://127.0.0.1:23333",
    "meta_template": INTERNLM2_META,
    "top_p": 0.8,
    "top_k": 1,
    "temperature": 0,
    "max_new_tokens": 8192,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}
# Same model loaded in-process via HuggingFace Transformers.
# NOTE(review): uses top_k=None and temperature=1e-6 instead of top_k=1 /
# temperature=0 like the LMDeploy configs — presumably to sidestep a
# zero-temperature edge case in HF sampling; confirm before "fixing".
internlm_hf = {
    "type": HFTransformerCasualLM,
    "path": "internlm/internlm2_5-7b-chat",
    "meta_template": INTERNLM2_META,
    "top_p": 0.8,
    "top_k": None,
    "temperature": 1e-6,
    "max_new_tokens": 8192,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}
# openai_api_base needs to fill in the complete chat api address, such as: https://api.openai.com/v1/chat/completions
# GPT-4 Turbo over the OpenAI chat-completions API; key and base URL are
# overridable through the OPENAI_API_KEY / OPENAI_API_BASE environment variables.
gpt4 = {
    "type": GPTAPI,
    "model_type": "gpt-4-turbo",
    "key": os.environ.get("OPENAI_API_KEY", "YOUR OPENAI API KEY"),
    "api_base": os.environ.get(
        "OPENAI_API_BASE", "https://api.openai.com/v1/chat/completions"
    ),
}
# DashScope text-generation endpoint used by the Qwen config below.
url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"

# Qwen long-context model accessed through the GPTAPI wrapper. The explicit
# meta_template maps lagent roles onto API roles; "environment" messages are
# sent with the "system" api_role.
qwen = {
    "type": GPTAPI,
    "model_type": "qwen-max-longcontext",
    "key": os.environ.get("QWEN_API_KEY", "YOUR QWEN API KEY"),
    "api_base": url,
    "meta_template": [
        {"role": "system", "api_role": "system"},
        {"role": "user", "api_role": "user"},
        {"role": "assistant", "api_role": "assistant"},
        {"role": "environment", "api_role": "system"},
    ],
    "top_p": 0.8,
    "top_k": 1,
    "temperature": 0,
    "max_new_tokens": 4096,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}
# InternLM2.5-7B-chat hosted by SiliconFlow, reached through the same
# OpenAI-compatible GPTAPI wrapper; role mapping mirrors the qwen config.
internlm_silicon = {
    "type": GPTAPI,
    "model_type": "internlm/internlm2_5-7b-chat",
    "key": os.environ.get("SILICON_API_KEY", "YOUR SILICON API KEY"),
    "api_base": "https://api.siliconflow.cn/v1/chat/completions",
    "meta_template": [
        {"role": "system", "api_role": "system"},
        {"role": "user", "api_role": "user"},
        {"role": "assistant", "api_role": "assistant"},
        {"role": "environment", "api_role": "system"},
    ],
    "top_p": 0.8,
    "top_k": 1,
    "temperature": 0,
    "max_new_tokens": 8192,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}
# Official InternLM (puyu) hosted API. The key's placeholder default embeds a
# pointer to the sign-up documentation so a missing env var is self-explaining.
internlm_api = {
    "type": GPTAPI,
    "model_type": "internlm2.5-latest",
    "key": os.environ.get(
        "InternLM_API_KEY",
        "YOUR InternLM API KEY https://internlm.intern-ai.org.cn/api/document",
    ),
    "api_base": "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions",
    "meta_template": [
        {"role": "system", "api_role": "system"},
        {"role": "user", "api_role": "user"},
        {"role": "assistant", "api_role": "assistant"},
        {"role": "environment", "api_role": "system"},
    ],
    "top_p": 0.8,
    "top_k": 1,
    "temperature": 0,
    "max_new_tokens": 8192,
    "repetition_penalty": 1.02,
    "stop_words": ["<|im_end|>"],
}