from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, ConfigDict
from typing import List, Optional, Dict, Any, Union
import httpx
import json
import logging
import os
import time
import uuid

from dotenv import load_dotenv

from config import MODEL_MAPPING

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI(title="Smithery AI Reverse API", version="1.0.0")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Load environment variables from a .env file, if one exists.
load_dotenv()
SMITHERY_API_URL = "https://smithery.ai/api/chat"


def get_backend_model(requested_model: str) -> str:
    """Map a client-facing model name to a Smithery backend model name."""
    return MODEL_MAPPING.get(requested_model, "claude-haiku-4.5")
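# A hypothetical sketch of the MODEL_MAPPING table in config.py (the real
# entries live there; these names are placeholders for illustration):
#     MODEL_MAPPING = {
#         "gpt-4o": "claude-haiku-4.5",
#         "claude-haiku-4.5": "claude-haiku-4.5",
#     }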
# The Smithery session cookie must be provided via the SMITHERY_COOKIE
# environment variable (or a .env file); hardcoding a real auth cookie here
# would leak credentials, so there is no built-in default.
SMITHERY_COOKIE = os.getenv("SMITHERY_COOKIE", "")
if not SMITHERY_COOKIE:
    logger.warning("SMITHERY_COOKIE is not set; requests to the backend will likely fail.")
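# A .env file in the project root might look like this (placeholder values):
#     SMITHERY_COOKIE=sb-xxxx-auth-token.0=...; sb-xxxx-auth-token.1=...
#     PORT=7860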
# Browser-like headers so requests resemble a normal smithery.ai session.
SMITHERY_HEADERS = {
    "accept": "*/*",
    "accept-language": "en-US,en;q=0.5",
    "content-type": "application/json",
    "origin": "https://smithery.ai",
    "priority": "u=1, i",
    "referer": "https://smithery.ai/chat?mcp=@LinkupPlatform/linkup-mcp-server",
    "sec-ch-ua": '"Chromium";v="142", "Brave";v="142", "Not_A Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "sec-gpc": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
    "Cookie": SMITHERY_COOKIE,
}
class Message(BaseModel):
    model_config = ConfigDict(extra="allow")
    role: str
    content: Union[str, List[Dict[str, Any]]]
    name: Optional[str] = None


class ChatCompletionRequest(BaseModel):
    model_config = ConfigDict(extra="allow")
    model: Optional[str] = "claude-haiku-4.5"
    messages: List[Message]
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    top_p: Optional[float] = None
    n: Optional[int] = None
    stream: Optional[bool] = False
    stop: Optional[Union[str, List[str]]] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None
    tools: Optional[List[Dict[str, Any]]] = None
    tool_choice: Optional[Union[str, Dict[str, Any]]] = None
    response_format: Optional[Dict[str, Any]] = None


class Choice(BaseModel):
    index: int
    message: Message
    finish_reason: str


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
def convert_to_smithery_format(messages: List[Message]) -> List[Dict]:
    """Convert OpenAI-style messages into Smithery's parts-based format."""
    smithery_messages = []
    for msg in messages:
        # Smithery only distinguishes user and assistant turns.
        role = "user" if msg.role == "user" else "assistant"
        if isinstance(msg.content, str):
            content = msg.content
        elif isinstance(msg.content, list):
            # Join the text parts of a multimodal content list.
            content = " ".join(
                part.get("text", "")
                for part in msg.content
                if isinstance(part, dict) and part.get("type") == "text"
            )
        else:
            content = str(msg.content)
        smithery_messages.append({
            "parts": [{"type": "text", "text": content}],
            "id": str(uuid.uuid4()).replace("-", "")[:16],
            "role": role,
        })
    return smithery_messages
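# Example: convert_to_smithery_format([Message(role="user", content="hi")])
# yields: [{"parts": [{"type": "text", "text": "hi"}],
#           "id": "<16 hex chars>", "role": "user"}]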
def convert_from_smithery_format(smithery_response: str) -> str:
    """Collapse a Smithery SSE body into plain assistant text.

    Concatenates the "text-delta" event payloads; if the body does not look
    like an SSE stream, it is returned unchanged.
    """
    parts = []
    for line in smithery_response.splitlines():
        if line.startswith("data: ") and line != "data: [DONE]":
            try:
                data = json.loads(line[6:])
            except json.JSONDecodeError:
                continue
            if data.get("type") == "text-delta":
                parts.append(data.get("delta", ""))
    return "".join(parts) if parts else smithery_response
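# Example: a raw SSE body such as
#     data: {"type": "text-delta", "delta": "Hel"}
#     data: {"type": "text-delta", "delta": "lo"}
#     data: [DONE]
# collapses to "Hello".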
async def stream_smithery_response(messages: List[Message], model: str, tools: Optional[List] = None):
    """Relay a Smithery SSE stream as OpenAI-style chat.completion.chunk events."""
    smithery_messages = convert_to_smithery_format(messages)
    backend_model = get_backend_model(model)
    payload = {
        "messages": smithery_messages,
        # Client-supplied tools are accepted for API compatibility but not forwarded.
        "tools": [],
        "model": backend_model,
        "systemPrompt": "You are a helpful assistant.",
        "chatId": "default",
    }
    chat_id = f"chatcmpl-{uuid.uuid4()}"
    created = int(time.time())
    first_chunk = True
    logger.info(f"Streaming request for model: {model} -> {backend_model}")
    try:
        timeout = httpx.Timeout(120.0, connect=30.0)
        async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
            async with client.stream(
                "POST",
                SMITHERY_API_URL,
                headers=SMITHERY_HEADERS,
                json=payload,
            ) as response:
                logger.info(f"Backend response status: {response.status_code}")
                if response.status_code != 200:
                    error_text = await response.aread()
                    error_msg = error_text.decode()
                    logger.error(f"Backend error: {error_msg}")
                    error_chunk = {
                        "id": chat_id,
                        "object": "chat.completion.chunk",
                        "created": created,
                        "model": model,
                        "choices": [{
                            "index": 0,
                            "delta": {"role": "assistant", "content": f"Error: {error_msg}"},
                            "finish_reason": None,
                        }],
                    }
                    yield f"data: {json.dumps(error_chunk)}\n\n"
                    yield "data: [DONE]\n\n"
                    return
                chunk_count = 0
                async for line in response.aiter_lines():
                    if not line.startswith("data: "):
                        continue
                    data_str = line[6:]
                    if data_str == "[DONE]":
                        logger.info(f"Stream completed, sent {chunk_count} chunks")
                        final_chunk = {
                            "id": chat_id,
                            "object": "chat.completion.chunk",
                            "created": created,
                            "model": model,
                            "choices": [{
                                "index": 0,
                                "delta": {},
                                "finish_reason": "stop",
                            }],
                        }
                        yield f"data: {json.dumps(final_chunk)}\n\n"
                        yield "data: [DONE]\n\n"
                        break
                    try:
                        data = json.loads(data_str)
                        if data.get("type") == "text-delta":
                            delta_text = data.get("delta", "")
                            chunk_count += 1
                            # Per the OpenAI streaming convention, only the first
                            # chunk carries the assistant role.
                            delta = {"content": delta_text}
                            if first_chunk:
                                delta["role"] = "assistant"
                                first_chunk = False
                            chunk_data = {
                                "id": chat_id,
                                "object": "chat.completion.chunk",
                                "created": created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": delta,
                                    "finish_reason": None,
                                }],
                            }
                            yield f"data: {json.dumps(chunk_data)}\n\n"
                    except Exception as parse_error:
                        logger.warning(f"Failed to parse chunk: {parse_error}")
    except httpx.TimeoutException as e:
        logger.error(f"Timeout error: {str(e)}")
        error_chunk = {
            "id": chat_id,
            "object": "chat.completion.chunk",
            "created": created,
            "model": model,
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "content": "Error: Request timeout. The backend took too long to respond."},
                "finish_reason": "stop",
            }],
        }
        yield f"data: {json.dumps(error_chunk)}\n\n"
        yield "data: [DONE]\n\n"
    except Exception as e:
        logger.error(f"Stream error: {str(e)}")
        error_chunk = {
            "id": chat_id,
            "object": "chat.completion.chunk",
            "created": created,
            "model": model,
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "content": f"Error: {str(e)}"},
                "finish_reason": "stop",
            }],
        }
        yield f"data: {json.dumps(error_chunk)}\n\n"
        yield "data: [DONE]\n\n"
async def get_smithery_response(messages: List[Message], model: str, tools: Optional[List] = None) -> str:
    """Collect the full (non-streaming) response text from Smithery."""
    smithery_messages = convert_to_smithery_format(messages)
    backend_model = get_backend_model(model)
    payload = {
        "messages": smithery_messages,
        # Client-supplied tools are accepted for API compatibility but not forwarded.
        "tools": [],
        "model": backend_model,
        "systemPrompt": "You are a helpful assistant.",
        "chatId": "default",
    }
    try:
        async with httpx.AsyncClient(timeout=60.0, follow_redirects=True) as client:
            response = await client.post(
                SMITHERY_API_URL,
                headers=SMITHERY_HEADERS,
                json=payload,
            )
            if response.status_code != 200:
                return f"Smithery API returned {response.status_code}: {response.text}"
            # The backend replies with an SSE body; collapse it to plain text.
            return convert_from_smithery_format(response.text)
    except Exception as e:
        return f"Error calling Smithery API: {str(e)}"
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
try:
logger.info(f"Chat request: model={request.model}, stream={request.stream}, messages={len(request.messages)}")
if request.stream:
return StreamingResponse(
stream_smithery_response(request.messages, request.model, request.tools),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no"
}
)
else:
content = await get_smithery_response(request.messages, request.model, request.tools)
response = ChatCompletionResponse(
id=f"chatcmpl-{uuid.uuid4()}",
created=int(time.time()),
model=request.model,
choices=[
Choice(
index=0,
message=Message(role="assistant", content=content),
finish_reason="stop"
)
],
usage=Usage(
prompt_tokens=len(str(request.messages)),
completion_tokens=len(content),
total_tokens=len(str(request.messages)) + len(content)
)
)
logger.info(f"Non-streaming response completed, content length: {len(content)}")
return response
except httpx.TimeoutException as e:
logger.error(f"Timeout in chat_completions: {str(e)}")
raise HTTPException(status_code=504, detail="Gateway timeout - backend took too long to respond")
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error in chat_completions: {e.response.status_code}")
raise HTTPException(status_code=e.response.status_code, detail=str(e))
except Exception as e:
logger.error(f"Error in chat_completions: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
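# Example request against the endpoint above (assumes a local instance on port 7860):
#     curl http://localhost:7860/v1/chat/completions \
#       -H "Content-Type: application/json" \
#       -d '{"model": "claude-haiku-4.5",
#            "messages": [{"role": "user", "content": "Hello"}]}'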
@app.get("/v1/models")
async def list_models():
models_list = []
for model_id in MODEL_MAPPING.keys():
models_list.append({
"id": model_id,
"object": "model",
"created": int(time.time()),
"owned_by": "smithery-ai"
})
return {
"object": "list",
"data": models_list
}
@app.get("/")
async def root():
return {
"message": "Smithery AI Reverse API",
"version": "1.0.0",
"status": "online",
"endpoints": {
"chat_completions": "/v1/chat/completions",
"models": "/v1/models"
},
"docs": "/docs"
}
@app.get("/health")
async def health():
return {"status": "healthy", "service": "smithery-ai-reverse-api"}
@app.get("/test-backend")
async def test_backend():
"""Test if backend is accessible"""
try:
test_messages = [{"parts": [{"type": "text", "text": "hi"}], "id": "test123", "role": "user"}]
payload = {
"messages": test_messages,
"tools": [],
"model": "claude-haiku-4.5",
"systemPrompt": "You are a helpful assistant.",
"chatId": "test"
}
timeout = httpx.Timeout(30.0, connect=10.0)
async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
response = await client.post(
SMITHERY_API_URL,
headers=SMITHERY_HEADERS,
json=payload
)
return {
"backend_accessible": True,
"status_code": response.status_code,
"response_length": len(response.text),
"smithery_url": SMITHERY_API_URL
}
except Exception as e:
logger.error(f"Backend test failed: {str(e)}")
return {
"backend_accessible": False,
"error": str(e),
"smithery_url": SMITHERY_API_URL
}
if __name__ == "__main__":
    import uvicorn

    port = int(os.getenv("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)
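# Example client usage via the openai package (assumes the proxy runs locally
# on port 7860; the api_key value is arbitrary since this proxy does not check it):
#
#     from openai import OpenAI
#     client = OpenAI(base_url="http://localhost:7860/v1", api_key="unused")
#     resp = client.chat.completions.create(
#         model="claude-haiku-4.5",
#         messages=[{"role": "user", "content": "Hello"}],
#     )
#     print(resp.choices[0].message.content)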