import json
import logging
import random
import time
import uuid
from datetime import datetime, timezone
from typing import List, Optional, Union

import httpx
import openai
import requests
import uvicorn
from fastapi import FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
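
# A thin OpenAI-compatible proxy. Bearer tokens carry upstream API keys: keys
# beginning with "sk-" are forwarded to OpenAI, everything else is treated as a
# Google Gemini key and translated to/from Gemini's REST API, with random key
# selection and per-request retries.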

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

MAX_RETRIES = 3
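
# Request schemas mirroring the OpenAI API. `tools` and `tool_choice` are
# accepted for compatibility but are not currently forwarded upstream, and
# EmbeddingRequest has no matching /v1/embeddings route yet.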

class ChatRequest(BaseModel):
    messages: List[dict]
    model: str
    temperature: Optional[float] = 0.7
    stream: Optional[bool] = False
    tools: Optional[List[dict]] = []
    tool_choice: Optional[str] = "auto"


class EmbeddingRequest(BaseModel):
    input: Union[str, List[str]]
    model: str
    encoding_format: Optional[str] = "float"
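
# The Authorization header is "Bearer <key1>,<key2>,...": one or more upstream
# API keys packed into a single token, split apart by the route handlers.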

async def verify_authorization(authorization: str = Header(None)):
    if not authorization:
        logger.error("Missing Authorization header")
        raise HTTPException(status_code=401, detail="Missing Authorization header")
    if not authorization.startswith("Bearer "):
        logger.error("Invalid Authorization header format")
        raise HTTPException(
            status_code=401, detail="Invalid Authorization header format"
        )
    # removeprefix only strips the leading "Bearer "; str.replace would also
    # mangle any occurrence of "Bearer " inside the token itself.
    token = authorization.removeprefix("Bearer ")
    return token
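
# Model discovery helpers. Each picks one key at random and returns errors as
# {"error": ...} rather than raising, so list_models can aggregate partial
# results across keys. Keys are truncated in logs to avoid leaking secrets.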

def get_openai_models(api_keys):
    api_key = random.choice(api_keys)
    try:
        client = openai.OpenAI(api_key=api_key)
        models = client.models.list()
        return models.model_dump()
    except Exception as e:
        logger.error(f"Error getting models from OpenAI with key {api_key[:8]}***: {e}")
        return {"error": str(e)}

def get_gemini_models(api_keys):
    api_key = random.choice(api_keys)
    base_url = "https://generativelanguage.googleapis.com/v1beta"
    url = f"{base_url}/models?key={api_key}"

    try:
        # NOTE: requests is synchronous, so this call blocks the event loop when
        # invoked from the async route; a timeout keeps it from hanging forever.
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            gemini_models = response.json()
            return convert_to_openai_models_format(gemini_models)
        else:
            logger.error(
                f"Error getting models from Gemini with key {api_key[:8]}***: "
                f"{response.status_code} - {response.text}"
            )
            return {"error": f"Gemini API error: {response.status_code} - {response.text}"}

    except requests.RequestException as e:
        logger.error(f"Request failed: {e}")
        return {"error": f"Request failed: {e}"}

def convert_to_openai_models_format(gemini_models):
    openai_format = {"object": "list", "data": []}

    for model in gemini_models.get("models", []):
        openai_model = {
            "id": model["name"].split("/")[-1],
            "object": "model",
            "created": int(datetime.now(timezone.utc).timestamp()),
            "owned_by": "google",
            "permission": [],
            "root": model["name"],
            "parent": None,
        }
        openai_format["data"].append(openai_model)

    return openai_format
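
# OpenAI-style messages -> Gemini "contents". The role mapping is lossy: every
# non-"user" role (including "system") becomes "model", since this converter
# does not use Gemini's separate system-instruction field.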

def convert_messages_to_gemini_format(messages):
    gemini_messages = []
    for msg in messages:
        role = "user" if msg["role"] == "user" else "model"
        parts = []
        if isinstance(msg["content"], str):
            parts.append({"text": msg["content"]})
        elif isinstance(msg["content"], list):
            for content in msg["content"]:
                if isinstance(content, str):
                    parts.append({"text": content})
                elif isinstance(content, dict) and content["type"] == "text":
                    parts.append({"text": content["text"]})
                elif isinstance(content, dict) and content["type"] == "image_url":
                    image_url = content["image_url"]["url"]
                    if image_url.startswith("data:image"):
                        # Take the MIME type from the data-URL header instead of
                        # assuming JPEG (e.g. "data:image/png;base64,...").
                        header, _, data = image_url.partition(",")
                        mime_type = header.split(":")[1].split(";")[0]
                        parts.append(
                            {
                                "inline_data": {
                                    "mime_type": mime_type,
                                    "data": data,
                                }
                            }
                        )
                    else:
                        parts.append({"image_url": {"url": image_url}})
        gemini_messages.append({"role": role, "parts": parts})
    return gemini_messages
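
# Wrap a Gemini candidate in the OpenAI chat-completion envelope. Token usage
# is zero-filled rather than mapped from Gemini's usageMetadata.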

async def convert_gemini_response_to_openai(response, model, stream=False):
    if stream:
        chunk = response
        if not chunk["candidates"]:
            return None

        return {
            "id": "chatcmpl-" + str(uuid.uuid4()),
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {
                        "content": chunk["candidates"][0]["content"]["parts"][0]["text"]
                    },
                    "finish_reason": None,
                }
            ],
        }
    else:
        content = response["candidates"][0]["content"]["parts"][0]["text"]
        return {
            "id": "chatcmpl-" + str(uuid.uuid4()),
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
        }
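
# GET /v1/models (and the /hf/v1 alias): query every key in the token and merge
# the results; fail with 500 only if no key returned a model list.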

@app.get("/v1/models")
@app.get("/hf/v1/models")
async def list_models(authorization: str = Header(None)):
    token = await verify_authorization(authorization)
    api_keys = [key.strip() for key in token.split(",")]

    all_models = []
    error_messages = []

    for api_key in api_keys:
        if api_key.startswith("sk-"):
            response = get_openai_models([api_key])
        else:
            response = get_gemini_models([api_key])

        if "error" in response:
            error_messages.append(response["error"])
        elif isinstance(response, dict) and "data" in response:
            all_models.extend(response["data"])
        else:
            logger.warning(
                f"Unexpected response format from model list API for key {api_key[:8]}***: {response}"
            )

    if error_messages and not all_models:
        raise HTTPException(
            status_code=500, detail=f"Errors encountered: {', '.join(error_messages)}"
        )

    return {"data": all_models, "object": "list"}

@app.post("/v1/chat/completions")
@app.post("/hf/v1/chat/completions")
async def chat_completion(request: ChatRequest, authorization: str = Header(None)):
    token = await verify_authorization(authorization)
    api_keys = [key.strip() for key in token.split(",")]
    logger.info(f"Chat completion request - Model: {request.model}")

    retries = 0

    while retries < MAX_RETRIES:
        api_key = random.choice(api_keys)
        try:
            logger.info(f"Attempt {retries + 1} with API key: {api_key[:8]}***")

            if api_key.startswith("sk-"):
                client = openai.OpenAI(api_key=api_key)

                if request.stream:
                    logger.info("Streaming response enabled")

                    async def generate():
                        try:
                            # NOTE: the OpenAI SDK client used here is synchronous,
                            # so iterating the stream blocks the event loop; the
                            # async client (openai.AsyncOpenAI) would avoid that.
                            stream_response = client.chat.completions.create(
                                model=request.model,
                                messages=request.messages,
                                temperature=request.temperature,
                                stream=True,
                            )

                            for chunk in stream_response:
                                yield f"data: {chunk.model_dump_json()}\n\n"
                            yield "data: [DONE]\n\n"
                        except Exception as e:
                            logger.error(f"Stream error: {str(e)}")
                            raise

                    return StreamingResponse(
                        content=generate(), media_type="text/event-stream"
                    )

                else:
                    response = client.chat.completions.create(
                        model=request.model,
                        messages=request.messages,
                        temperature=request.temperature,
                    )
                    logger.info("Chat completion successful")
                    return response.model_dump()
            else:
                gemini_messages = convert_messages_to_gemini_format(request.messages)
                payload = {
                    "contents": gemini_messages,
                    "generationConfig": {
                        "temperature": request.temperature,
                    },
                }
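                # Streaming path: proxy Gemini SSE chunks, rewriting each one
                # into an OpenAI chat.completion.chunk. Key rotation lives
                # inside the generator (hence the nonlocals) because errors
                # only surface once the response body is being consumed.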
                if request.stream:
                    logger.info("Streaming response enabled")

                    async def generate():
                        nonlocal api_key, retries, api_keys

                        while retries < MAX_RETRIES:
                            try:
                                async with httpx.AsyncClient() as client:
                                    stream_url = (
                                        "https://generativelanguage.googleapis.com/v1beta/models/"
                                        f"{request.model}:streamGenerateContent?alt=sse&key={api_key}"
                                    )
                                    async with client.stream(
                                        "POST", stream_url, json=payload, timeout=60.0
                                    ) as response:
                                        if response.status_code == 429:
                                            logger.warning(f"Rate limit reached for key: {api_key[:8]}***")
                                            retries += 1
                                            if retries >= MAX_RETRIES:
                                                yield f"data: {json.dumps({'error': 'Max retries reached'})}\n\n"
                                                break

                                            api_keys.remove(api_key)
                                            if not api_keys:
                                                yield f"data: {json.dumps({'error': 'All API keys exhausted'})}\n\n"
                                                break

                                            api_key = random.choice(api_keys)
                                            logger.info(f"Retrying with a new API key: {api_key[:8]}***")
                                            continue

                                        if response.status_code != 200:
                                            # A streamed body has not been read yet;
                                            # read it before touching response.text.
                                            await response.aread()
                                            logger.error(
                                                f"Error in streaming response with key {api_key[:8]}***: "
                                                f"{response.status_code} - {response.text}"
                                            )

                                            retries += 1
                                            if retries >= MAX_RETRIES:
                                                yield f"data: {json.dumps({'error': 'Max retries reached'})}\n\n"
                                                break

                                            api_keys.remove(api_key)
                                            if not api_keys:
                                                yield f"data: {json.dumps({'error': 'All API keys exhausted'})}\n\n"
                                                break

                                            api_key = random.choice(api_keys)
                                            logger.info(f"Retrying with a new API key: {api_key[:8]}***")
                                            continue

                                        async for line in response.aiter_lines():
                                            if line.startswith("data: "):
                                                try:
                                                    chunk = json.loads(line[6:])
                                                    if not chunk.get("candidates"):
                                                        continue

                                                    content = chunk["candidates"][0]["content"]["parts"][0]["text"]
                                                    new_chunk = {
                                                        "id": "chatcmpl-" + str(uuid.uuid4()),
                                                        "object": "chat.completion.chunk",
                                                        "created": int(time.time()),
                                                        "model": request.model,
                                                        "choices": [
                                                            {
                                                                "index": 0,
                                                                "delta": {"content": content},
                                                                "finish_reason": None,
                                                            }
                                                        ],
                                                    }
                                                    yield f"data: {json.dumps(new_chunk)}\n\n"
                                                except json.JSONDecodeError:
                                                    continue
                                        yield "data: [DONE]\n\n"
                                        return
                            except Exception as e:
                                logger.error(f"Stream error: {str(e)}")
                                retries += 1
                                if retries >= MAX_RETRIES:
                                    yield f"data: {json.dumps({'error': 'Max retries reached'})}\n\n"
                                    break

                                api_keys.remove(api_key)
                                if not api_keys:
                                    yield f"data: {json.dumps({'error': 'All API keys exhausted'})}\n\n"
                                    break

                                api_key = random.choice(api_keys)
                                logger.info(f"Retrying with a new API key: {api_key[:8]}***")
                                continue

                    return StreamingResponse(content=generate(), media_type="text/event-stream")
                else:
                    async with httpx.AsyncClient() as client:
                        non_stream_url = (
                            "https://generativelanguage.googleapis.com/v1beta/models/"
                            f"{request.model}:generateContent?key={api_key}"
                        )
                        # httpx's default timeout (5s) is too tight for LLM
                        # generation; match the streaming path's 60s.
                        response = await client.post(non_stream_url, json=payload, timeout=60.0)

                        if response.status_code != 200:
                            logger.error(
                                f"Error in non-streaming response with key {api_key[:8]}***: "
                                f"{response.status_code} - {response.text}"
                            )

                            retries += 1
                            if retries >= MAX_RETRIES:
                                raise HTTPException(status_code=500, detail="Max retries reached")

                            api_keys.remove(api_key)
                            if not api_keys:
                                raise HTTPException(status_code=500, detail="All API keys exhausted")

                            api_key = random.choice(api_keys)
                            logger.info(f"Retrying with a new API key: {api_key[:8]}***")
                            continue

                        gemini_response = response.json()
                        logger.info("Chat completion successful")
                        return await convert_gemini_response_to_openai(gemini_response, request.model)

        except Exception as e:
            logger.error(f"Error in chat completion: {str(e)}")
            if isinstance(e, HTTPException):
                raise e

            retries += 1
            if retries >= MAX_RETRIES:
                logger.error("Max retries reached, giving up")
                raise HTTPException(status_code=500, detail="Max retries reached")

            api_keys.remove(api_key)
            if not api_keys:
                raise HTTPException(status_code=500, detail="All API keys exhausted")

            api_key = random.choice(api_keys)
            logger.info(f"Retrying with a new API key: {api_key[:8]}***")
            continue

    raise HTTPException(status_code=500, detail="Unexpected error in chat completion")

@app.get("/health")
@app.get("/")
async def health_check():
    logger.info("Health check endpoint called")
    return {"status": "healthy"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8080)
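
# Example request (hypothetical key values and model name), assuming the server
# is running locally on port 8080:
#
#   curl http://localhost:8080/v1/chat/completions \
#     -H "Authorization: Bearer sk-YOUR_OPENAI_KEY,YOUR_GEMINI_KEY" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gemini-1.5-flash", "messages": [{"role": "user", "content": "Hi"}]}'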