import os

from dotenv import load_dotenv
from fastapi import FastAPI
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langserve import add_routes

# Load API keys and proxy settings from the local .env file.
load_dotenv()

# Base URL this app is served under (e.g. behind the Hugging Face Spaces proxy).
# Fall back to empty strings so a missing variable doesn't raise a TypeError.
api_url = (os.getenv("PROXY_URL") or "") + (os.getenv("PROXY_PREFIX") or "")
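# A sketch of the .env entries this app expects (values are placeholders):
# PROXY_URL and PROXY_PREFIX are read above, while OPENAI_API_KEY and
# GOOGLE_API_KEY are the variables ChatOpenAI and ChatGoogleGenerativeAI
# look up by default.
#
#   OPENAI_API_KEY=sk-...
#   GOOGLE_API_KEY=...
#   PROXY_URL=https://your-space.hf.space
#   PROXY_PREFIX=/api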
app = FastAPI(
    title="Mav AI API",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
    root_path_in_servers=True,
    root_path=api_url,
    debug=True,
)
@app.get("/")
async def root():
# return json response
return {"message": "MAV API is up and running!"}
# Expose an OpenAI chat model at /openai (playground UI disabled).
add_routes(
    app,
    ChatOpenAI(model="gpt-4-1106-preview"),
    path="/openai",
    disabled_endpoints=["playground"],
)
# Expose a Google Gemini chat model at /google (playground UI disabled).
add_routes(
    app,
    ChatGoogleGenerativeAI(model="gemini-pro"),
    path="/google",
    disabled_endpoints=["playground"],
)
if __name__ == "__main__":
    import uvicorn

    # Run a local dev server; root_path keeps the generated docs URLs correct behind the proxy.
    uvicorn.run(app, host="localhost", port=7860, root_path=api_url)
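# Example client usage (a minimal sketch, not executed by this module): assuming
# the dev server above is running on localhost:7860, each route can be called
# through langserve's RemoteRunnable.
#
#     from langserve import RemoteRunnable
#
#     openai_chat = RemoteRunnable("http://localhost:7860/openai")
#     print(openai_chat.invoke("Hello! Who are you?"))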