# langchain-server/app/server.py
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from langserve import add_routes
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file, if present.
load_dotenv()

# Public base URL of the API, assembled from the proxy settings.
api_url = os.getenv("PROXY_URL", "") + os.getenv("PROXY_PREFIX", "")
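
# Example .env contents (placeholder values, an assumption about this setup):
# PROXY_URL / PROXY_PREFIX build api_url above, and the API keys are picked up
# by ChatOpenAI and ChatGoogleGenerativeAI respectively.
#
#   PROXY_URL=https://example-space.hf.space
#   PROXY_PREFIX=/proxy/mav
#   OPENAI_API_KEY=sk-...
#   GOOGLE_API_KEY=...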

app = FastAPI(
    title="Mav AI API",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
    root_path_in_servers=True,
    root_path=api_url,
    debug=True,
)


@app.get("/")
async def root():
    # Simple health-check style JSON response.
    return {"message": "MAV API is up and running!"}


# Expose the OpenAI chat model under /openai.
add_routes(
    app,
    ChatOpenAI(model="gpt-4-1106-preview"),
    path="/openai",
    disabled_endpoints=["playground"],
)

# Expose the Google Gemini chat model under /google.
add_routes(
    app,
    ChatGoogleGenerativeAI(model="gemini-pro"),
    path="/google",
    disabled_endpoints=["playground"],
)


if __name__ == "__main__":
    import uvicorn
    # Local entrypoint; root_path keeps docs and links correct behind the proxy.
    uvicorn.run(app, host="localhost", port=7860, root_path=api_url)
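

# Usage sketch (an assumption, not part of the original file): with the server
# running, langserve's RemoteRunnable can call the /openai and /google routes
# like any other LangChain runnable. The base URL below is hypothetical and
# depends on PROXY_URL / PROXY_PREFIX.
#
#   from langserve import RemoteRunnable
#
#   openai_chain = RemoteRunnable("http://localhost:7860/openai")
#   print(openai_chain.invoke("Say hello in one sentence."))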