# langchain-server / app/server.py
import os

from dotenv import load_dotenv
from fastapi import FastAPI
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langserve import add_routes

load_dotenv()

app_url = os.getenv("PROXY_URL")
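# ChatOpenAI reads OPENAI_API_KEY and ChatGoogleGenerativeAI reads GOOGLE_API_KEY
# from the environment; PROXY_URL is optional and is only used as the server's
# root_path below when running behind a reverse proxy.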
app = FastAPI(
    title="Mav AI API",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
    openapi_url="/openapi.json",
)
@app.get("/")
async def root():
    # Simple health-check endpoint returning a JSON status message.
    return {"message": "Mav AI API is running"}
add_routes(
    app=app,
    runnable=ChatOpenAI(model="gpt-4-1106-preview"),
    path="/openai",
)
add_routes(
    app=app,
    runnable=ChatGoogleGenerativeAI(model="gemini-pro"),
    path="/google",
)
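# LangServe registers invoke, batch, stream, and playground endpoints under each
# path (e.g. POST /openai/invoke, GET /openai/playground/).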
if __name__ == "__main__":
    import uvicorn

    # PROXY_URL may be unset when running locally; fall back to an empty root_path.
    uvicorn.run(app, host="localhost", port=7860, root_path=app_url or "")
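# A minimal client sketch (not part of the server), assuming the app is running
# on port 7860: LangServe's RemoteRunnable can call the routes registered above.
#
#   from langserve import RemoteRunnable
#
#   openai_chain = RemoteRunnable("http://localhost:7860/openai/")
#   print(openai_chain.invoke("Hello!"))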