# langchain-server/app/server.py
from fastapi import FastAPI
from langserve import add_routes
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv

# Load API keys (e.g. OPENAI_API_KEY, GOOGLE_API_KEY) from a local .env file.
load_dotenv()
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)


@app.get("/")
async def root():
    # Health-check endpoint: return a simple JSON status message.
    return {"message": "LangChain Server is running"}
# Expose each chat model as a LangServe route (adds endpoints such as
# /openai/invoke, /openai/stream, /google/invoke, ...).
add_routes(
    app,
    ChatOpenAI(model="gpt-4-1106-preview"),
    path="/openai",
)

add_routes(
    app,
    ChatGoogleGenerativeAI(model="gemini-pro"),
    path="/google",
)
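# Example client usage (a minimal sketch, assuming the server is reachable at
# http://localhost:7860 and that langserve is installed on the client side):
#
#     from langserve import RemoteRunnable
#
#     openai_chain = RemoteRunnable("http://localhost:7860/openai/")
#     print(openai_chain.invoke("Tell me a joke about programming"))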
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=7860)