langchain-server / app /server.py
from dotenv import load_dotenv
from fastapi import FastAPI
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langserve import add_routes
# Load environment variables from .env (e.g. OPENAI_API_KEY for ChatOpenAI,
# GOOGLE_API_KEY for ChatGoogleGenerativeAI).
load_dotenv()
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)
# Expose the OpenAI chat model at /openai.
add_routes(
    app,
    ChatOpenAI(model="gpt-4-1106-preview"),
    path="/openai",
)

# Expose the Google Gemini chat model at /google.
add_routes(
    app,
    ChatGoogleGenerativeAI(model="gemini-pro"),
    path="/google",
)
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces so the server is reachable from outside the
    # container (e.g. on a Hugging Face Space); 7860 is the default Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)
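
# Usage sketch (not part of the original file): once the server is running,
# each route registered with add_routes() above can be invoked with LangServe's
# RemoteRunnable client. The URL below assumes the uvicorn settings in
# __main__ and a client on the same machine.
#
#     from langserve import RemoteRunnable
#
#     openai_chain = RemoteRunnable("http://localhost:7860/openai")
#     print(openai_chain.invoke("Say hello in one sentence."))
#
# The same pattern applies to the Gemini route at /google.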