JarvisLabs committed
Commit 4d7c224
1 parent: e38ef53

Upload 4 files

Files changed (4)
  1. app.py +14 -0
  2. langchain_bot.py +39 -0
  3. requirements.txt +0 -0
  4. system.json +18 -0
app.py ADDED
@@ -0,0 +1,14 @@
+ import gradio as gr
+ from langchain_bot import chain
+
+ # Gradio's ChatInterface calls this with the new message and the running chat
+ # history; only the question is passed on to the retrieval chain.
+ def chat_messages(prompt, chat_history):
+     response = chain.invoke({"question": prompt})
+     return response["answer"]
+
+ gr.ChatInterface(
+     chat_messages
+ ).launch(debug=True)
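Note: chain is expected to behave like a ConversationalRetrievalChain, accepting a "question" key and returning a dict with an "answer" key. A minimal sketch of exercising it outside Gradio, assuming OPENAI_API_KEY and the Upstash credentials are set in the environment (the file name smoke_test.py is illustrative, not part of this commit):

    # smoke_test.py (illustrative, not part of this commit)
    from langchain_bot import chain

    result = chain.invoke({"question": "How can I recycle HDPE at home?"})
    print(result["answer"])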
langchain_bot.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import json
+ from langchain_openai import ChatOpenAI
+ from langchain_core.prompts import MessagesPlaceholder
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.prompts import PromptTemplate
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
+ from langchain.memory.buffer_window import ConversationBufferWindowMemory
+ from langchain.memory import VectorStoreRetrieverMemory
+ ### Contextualize question ###
+ from langchain.chains import create_history_aware_retriever, create_retrieval_chain
+ from upstash_vector import Index
+ from langchain_community.vectorstores.upstash import UpstashVectorStore
+
+ # Application settings: model name, temperature, retriever k, prompt template, memory key
+ settings = json.load(open("system.json", "r"))
+
+ # Upstash vector store (using Upstash's built-in embeddings), exposed as a retriever
+ index = Index(os.environ["UPSTASH_VECTOR_REST_URL"], os.environ["UPSTASH_VECTOR_REST_TOKEN"])
+ vectorStore = UpstashVectorStore(
+     embedding=True, index=index,
+ )
+ retriever = vectorStore.as_retriever(search_kwargs={"k": settings["k"]})
+
+ # LLM setup
+ LLM = ChatOpenAI(model=settings["model"], temperature=settings["temp"])
+
+ # Prompt template that combines the retrieved context with the user question
+ QUESTION_PROMPT = PromptTemplate(
+     template=settings["prompt_temp"],         # prompt template text
+     input_variables=["context", "question"]   # variables substituted into the prompt
+ )
+
+ # Conversation memory
+ memory = ConversationBufferWindowMemory(
+     memory_key=settings["MEMORY_KEY"],  # key the history is stored under
+     output_key="answer",                # key of the chain output to record
+     k=8,                                # number of conversation turns to keep
+     return_messages=True,               # return the history as a list of messages
+ )
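As uploaded, langchain_bot.py stops after building the retriever, LLM, prompt, and memory; it never defines the chain object that app.py imports. A minimal sketch of how those pieces are typically wired together with the already-imported ConversationalRetrievalChain (an assumption about the missing tail of the file, not something in this commit):

    # Assumed continuation: assemble the pieces into the `chain` that app.py imports
    chain = ConversationalRetrievalChain.from_llm(
        llm=LLM,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": QUESTION_PROMPT},
    )

With the memory attached, chain.invoke({"question": ...}) returns a dict whose "answer" key is what app.py displays.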
requirements.txt ADDED
File without changes
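The uploaded requirements.txt is empty. Judging only from the imports in app.py and langchain_bot.py, the dependency list would presumably look something like the following (package names inferred from the code, no pinned versions in the commit):

    gradio
    langchain
    langchain-core
    langchain-community
    langchain-openai
    upstash-vector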
system.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "model": "gpt-3.5-turbo-1106",
+     "temp": 0.2,
+     "MEMORY_KEY": "chat_history",
+     "k": 20,
+     "prompt_temp": "You are an AI chatbot from Precious Plastic. Your job is to answer questions about recycling plastic.\nYou may include links and images in your answers.\nUse the following context to help answer the question.\n------\n{context}\n------\nQuestion: {question}\nDo not make things up: if you do not know the answer, say that you do not know.\n"
+ }
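Because langchain_bot.py loads this file with json.load and indexes it as a flat dict (settings["model"], settings["k"], settings["prompt_temp"], ...), the file has to parse as a single JSON object, and the prompt string has to keep both placeholders that PromptTemplate fills in. A quick sanity check, assuming the file above:

    import json

    with open("system.json") as f:
        settings = json.load(f)

    # PromptTemplate in langchain_bot.py expects both of these placeholders
    assert "{context}" in settings["prompt_temp"]
    assert "{question}" in settings["prompt_temp"]
    print(settings["model"], settings["temp"], settings["k"], settings["MEMORY_KEY"])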