Jawad138 committed
Commit b5afa6f · 1 Parent(s): ed86b74

update app.py

Files changed (1)
  1. app.py +1 -5
app.py CHANGED
@@ -13,8 +13,6 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 import os
 from dotenv import load_dotenv
 import tempfile
-import os
-os.environ["REPLICATE_API_TOKEN"] = "r8_AA3K1fhDykqLa5M74E5V0w5ss1z0P9S3foWJl"
 
 load_dotenv()
 
@@ -53,15 +51,13 @@ def display_chat_history(chain):
 
 def create_conversational_chain(vector_store):
     load_dotenv()
-    replicate_api_token = "r8_AA3K1fhDykqLa5M74E5V0w5ss1z0P9S3foWJl"  # Replace with your actual token
-    os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
 
     llm = Replicate(
         streaming=True,
         model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
         callbacks=[StreamingStdOutCallbackHandler()],
         input={"temperature": 0.01, "max_length": 500, "top_p": 1},
-        replicate_api_token=replicate_api_token
+        replicate_api_token=os.environ.get("REPLICATE_API_TOKEN")
     )
     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
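For readers following the change: below is a minimal, self-contained sketch of the pattern this commit switches to, with the token read from the process environment (typically a gitignored .env file loaded by python-dotenv) instead of being hardcoded. The Replicate import path and the fail-fast check are illustrative assumptions; the Replicate(...) arguments are taken from the diff above.

# sketch.py -- illustrative, not part of the repo
import os
from dotenv import load_dotenv
from langchain.llms import Replicate  # assumption: legacy langchain layout, matching app.py's other imports

# .env (kept out of version control) would contain a line like:
# REPLICATE_API_TOKEN=r8_your_token_here   <- placeholder, never commit a real token
load_dotenv()  # copies variables from a local .env file into os.environ

token = os.environ.get("REPLICATE_API_TOKEN")
if token is None:  # fail fast with a clear message instead of a late auth error
    raise RuntimeError("REPLICATE_API_TOKEN is not set; add it to .env or the shell environment")

llm = Replicate(
    model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
    input={"temperature": 0.01, "max_length": 500, "top_p": 1},
    replicate_api_token=token,
)

Besides keeping the secret out of version control, this split means the key that was previously hardcoded can be rotated on Replicate without touching the code.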