# LangChainGo / llms_cache_option.py
# Author: lizhen30 — commit 91e0a96 ("adb llms cache")
# (137 bytes; originally viewed via a web blob page: raw / history / blame)
import time

from langchain.llms import OpenAI

# Demonstrates the per-instance LLM cache option: cache=False disables
# response caching for this OpenAI instance only, so the timed call below
# always hits the model rather than a cached result.
llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2, cache=False)

# `%%time` is an IPython cell magic and is a syntax error in a plain .py
# file; use time.perf_counter() for an equivalent wall-clock measurement.
start = time.perf_counter()
result = llm("Tell me a joke")
elapsed = time.perf_counter() - start

print(result)
print(f"Wall time: {elapsed:.3f} s")