import os
from time import sleep

import dotenv
import openai

# Load API credentials and model name from the local .env file
dotenv.load_dotenv('.env')
openai.api_key = os.getenv('OPENAI_API_KEY')
llm_model = os.getenv('LLM_MODEL')

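# NOTE: generate_text() below relies on an open_file() helper that is not
# defined in this snippet. A minimal sketch is included here, assuming it is
# a plain UTF-8 file reader; the original implementation may differ.
def open_file(filepath):
    # Read the whole file and return its contents as a string
    with open(filepath, 'r', encoding='utf-8') as infile:
        return infile.read()
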
def call_gpt(prompt):
    max_retry = 5
    retry = 0
    # Drop any non-ASCII characters before sending the prompt
    prompt = prompt.encode(encoding='ASCII', errors='ignore').decode()
    while True:
        try:
            response = openai.ChatCompletion.create(
                model=llm_model,
                temperature=0.9,
                messages=[
                    {"role": "user", "content": prompt}
                ]
            )
            # Return just the generated text, not the full response object
            text = response.choices[0].message.content
            return text
        except Exception as oops:
            retry += 1
            if retry >= max_retry:
                return "GPT3 error: %s" % oops
            print('Error communicating with OpenAI:', oops)
            sleep(1)

def generate_text():
    # Load the prompt text from disk and send it to the model
    prompt = open_file('prompt_response.txt')
    response = call_gpt(prompt)
    return response
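
# Illustrative usage sketch (an assumption, not part of the original snippet):
# run the script directly to print one model response for prompt_response.txt.
if __name__ == '__main__':
    print(generate_text())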