Update logging
- app.py +4 -0
- nim_gpt_functions.py +1 -1
app.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import datetime
 from nim_game_env import NimGameEnv
 from nim_gpt_functions import plan_move, execute_move
 
@@ -55,6 +56,9 @@ def send_chat_msg(inp, chat_history, nim_game_env, temperature, openai_api_key):
     text_obs, observation, reward, done, info = execute_move(output, nim_game_env, openai_api_key)
     ascii_art = generate_game_state_ascii_art(observation, done, reward, AI_STR)
 
+    print("\n==== date/time: " + str(datetime.datetime.now() - datetime.timedelta(hours=5)) + " ====")
+    print("inp: " + inp, ", output: ", output, ", observation: ", observation)
+
     chat_history.append((HUMAN_STR + ": " + inp, AI_STR + ": " + output))
     return chat_history, chat_history, ascii_art
 
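The four added lines in app.py only write a timestamp plus the turn's input, planned move, and resulting board state to the Space's console log. The sketch below is illustrative only, not part of the commit: the values of inp, output, and observation are stand-ins, and the fixed hours=5 offset presumably converts the server's UTC clock to a UTC-5 local time, so the last two lines show a timezone-aware way to state that same intent.

# Illustrative sketch (not part of the commit): it reproduces the two print
# statements added to send_chat_msg in app.py with stand-in values.
import datetime

inp = "take 2 from row 3"            # hypothetical player message
output = "Take 1 stick from row 1."  # hypothetical move planned by the LLM
observation = [1, 3, 5]              # hypothetical Nim board state

# Same expression as the commit: shift the server's (UTC) clock back 5 hours.
print("\n==== date/time: " + str(datetime.datetime.now() - datetime.timedelta(hours=5)) + " ====")
print("inp: " + inp, ", output: ", output, ", observation: ", observation)

# A timezone-aware way to express the same UTC-5 timestamp explicitly:
tz_minus_5 = datetime.timezone(datetime.timedelta(hours=-5))
print("==== date/time: " + str(datetime.datetime.now(tz_minus_5)) + " ====")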
nim_gpt_functions.py
CHANGED
@@ -53,7 +53,7 @@ def plan_move(text_game_state, temperature, api_key):
     llm = OpenAI(model_name='text-davinci-003', temperature=temperature, max_tokens=100,
                  openai_api_key=api_key)
     llm_chain = LLMChain(llm=llm, prompt=PLAN_MOVE_PROMPT_FROM_STRING_EXAMPLES, verbose=False)
-    planned_move = llm_chain.run({'text_game_state': text_game_state})
+    planned_move = llm_chain.run({'text_game_state': text_game_state}).strip()
     return planned_move
 
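The only functional change in this file is the trailing .strip(): completion models such as text-davinci-003 commonly return the answer prefixed with a newline or spaces, and stripping keeps the planned move clean before it is shown in the chat and executed. A minimal sketch of the effect, with the LLMChain call replaced by a canned string (fake_plan_move is a hypothetical stand-in, so no API key or LangChain is needed):

# Minimal sketch of why .strip() was added; the real LLMChain call is replaced
# by a canned completion, so this runs without OpenAI or LangChain.
def fake_plan_move(text_game_state):
    # Completion models often lead the answer with a newline.
    return "\nTake 2 sticks from row 3."

raw = fake_plan_move("row 1: |, row 2: |||, row 3: |||||")
clean = raw.strip()

print(repr(raw))    # '\nTake 2 sticks from row 3.'
print(repr(clean))  # 'Take 2 sticks from row 3.'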