jonpreamble committed on
Commit
1c3ae40
·
1 Parent(s): d9abdb6

Fix dependency issues and switch to use OpenAI's current engine & API

Browse files
Files changed (3) hide show
  1. app.py +14 -11
  2. decider_utils.py +18 -11
  3. requirements.txt +3 -2
app.py CHANGED
@@ -1,7 +1,7 @@
1
  # formatted with python black, line length 200
2
 
3
  import os, random
4
- import openai
5
  import gradio as gr
6
  from game_content import (
7
  INITIAL_WELCOME_TEXT,
@@ -27,27 +27,33 @@ N_COMPLETIONS_WHEN_ELABORATING = 1 # I previously had this set to 3, but that m
27
  MINIMUM_COMPLETION_LENGTH_CHARS_WHEN_ELABORATING = 7
28
 
29
 
 
 
 
 
 
30
  def elaborate(
31
  str_beginning,
32
  prevent_user_from_reaching_home=True,
33
  require_user_to_be_still_engaged_with_bandits=False,
34
  ):
 
35
 
36
  longest_completion = ""
37
 
38
  while len(longest_completion) < MINIMUM_COMPLETION_LENGTH_CHARS_WHEN_ELABORATING:
39
- completions = openai.Completion.create(
40
- engine="text-davinci-003",
41
- prompt=str_beginning,
42
  temperature=0.5,
43
  max_tokens=4000 - int(len(str_beginning) / 4),
44
  frequency_penalty=0.8,
45
  presence_penalty=0.6,
46
  n=N_COMPLETIONS_WHEN_ELABORATING,
47
- )["choices"]
48
 
49
  for i in range(0, N_COMPLETIONS_WHEN_ELABORATING):
50
- completion = completions[i]["text"]
51
  # debug_print(completion)
52
 
53
  allowed = True
@@ -149,9 +155,6 @@ def run_1_game_turn(s_narr_transcript, s_n_turns_elapsed, s_user_transcript, s_u
149
  return [s_narr_transcript, s_n_turns_elapsed, s_user_transcript, s_user_input]
150
 
151
 
152
- openai.organization = os.environ.get("OPENAI_ORGANIZATION")
153
- openai.api_key = os.environ.get("OPENAI_KEY")
154
-
155
 
156
  demo = gr.Blocks()
157
 
@@ -176,8 +179,8 @@ with demo:
176
  fn=run_1_game_turn, inputs=[gr_narr_transcript, gr_n_turns_elapsed, gr_user_transcript, gr_user_input], outputs=[gr_narr_transcript, gr_n_turns_elapsed, gr_user_transcript, gr_user_input]
177
  )
178
 
179
- # See https://discuss.huggingface.co/t/gradio-html-component-with-javascript-code-dont-work/37316/2
180
- demo.load(None, None, None, _js=PAGE_STYLING_JS)
181
 
182
 
183
  demo.launch()
 
1
  # formatted with python black, line length 200
2
 
3
  import os, random
4
+ from openai import OpenAI
5
  import gradio as gr
6
  from game_content import (
7
  INITIAL_WELCOME_TEXT,
 
27
  MINIMUM_COMPLETION_LENGTH_CHARS_WHEN_ELABORATING = 7
28
 
29
 
30
+ openai_client = OpenAI()
31
+ openai_client.organization = os.environ.get("OPENAI_ORGANIZATION")
32
+ openai_client.api_key = os.environ.get("OPENAI_KEY")
33
+
34
+
35
  def elaborate(
36
  str_beginning,
37
  prevent_user_from_reaching_home=True,
38
  require_user_to_be_still_engaged_with_bandits=False,
39
  ):
40
+ global openai_client
41
 
42
  longest_completion = ""
43
 
44
  while len(longest_completion) < MINIMUM_COMPLETION_LENGTH_CHARS_WHEN_ELABORATING:
45
+ completions = openai_client.chat.completions.create(
46
+ model="gpt-4-turbo",
47
+ messages=[{"role": "system", "content": "Continue the story that begins in the next message."}, {"role": "assistant", "content": str_beginning}],
48
  temperature=0.5,
49
  max_tokens=4000 - int(len(str_beginning) / 4),
50
  frequency_penalty=0.8,
51
  presence_penalty=0.6,
52
  n=N_COMPLETIONS_WHEN_ELABORATING,
53
+ ).choices
54
 
55
  for i in range(0, N_COMPLETIONS_WHEN_ELABORATING):
56
+ completion = completions[i].message.content
57
  # debug_print(completion)
58
 
59
  allowed = True
 
155
  return [s_narr_transcript, s_n_turns_elapsed, s_user_transcript, s_user_input]
156
 
157
 
 
 
 
158
 
159
  demo = gr.Blocks()
160
 
 
179
  fn=run_1_game_turn, inputs=[gr_narr_transcript, gr_n_turns_elapsed, gr_user_transcript, gr_user_input], outputs=[gr_narr_transcript, gr_n_turns_elapsed, gr_user_transcript, gr_user_input]
180
  )
181
 
182
+
183
+ demo.load(None, None, None, js=PAGE_STYLING_JS)
184
 
185
 
186
  demo.launch()
decider_utils.py CHANGED
@@ -1,4 +1,5 @@
1
- import openai
 
2
  import decider_questions
3
 
4
  YES = True
@@ -6,24 +7,29 @@ NO = False
6
 
7
  g_decider_utils_dbg_printing = False
8
 
 
 
 
 
9
 
10
  def yesno(question, text, default):
11
  global g_decider_utils_dbg_printing
 
12
 
13
  prompt = text + "\n\n" + question
14
 
15
  if g_decider_utils_dbg_printing:
16
  print(prompt)
17
 
18
- hopefully_word_yes_or_no = openai.Completion.create(
19
- engine="text-davinci-002",
20
- prompt=prompt,
21
  temperature=0,
22
- max_tokens=20, # At first I tried max_tokens = 1 or 2, but the davinci-002 model produced zero output (immediate stop) unless I increased max_token to around 20
23
  frequency_penalty=0,
24
  presence_penalty=0,
25
  n=1,
26
- )["choices"][0]["text"]
27
 
28
  if g_decider_utils_dbg_printing:
29
  print(hopefully_word_yes_or_no)
@@ -45,21 +51,22 @@ def yesno(question, text, default):
45
 
46
  def number(question, text, default=-1, maximum=6):
47
  global g_decider_utils_dbg_printing
 
48
 
49
  prompt = text + "\n\n" + question
50
 
51
  if g_decider_utils_dbg_printing:
52
  print(prompt)
53
 
54
- hopefully_number = openai.Completion.create(
55
- engine="text-davinci-002",
56
- prompt=prompt,
57
  temperature=0,
58
- max_tokens=20, # At first I tried max_tokens = 1 or 2, but the davinci-002 model produced zero output (immediate stop) unless I increased max_token to around 20
59
  frequency_penalty=0,
60
  presence_penalty=0,
61
  n=1,
62
- )["choices"][0]["text"]
63
 
64
  if g_decider_utils_dbg_printing:
65
  print(hopefully_number)
 
1
+ import os
2
+ from openai import OpenAI
3
  import decider_questions
4
 
5
  YES = True
 
7
 
8
  g_decider_utils_dbg_printing = False
9
 
10
+ openai_client = OpenAI()
11
+ openai_client.organization = os.environ.get("OPENAI_ORGANIZATION")
12
+ openai_client.api_key = os.environ.get("OPENAI_KEY")
13
+
14
 
15
  def yesno(question, text, default):
16
  global g_decider_utils_dbg_printing
17
+ global openai_client
18
 
19
  prompt = text + "\n\n" + question
20
 
21
  if g_decider_utils_dbg_printing:
22
  print(prompt)
23
 
24
+ hopefully_word_yes_or_no = openai_client.chat.completions.create(
25
+ model="gpt-4-turbo",
26
+ messages=[{"role": "system", "content": prompt}],
27
  temperature=0,
28
+ max_tokens=20,  # Earlier testing against the old davinci-002 completion model showed that max_tokens = 1 or 2 produced zero output (immediate stop); raising it to around 20 fixed that, so the value is kept for gpt-4-turbo too
29
  frequency_penalty=0,
30
  presence_penalty=0,
31
  n=1,
32
+ ).choices[0].message.content
33
 
34
  if g_decider_utils_dbg_printing:
35
  print(hopefully_word_yes_or_no)
 
51
 
52
  def number(question, text, default=-1, maximum=6):
53
  global g_decider_utils_dbg_printing
54
+ global openai_client
55
 
56
  prompt = text + "\n\n" + question
57
 
58
  if g_decider_utils_dbg_printing:
59
  print(prompt)
60
 
61
+ hopefully_number = openai_client.chat.completions.create(
62
+ model="gpt-4-turbo",
63
+ messages=[{"role": "system", "content": prompt}],
64
  temperature=0,
65
+ max_tokens=20,  # Earlier testing against the old davinci-002 completion model showed that max_tokens = 1 or 2 produced zero output (immediate stop); raising it to around 20 fixed that, so the value is kept for gpt-4-turbo too
66
  frequency_penalty=0,
67
  presence_penalty=0,
68
  n=1,
69
+ ).choices[0].message.content
70
 
71
  if g_decider_utils_dbg_printing:
72
  print(hopefully_number)
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
- gradio>=3.13.0
2
- openai==0.27.2
 
 
1
+ gradio==4.29.0
2
+ httpx==0.24.1
3
+ openai==1.26.0