Hyeonseo committed
Commit b3fcf69
1 Parent(s): 1b9fc54

Update presentation_assistant/presentation_assistant.py

presentation_assistant/presentation_assistant.py CHANGED
@@ -76,22 +76,16 @@ def text2ppt(token_key, input_prompt, input_theme):
     output = query({"inputs": "You are a kind helpful PPT designer. "+input_prompt,
                     "parameters": {
                         "return_full_text": False,
-                        "max_new_tokens": 200}})
-    # reply = output[0]['generated_text'][len("You are a kind helpful PPT designer. "+input_prompt):]
+                        "max_new_tokens": 1000}})
+
     reply = output[0]['generated_text']
-    print(reply)
 
     md_text = reply[4:] if reply[:3] == "---" else reply
     md_text_list = md_text.split('\n')
-    print(md_text_list)
 
     f = open("text2ppt_input.md", 'w')
     for i in range(0, len(md_text_list)):
-        # data = ""
-        # if md_text_list[i] and "<!--" in md_text_list[i]:
-        #     data = "---" + "\n"
         data = md_text_list[i].strip() + "\n"
-        # print(data)
         f.write(data)
     f.close()
 
@@ -146,8 +140,7 @@ def ppt2script(token_key, input_file, input_type):
     output = query({"inputs": "You are a kind helpful PPT Assistant."+input_prompt,
                     "parameters": {
                         "return_full_text": False,
-                        "max_new_tokens": 200}})
-    # reply = output[0]['generated_text'][len("You are a kind helpful PPT Assistant."+input_prompt):]
+                        "max_new_tokens": 1000}})
     reply = output[0]['generated_text']
 
     return reply
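
For reference, below is a minimal, self-contained sketch of the query() helper that both hunks call and of the request the updated code sends. The Inference API endpoint URL, the placeholder model id, and the token variable are assumptions for illustration only and are not taken from this commit; the payload shape and the max_new_tokens bump from 200 to 1000 are the parts that come from the diff.

import requests

# Assumed Hugging Face Inference API endpoint; <model-id> is a placeholder,
# the actual model used by presentation_assistant.py is not shown in this diff.
API_URL = "https://api-inference.huggingface.co/models/<model-id>"
token_key = "hf_xxx"  # placeholder API token

def query(payload):
    # Standard Inference API call: POST the JSON payload with a bearer token.
    headers = {"Authorization": f"Bearer {token_key}"}
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# After this commit the generation budget is 1000 new tokens instead of 200,
# so longer slide markdown or presentation scripts are less likely to be cut off.
output = query({"inputs": "You are a kind helpful PPT designer. " + "Make five slides about solar power.",
                "parameters": {"return_full_text": False,
                               "max_new_tokens": 1000}})
reply = output[0]['generated_text']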