seansullivan committed
Commit 6f0b0ad · verified · 1 Parent(s): 2c57ac6

Update app.py

Files changed (1):
  1. app.py +8 -36
app.py CHANGED
@@ -19,28 +19,6 @@ from langchain_community.tools import ShellTool
  from langgraph.prebuilt import create_react_agent
  from langgraph.checkpoint.memory import MemorySaver
 
- st.markdown("""
- <style>
- .stCodeBlock {
- background-color: #f6f8fa;
- border: 1px solid #e1e4e8;
- border-radius: 6px;
- padding: 16px;
- margin-bottom: 16px;
- }
- .stCodeBlock pre {
- margin: 0;
- padding: 0;
- }
- code {
- padding: 2px 4px;
- background-color: #f6f8fa;
- border-radius: 3px;
- font-family: monospace;
- }
- </style>
- """, unsafe_allow_html=True)
-
 
  # Show title and description.
  # Add a radio button for mode selection
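For context, the removed block above injected page-wide CSS through `unsafe_allow_html=True` to restyle code blocks. With it gone, the app relies on Streamlit's default markdown theme. A minimal sketch (illustrative only, not part of the commit) of how fenced and inline code render without any CSS injection:

```python
import streamlit as st

# Hypothetical sample output; Streamlit's built-in markdown styling handles the
# fenced block and the inline span, so no <style> injection is needed.
sample = "Install it with `pip install streamlit`, then run:\n```python\nprint('hello')\n```"
st.markdown(sample)
```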
@@ -374,16 +352,13 @@ else:
  )
 
  def format_ai_response(response):
- # Remove any existing HTML tags that might interfere with markdown rendering
- response = re.sub(r'<[^>]+>', '', response)
-
- # Ensure code blocks are properly formatted for markdown
- response = re.sub(r'```(\w+)?\n(.*?)```', r'```\1\n\2\n```', response, flags=re.DOTALL)
+ # Remove custom code block formatting
+ formatted_response = re.sub(r'```(.*?)```', r'```\1```', response, flags=re.DOTALL)
 
- # Ensure inline code is properly formatted
- response = re.sub(r'`([^`\n]+)`', r'`\1`', response)
+ # Remove custom inline code formatting
+ formatted_response = re.sub(r'`([^`\n]+)`', r'`\1`', formatted_response)
 
- return response
+ return formatted_response
 
  async def run_github_editor(query: str, thread_id: str = "default"):
  inputs = {"messages": [HumanMessage(content=query)]}
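Taken on its own, the replacement format_ai_response from this hunk can be exercised outside the app. Both substitutions replace each matched span with the same text, so the output is effectively identical to the input; a standalone sketch with a hypothetical input:

```python
import re

def format_ai_response(response):
    # Matches fenced code blocks and rewrites them back verbatim
    formatted_response = re.sub(r'```(.*?)```', r'```\1```', response, flags=re.DOTALL)
    # Matches inline code spans and rewrites them back verbatim
    formatted_response = re.sub(r'`([^`\n]+)`', r'`\1`', formatted_response)
    return formatted_response

# Hypothetical model output
text = "Run `pip install langgraph` first.\n```python\nprint('hi')\n```"
assert format_ai_response(text) == text  # the substitutions are effectively no-ops
```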
@@ -412,17 +387,14 @@ else:
  full_response += text
  else:
  full_response += content
-
- formatted_response = format_ai_response(full_response)
- response_container.markdown(formatted_response)
+ response_container.markdown(format_ai_response(full_response))
  elif kind == "on_tool_start" and mode == "Task":
  response_container.write(f"\nUsing tool: {event['name']}")
  elif kind == "on_tool_end" and mode == "Task":
  response_container.write(f"Tool result: {event['data']['output']}\n")
 
- # Final formatted response
- final_formatted_response = format_ai_response(full_response)
- response_container.markdown(final_formatted_response)
+ # Update the final response using Streamlit's markdown
+ response_container.markdown(format_ai_response(full_response))
 
  # Create a session state variable to store the chat messages. This ensures that the
  # messages persist across reruns.
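The streaming side of the change keeps one render call per chunk plus a final render after the event loop, all against the same placeholder. A minimal sketch of that pattern with the agent's event stream stubbed out (fake_chunks and the chunk texts are hypothetical, not from the commit):

```python
import asyncio
import streamlit as st

async def fake_chunks():
    # Hypothetical stand-in for iterating agent.astream_events(...) text deltas
    for chunk in ["Cloning the repo", ", editing app.py", ", opening a PR."]:
        await asyncio.sleep(0)
        yield chunk

async def stream_answer():
    response_container = st.empty()  # one placeholder, overwritten on every update
    full_response = ""
    async for text in fake_chunks():
        full_response += text
        # re-render the accumulated text on each chunk, as the hunk above does
        response_container.markdown(full_response)
    # final render once the stream is exhausted
    response_container.markdown(full_response)

asyncio.run(stream_answer())
```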
 