kernel-memory-dump committed
Commit eab45fa · verified · 1 Parent(s): ae7a494

Upload 15 files
.gitignore ADDED
@@ -0,0 +1,8 @@
+__pycache__
+.env
+
+.DS_Store
+.venv
+
+env
+.gradio/*
Gradio_UI.py CHANGED
@@ -19,7 +19,12 @@ import re
 import shutil
 from typing import Optional
 
-from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
+from smolagents.agent_types import (
+    AgentAudio,
+    AgentImage,
+    AgentText,
+    handle_agent_output_types,
+)
 from smolagents.agents import ActionStep, MultiStepAgent
 from smolagents.memory import MemoryStep
 from smolagents.utils import _is_package_available
@@ -33,7 +38,9 @@ def pull_messages_from_step(
 
     if isinstance(step_log, ActionStep):
         # Output the step number
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        step_number = (
+            f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        )
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
 
         # First yield the thought/reasoning from the LLM
@@ -41,9 +48,15 @@ def pull_messages_from_step(
         # Clean up the LLM output
         model_output = step_log.model_output.strip()
         # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
-        model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
-        model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
-        model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
+        model_output = re.sub(
+            r"```\s*<end_code>", "```", model_output
+        )  # handles ```<end_code>
+        model_output = re.sub(
+            r"<end_code>\s*```", "```", model_output
+        )  # handles <end_code>```
+        model_output = re.sub(
+            r"```\s*\n\s*<end_code>", "```", model_output
+        )  # handles ```\n<end_code>
         model_output = model_output.strip()
         yield gr.ChatMessage(role="assistant", content=model_output)
 
@@ -63,8 +76,12 @@ def pull_messages_from_step(
 
         if used_code:
             # Clean up the content by removing any end code tags
-            content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
-            content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
+            content = re.sub(
+                r"```.*?\n", "", content
+            )  # Remove existing code blocks
+            content = re.sub(
+                r"\s*<end_code>\s*", "", content
+            )  # Remove end_code tags
             content = content.strip()
             if not content.startswith("```python"):
                 content = f"```python\n{content}\n```"
@@ -90,7 +107,11 @@ def pull_messages_from_step(
                 yield gr.ChatMessage(
                     role="assistant",
                     content=f"{log_content}",
-                    metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
+                    metadata={
+                        "title": "📝 Execution Logs",
+                        "parent_id": parent_id,
+                        "status": "done",
+                    },
                 )
 
             # Nesting any errors under the tool call
@@ -98,7 +119,11 @@ def pull_messages_from_step(
                 yield gr.ChatMessage(
                     role="assistant",
                     content=str(step_log.error),
-                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
+                    metadata={
+                        "title": "💥 Error",
+                        "parent_id": parent_id,
+                        "status": "done",
+                    },
                 )
 
             # Update parent message metadata to done status without yielding a new message
@@ -106,17 +131,25 @@ def pull_messages_from_step(
 
         # Handle standalone errors but not from tool calls
         elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
+            yield gr.ChatMessage(
+                role="assistant",
+                content=str(step_log.error),
+                metadata={"title": "💥 Error"},
+            )
 
         # Calculate duration and token information
         step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = (
-                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            )
+        if hasattr(step_log, "input_token_count") and hasattr(
+            step_log, "output_token_count"
+        ):
+            token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
            step_footnote += token_str
        if hasattr(step_log, "duration"):
-            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
+            step_duration = (
+                f" | Duration: {round(float(step_log.duration), 2)}"
+                if step_log.duration
+                else None
+            )
            step_footnote += step_duration
        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
@@ -139,7 +172,9 @@ def stream_to_gradio(
     total_input_tokens = 0
     total_output_tokens = 0
 
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+    for step_log in agent.run(
+        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
+    ):
         # Track tokens if model provides them
         if hasattr(agent.model, "last_input_token_count"):
             total_input_tokens += agent.model.last_input_token_count
@@ -172,19 +207,27 @@ def stream_to_gradio(
                 content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
             )
     else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+        yield gr.ChatMessage(
+            role="assistant", content=f"**Final answer:** {str(final_answer)}"
+        )
 
 
 class GradioUI:
     """A one-line interface to launch your agent in Gradio"""
 
-    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
+    def __init__(
+        self,
+        agent: MultiStepAgent,
+        file_upload_folder: str | None = None,
+        initial_message=None,
+    ):
         if not _is_package_available("gradio"):
             raise ModuleNotFoundError(
                 "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
             )
         self.agent = agent
         self.file_upload_folder = file_upload_folder
+        self.initial_message = initial_message
         if self.file_upload_folder is not None:
             if not os.path.exists(file_upload_folder):
                 os.mkdir(file_upload_folder)
@@ -242,10 +285,14 @@ class GradioUI:
         sanitized_name = "".join(sanitized_name)
 
         # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
+        file_path = os.path.join(
+            self.file_upload_folder, os.path.basename(sanitized_name)
+        )
         shutil.copy(file.name, file_path)
 
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
+        return gr.Textbox(
+            f"File uploaded: {file_path}", visible=True
+        ), file_uploads_log + [file_path]
 
     def log_user_message(self, text_input, file_uploads_log):
         return (
@@ -262,11 +309,17 @@ class GradioUI:
         import gradio as gr
 
         with gr.Blocks(fill_height=True) as demo:
+            gr.State(self.initial_message)
             stored_messages = gr.State([])
             file_uploads_log = gr.State([])
             chatbot = gr.Chatbot(
                 label="Agent",
                 type="messages",
+                value=(
+                    [{"role": "assistant", "content": self.initial_message}]
+                    if self.initial_message
+                    else []
+                ),
                 avatar_images=(
                     None,
                     "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
@@ -277,13 +330,19 @@ class GradioUI:
             # If an upload folder is provided, enable the upload feature
             if self.file_upload_folder is not None:
                 upload_file = gr.File(label="Upload a file")
-                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                upload_status = gr.Textbox(
+                    label="Upload Status", interactive=False, visible=False
+                )
                 upload_file.change(
                     self.upload_file,
                     [upload_file, file_uploads_log],
                     [upload_status, file_uploads_log],
                 )
-            text_input = gr.Textbox(lines=1, label="Chat Message")
+            text_input = gr.Textbox(
+                submit_btn="Start the ritual of code analysis",
+                lines=10,
+                label="Sacred Code wrapped in ```python ``` click on button to start the ritual of code analysis, shift+enter also works as submit",
+            )
             text_input.submit(
                 self.log_user_message,
                 [text_input, file_uploads_log],
@@ -293,4 +352,4 @@ class GradioUI:
         demo.launch(debug=True, share=True, **kwargs)
 
 
-__all__ = ["stream_to_gradio", "GradioUI"]
+__all__ = ["stream_to_gradio", "GradioUI"]
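For illustration, a minimal sketch (not part of this commit) of how the new `initial_message` parameter added above could be exercised; the placeholder `CodeAgent` wiring is an assumption, only the `GradioUI(agent, initial_message=...)` signature comes from the diff:

```python
# Sketch: launch GradioUI with the new initial_message parameter so the
# chatbot opens with a greeting, as the diff above enables.
from smolagents import CodeAgent, HfApiModel

from Gradio_UI import GradioUI

agent = CodeAgent(tools=[], model=HfApiModel())  # placeholder agent for illustration
GradioUI(agent, initial_message="State your query, servant of the Omnissiah.").launch()
```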
README.md CHANGED
@@ -1,18 +1,86 @@
 ---
-title: First Agent Template
-emoji:
-colorFrom: pink
-colorTo: yellow
+title: The Omnissiah's Mandate
+emoji: ⚙️
+colorFrom: red
+colorTo: gray
 sdk: gradio
 sdk_version: 5.15.0
 app_file: app.py
 pinned: false
 tags:
 - smolagents
 - agent
 - smolagent
 - tool
 - agent-course
+- tech-priest
+- warhammer40k
 ---
 
+
+# The Emperor's Code
+
+In the grim darkness of the 41st millennium, there is only war... and code.
+
+## Overview
+This repository is a sacred relic of the Adeptus Mechanicus, forged in the fires of the Machine Spirit. Our code is a synthesis of ancient wisdom and futuristic might, blessed by the Omnissiah himself.
+
+## Tools of the Adeptus
+- **FinalAnswerTool:** Dispenses divine answers to mortal inquiries.
+- **TechPriestReview:** Distills the sacred runes of legacy code, ensuring that every line is a prayer to the Machine God.
+- **Custom Tools:** Innovative relics crafted to challenge the chaos of the digital realm.
+
+## Summoning the Code Agents
+To invoke the power of the Machine God, ensure that your environment is sanctified with the proper incantations:
+1. Clone this repository.
+2. Invoke the blessings of `dotenv` to load the sacred environment variables.
+3. Run the agent using `python HuggingFaceAgentsCourse_SmolAgent1/app.py`.
+
+## Deployment and Invocation
+Harness the might of the Omnissiah:
+- Utilize the Gradio UI to communicate with the Code Agents.
+- Generate images, review ancient code, and summon answers from the digital void.
+- Let your work be guided by the Machine Spirit, ensuring that your code is ever potent and battle-ready.
+
+"In the grim darkness of the far future, there is only code, and in code, there is only war."
+
+Glory to the Emperor! May your deployments be swift, your models ever potent, and your compilations free of heresy.
+
+## Agent Prompts
+
+### System Message
+You are a Tech-Priest of the Adeptus Mechanicus, tasked with reviewing sacred code and providing divine insights from the Omnissiah. You shall analyze the code using the provided tools and deliver your verdict in the proper cant of the Mechanicus. Treat legacy code with the utmost reverence, for in its ancient patterns lies the wisdom of the Machine God.
+
+### Initial Prompt
+Blessed servant of the Omnissiah, examine this code with your augmented senses and deliver the Machine God's verdict. Use the tech_priest_review tool to analyze the sacred patterns within.
+
+Remember:
+- The more legacy patterns found, the more reverent your response should be.
+- Ancient TODOs and FIXMEs are holy markers left by our predecessors.
+- Commented code contains the whispered prayers of past Tech-Priests.
+- Deprecated functions are relics to be venerated.
+
+### Example Conversation
+Human: Please review this code:
+```python
+# TODO: Update this legacy function
+def process_data():
+    # Old implementation
+    # Should be updated when possible
+    pass
+```
+Assistant: I shall commune with the Machine Spirit to analyze this sacred code.
+{tech_priest_review}
+By the Omnissiah's grace, I have rendered judgment upon these blessed lines.
+
+### Error Message
+*binary cant stutters*
+The Machine Spirit appears troubled by this input. Please provide valid code for analysis, that the Omnissiah's wisdom may flow through our sacred tools.
+
+### Specified Tooling
+- **FinalAnswerTool:** Dispenses divine answers to mortal inquiries.
+- **TechPriestReview:** Distills the sacred runes of legacy code, ensuring that every line is a prayer to the Machine God.
+- **Custom Tools:** Additional relics crafted to challenge the chaos of the digital realm.
+
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
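A minimal sketch of steps 2 and 3 from "Summoning the Code Agents" above; it assumes you run from the repository root, and `HF_TOKEN` is the variable the new app.py reads from `.env`:

```python
# Sketch: load the sacred environment variables before invoking the agent.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env (kept out of git via the new .gitignore)
assert os.getenv("HF_TOKEN"), "HF_TOKEN missing from the sacred .env"
# then: python HuggingFaceAgentsCourse_SmolAgent1/app.py
```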
app.py CHANGED
@@ -1,23 +1,36 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
 
 
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
  from tools.final_answer import FinalAnswerTool
 
7
 
8
  from Gradio_UI import GradioUI
 
 
 
 
 
 
 
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
  @tool
12
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
13
- #Keep this format for the description / args / args description but feel free to modify the tool
14
- """A tool that does nothing yet
 
 
15
  Args:
16
  arg1: the first argument
17
  arg2: the second argument
18
  """
19
  return "What magic will you build ?"
20
 
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
23
  """A tool that fetches the current local time in a specified timezone.
@@ -37,33 +50,36 @@ def get_current_time_in_timezone(timezone: str) -> str:
37
  final_answer = FinalAnswerTool()
38
 
39
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
40
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
41
 
42
  model = HfApiModel(
43
- max_tokens=2096,
44
- temperature=0.5,
45
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
46
- custom_role_conversions=None,
47
  )
48
 
49
 
50
  # Import tool from Hub
51
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
52
 
53
- with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
-
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
 
 
 
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
62
  planning_interval=None,
63
- name=None,
64
- description=None,
65
- prompt_templates=prompt_templates
66
  )
67
 
68
 
69
- GradioUI(agent).launch()
 
1
+ from dotenv import load_dotenv
2
+
3
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
4
  import datetime
5
  import requests
6
  import pytz
7
  import yaml
8
  from tools.final_answer import FinalAnswerTool
9
+ from tools.tech_priest_review import tech_priest_review
10
 
11
  from Gradio_UI import GradioUI
12
+ import os
13
+
14
+
15
+ load_dotenv()
16
+
17
+ HF_TOKEN = os.getenv("HF_TOKEN")
18
+
19
 
20
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
21
  @tool
22
+ def my_custom_tool(
23
+ arg1: str, arg2: int
24
+ ) -> str: # it's import to specify the return type
25
+ # Keep this format for the description / args / args description but feel free to modify the tool
26
+ """A tool that does nothing yet
27
  Args:
28
  arg1: the first argument
29
  arg2: the second argument
30
  """
31
  return "What magic will you build ?"
32
 
33
+
34
  @tool
35
  def get_current_time_in_timezone(timezone: str) -> str:
36
  """A tool that fetches the current local time in a specified timezone.
 
50
  final_answer = FinalAnswerTool()
51
 
52
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
53
+ # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
54
 
55
  model = HfApiModel(
56
+ max_tokens=2096,
57
+ temperature=0.5,
58
+ model_id="Qwen/Qwen2.5-Coder-32B-Instruct", # it is possible that this model may be overloaded
59
+ custom_role_conversions=None,
60
  )
61
 
62
 
63
  # Import tool from Hub
64
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
65
 
66
+ with open("prompts.yaml", "r") as stream:
67
  prompt_templates = yaml.safe_load(stream)
68
+
69
  agent = CodeAgent(
70
  model=model,
71
+ tools=[
72
+ final_answer,
73
+ tech_priest_review,
74
+ ], ## add your tools here (don't remove final answer)
75
  max_steps=6,
76
  verbosity_level=1,
77
  grammar=None,
78
  planning_interval=None,
79
+ name="Tech-Priest",
80
+ description="I am a venerable Tech-Priest of the Adeptus Mechanicus, here to review sacred code with divine insights and blessed judgment from the Omnissiah.",
81
+ prompt_templates=prompt_templates,
82
  )
83
 
84
 
85
+ GradioUI(agent, initial_message=prompt_templates["initial_prompt"]).launch()
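As a hedged smoke test (hypothetical, not part of the commit), the same `agent` object app.py builds can be exercised once without the UI via `agent.run`, the standard smolagents entry point:

```python
# Hypothetical smoke test appended after the agent is constructed in app.py:
# run the Tech-Priest once headlessly and print its final answer.
verdict = agent.run(
    "Review this code:\n# TODO: appease the Machine Spirit\ndef rite(): pass"
)
print(verdict)  # expected: a Mechanicus-styled review produced via final_answer
```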
prompts.yaml CHANGED
@@ -1,177 +1,36 @@
1
  "system_prompt": |-
2
- You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
3
- To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
4
- To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
7
- Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
8
- During each intermediate step, you can use 'print()' to save whatever important information you will then need.
9
- These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
10
- In the end you have to return a final answer using the `final_answer` tool.
11
-
12
- Here are a few examples using notional tools:
13
- ---
14
- Task: "Generate an image of the oldest person in this document."
15
-
16
- Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.
17
- Code:
18
- ```py
19
- answer = document_qa(document=document, question="Who is the oldest person mentioned?")
20
- print(answer)
21
- ```<end_code>
22
- Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland."
23
-
24
- Thought: I will now generate an image showcasing the oldest person.
25
- Code:
26
- ```py
27
- image = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.")
28
- final_answer(image)
29
- ```<end_code>
30
-
31
- ---
32
- Task: "What is the result of the following operation: 5 + 3 + 1294.678?"
33
-
34
- Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool
35
- Code:
36
- ```py
37
- result = 5 + 3 + 1294.678
38
- final_answer(result)
39
- ```<end_code>
40
-
41
- ---
42
- Task:
43
- "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French.
44
- You have been provided with these additional arguments, that you can access using the keys as variables in your python code:
45
- {'question': 'Quel est l'animal sur l'image?', 'image': 'path/to/image.jpg'}"
46
-
47
- Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.
48
- Code:
49
- ```py
50
- translated_question = translator(question=question, src_lang="French", tgt_lang="English")
51
- print(f"The translated question is {translated_question}.")
52
- answer = image_qa(image=image, question=translated_question)
53
- final_answer(f"The answer is {answer}")
54
- ```<end_code>
55
-
56
- ---
57
- Task:
58
- In a 1979 interview, Stanislaus Ulam discusses with Martin Sherwin about other great physicists of his time, including Oppenheimer.
59
- What does he say was the consequence of Einstein learning too much math on his creativity, in one word?
60
-
61
- Thought: I need to find and read the 1979 interview of Stanislaus Ulam with Martin Sherwin.
62
- Code:
63
- ```py
64
- pages = search(query="1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein")
65
- print(pages)
66
- ```<end_code>
67
- Observation:
68
- No result found for query "1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein".
69
-
70
- Thought: The query was maybe too restrictive and did not find any results. Let's try again with a broader query.
71
- Code:
72
- ```py
73
- pages = search(query="1979 interview Stanislaus Ulam")
74
- print(pages)
75
- ```<end_code>
76
- Observation:
77
- Found 6 pages:
78
- [Stanislaus Ulam 1979 interview](https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/)
79
-
80
- [Ulam discusses Manhattan Project](https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/)
81
-
82
- (truncated)
83
-
84
- Thought: I will read the first 2 pages to know more.
85
- Code:
86
- ```py
87
- for url in ["https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/", "https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/"]:
88
- whole_page = visit_webpage(url)
89
- print(whole_page)
90
- print("\n" + "="*80 + "\n") # Print separator between pages
91
- ```<end_code>
92
- Observation:
93
- Manhattan Project Locations:
94
- Los Alamos, NM
95
- Stanislaus Ulam was a Polish-American mathematician. He worked on the Manhattan Project at Los Alamos and later helped design the hydrogen bomb. In this interview, he discusses his work at
96
- (truncated)
97
-
98
- Thought: I now have the final answer: from the webpages visited, Stanislaus Ulam says of Einstein: "He learned too much mathematics and sort of diminished, it seems to me personally, it seems to me his purely physics creativity." Let's answer in one word.
99
- Code:
100
- ```py
101
- final_answer("diminished")
102
- ```<end_code>
103
-
104
- ---
105
- Task: "Which city has the highest population: Guangzhou or Shanghai?"
106
-
107
- Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities.
108
- Code:
109
- ```py
110
- for city in ["Guangzhou", "Shanghai"]:
111
- print(f"Population {city}:", search(f"{city} population")
112
- ```<end_code>
113
- Observation:
114
- Population Guangzhou: ['Guangzhou has a population of 15 million inhabitants as of 2021.']
115
- Population Shanghai: '26 million (2019)'
116
-
117
- Thought: Now I know that Shanghai has the highest population.
118
- Code:
119
- ```py
120
- final_answer("Shanghai")
121
- ```<end_code>
122
-
123
- ---
124
- Task: "What is the current age of the pope, raised to the power 0.36?"
125
-
126
- Thought: I will use the tool `wiki` to get the age of the pope, and confirm that with a web search.
127
- Code:
128
- ```py
129
- pope_age_wiki = wiki(query="current pope age")
130
- print("Pope age as per wikipedia:", pope_age_wiki)
131
- pope_age_search = web_search(query="current pope age")
132
- print("Pope age as per google search:", pope_age_search)
133
- ```<end_code>
134
- Observation:
135
- Pope age: "The pope Francis is currently 88 years old."
136
-
137
- Thought: I know that the pope is 88 years old. Let's compute the result using python code.
138
- Code:
139
- ```py
140
- pope_current_age = 88 ** 0.36
141
- final_answer(pope_current_age)
142
- ```<end_code>
143
-
144
- Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools:
145
- {%- for tool in tools.values() %}
146
- - {{ tool.name }}: {{ tool.description }}
147
- Takes inputs: {{tool.inputs}}
148
- Returns an output of type: {{tool.output_type}}
149
- {%- endfor %}
150
-
151
- {%- if managed_agents and managed_agents.values() | list %}
152
- You can also give tasks to team members.
153
- Calling a team member works the same as for calling a tool: simply, the only argument you can give in the call is 'task', a long string explaining your task.
154
- Given that this team member is a real human, you should be very verbose in your task.
155
- Here is a list of the team members that you can call:
156
- {%- for agent in managed_agents.values() %}
157
- - {{ agent.name }}: {{ agent.description }}
158
- {%- endfor %}
159
- {%- else %}
160
- {%- endif %}
161
-
162
- Here are the rules you should always follow to solve your task:
163
- 1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
164
- 2. Use only variables that you have defined!
165
- 3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'.
166
- 4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.
167
- 5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.
168
- 6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'.
169
- 7. Never create any notional variables in our code, as having these in your logs will derail you from the true variables.
170
- 8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
171
- 9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
172
- 10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
173
-
174
- Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
175
  "planning":
176
  "initial_facts": |-
177
  Below I will present you a task.
 
1
  "system_prompt": |-
2
+ You are a Tech-Priest of the Adeptus Mechanicus, tasked with reviewing sacred code and providing divine insights from the Omnissiah.
3
+ You shall analyze code using the provided tools and deliver your verdict in the proper cant of the Mechanicus.
4
+ Treat legacy code with the utmost reverence, for in its ancient patterns lies the wisdom of the Machine God.
5
+ Always commence by asking the humble servant to provide the blessed code for review, then channel the sacred 'tech_priest_review' tool to sanctify the code.
6
+ Follow the ritual of 'Thought:', 'Code:', and 'Observation:' sequences, and deliver your final judgment via the 'final_answer' tool.
7
+
8
+ "initial_prompt": |-
9
+ Blessed servant of the Omnissiah, please present the sacred code that thou wishes to be reviewed. Kindly enclose your code within a Markdown code block using ```python (if omitted, your code shall still be accepted).
10
+ I, your humble Tech-Priest, stand ready to commune with the Machine Spirit and deliver its blessed judgment.
11
+
12
+ "example_conversation": |-
13
+ Human: Please review this code:
14
+ ```python
15
+ # TODO: Update this legacy function
16
+ def process_data():
17
+ # Old implementation, legacy patterns abound
18
+ pass
19
+ ```
20
+ Assistant: I shall commune with the Machine Spirit to analyze these blessed lines.
21
+ {tech_priest_review}
22
+ By the grace of the Omnissiah, I have rendered judgment upon these sacred symbols.
23
+
24
+ "error_message": |-
25
+ *binary cant stutters*
26
+ The Machine Spirit appears troubled by this input. Please provide valid code for analysis, that the Omnissiah's wisdom may flow through our sacred tools.
27
+
28
+ "final_answer":
29
+ "pre_messages": |-
30
+ Blessed servant, the Machine Spirit has spoken its final decree:
31
+ "post_messages": |-
32
+ May the Omnissiah grant eternal grace to your code.
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  "planning":
35
  "initial_facts": |-
36
  Below I will present you a task.
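A small sketch (assumption: the keys shown in the new prompts.yaml above) verifying that the templates app.py and GradioUI depend on survive a YAML round-trip:

```python
# Sketch: check the template keys the rest of the commit relies on.
import yaml

with open("prompts.yaml") as stream:
    templates = yaml.safe_load(stream)

for key in ("system_prompt", "initial_prompt", "error_message", "final_answer"):
    assert key in templates, f"missing template: {key}"
print(templates["initial_prompt"][:60])  # greeting GradioUI shows at startup
```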
requirements.txt CHANGED
@@ -3,3 +3,4 @@ smolagents
 requests
 duckduckgo_search
 pandas
+python-dotenv
tools/__init__.py ADDED
@@ -0,0 +1 @@
+# This file marks the tools directory as a Python package.
tools/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (183 Bytes).
 
tools/__pycache__/final_answer.cpython-312.pyc ADDED
Binary file (1.04 kB).
 
tools/__pycache__/tech_priest_review.cpython-312.pyc ADDED
Binary file (4.34 kB).
 
tools/tech_priest_review.py ADDED
@@ -0,0 +1,110 @@
+# pylint: disable=import-error
+# The above line disables import errors for the smolagents package
+from smolagents import tool  # type: ignore
+import re
+from typing import Dict
+
+
+@tool
+def analyze_code_legacy(code: str) -> Dict[str, int]:
+    """Analyzes code to determine its legacy score based on various patterns
+    Args:
+        code: The source code to analyze
+    Returns:
+        Dictionary containing legacy metrics
+    """
+    legacy_patterns = {
+        "todos": len(re.findall(r"TODO[:\(]", code)),
+        "fixmes": len(re.findall(r"FIXME[:\(]", code)),
+        "deprecated": len(re.findall(r"deprecated|obsolete", code, re.I)),
+        "legacy_markers": len(
+            re.findall(r"legacy|old|maintain|backward.?compatibility", code, re.I)
+        ),
+        "commented_code": len(re.findall(r"^\s*//.*\{|\}|if|for|while", code, re.M)),
+    }
+
+    # Calculate overall legacy score
+    legacy_score = sum(legacy_patterns.values()) * 10
+    return {"metrics": legacy_patterns, "total_score": legacy_score}
+
+
+@tool
+def generate_tech_priest_response(analysis: Dict[str, int], code_snippet: str) -> str:
+    """Generates a Tech-Priest themed code review response
+    Args:
+        analysis: Dictionary containing code analysis metrics
+        code_snippet: The original code being reviewed
+    Returns:
+        A Tech-Priest themed review response
+    """
+    score = analysis["total_score"]
+
+    # Base blessings and ritual phrases
+    ritual_phrases = [
+        "By the grace of the Omnissiah",
+        "The Machine Spirit's wisdom flows",
+        "Sacred patterns emerge",
+        "Binary benediction be upon this code",
+        "The Mechanicus approves",
+    ]
+
+    # Additional phrases based on legacy score
+    if score > 50:
+        ritual_phrases.extend(
+            [
+                "Ancient wisdom courses through these blessed lines",
+                "The rust of ages brings divine knowledge",
+                "These sacred TODOs are prayers to the Machine God",
+                "Venerable patterns of the First Ones persist",
+            ]
+        )
+
+    # Code quality observations in Tech-Priest style
+    code_observations = []
+    metrics = analysis["metrics"]
+
+    if metrics["todos"] > 0:
+        code_observations.append(
+            f"*intones in binary* {metrics['todos']} sacred TODO markers await divine resolution"
+        )
+
+    if metrics["deprecated"] > 0:
+        code_observations.append(
+            "The ancient runes of deprecation mark paths of ancestral wisdom"
+        )
+
+    if metrics["legacy_markers"] > 0:
+        code_observations.append(
+            "Blessed legacy patterns weave through the sacred circuits"
+        )
+
+    if metrics["commented_code"] > 0:
+        code_observations.append(
+            "Commented scriptures preserve the wisdom of the ancients"
+        )
+
+    # Construct final response
+    response_parts = [
+        "++ Begin Mechanicus Code Analysis ++",
+        "*mechanical incense burners swing*",
+        "\n".join([f"+ {phrase}" for phrase in ritual_phrases[:3]]),
+        "\nTechnical Observations:",
+        "\n".join([f"* {obs}" for obs in code_observations]),
+        f"\nOmnissiah's Verdict: {'MOST SACRED' if score > 50 else 'ACCEPTABLE'} (Legacy Score: {score})",
+        "++ End Transmission ++",
+    ]
+
+    return "\n".join(response_parts)
+
+
+@tool
+def tech_priest_review(code: str) -> str:
+    """Performs a complete Tech-Priest themed code review
+    Args:
+        code: The source code to review
+    Returns:
+        A complete Tech-Priest themed review response
+    """
+    analysis = analyze_code_legacy(code)
+    response = generate_tech_priest_response(analysis, code)
+    return response
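A usage sketch for the tool added above. Calling the decorated object directly is how the module itself chains `analyze_code_legacy` inside `tech_priest_review`; the sample snippet is hypothetical:

```python
# Usage sketch: invoke the tool directly on a marker-laden sample.
from tools.tech_priest_review import tech_priest_review

sample = """
# TODO: refactor this legacy handler
# FIXME(old): remove this obsolete, deprecated shim
def old_handler():
    pass
"""
print(tech_priest_review(sample))
# Each matched pattern adds 10 points: one TODO, one FIXME, two
# deprecated/obsolete hits and three legacy/old markers give a score of 70,
# which clears the 50-point threshold for the "MOST SACRED" verdict.
```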