VirtualLab committed
Commit 041ccdd
1 Parent(s): b6c265e

Default Agent

Files changed (5)
  1. Dockerfile +4 -1
  2. interaction_history.log +29 -1
  3. main.py +22 -15
  4. req.txt +49 -0
  5. requirements.txt +4 -4
Dockerfile CHANGED
@@ -1,12 +1,15 @@
  # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
  # you will also find guides on how best to write your Dockerfile

- FROM python:3.10
+ FROM python:3.12.3

  RUN useradd -m -u 1000 user
  USER user
  ENV PATH="/home/user/.local/bin:$PATH"

+ # Add environment variable to force CPU usage
+ ENV LLAMA_CPP_FORCE_CPU=1
+
  # Create a directory for the app
  WORKDIR /app
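Note that `LLAMA_CPP_FORCE_CPU` does not appear to be a built-in llama-cpp-python switch, so presumably main.py is expected to read it and keep the model on the CPU itself. A minimal sketch of how the app might honor it, assuming a `MODEL_PATH` variable that is not part of this commit:

```python
import os
from llama_cpp import Llama

# Hypothetical wiring: if LLAMA_CPP_FORCE_CPU is set, keep every layer on the CPU
# by constructing the model with n_gpu_layers=0.
force_cpu = os.getenv("LLAMA_CPP_FORCE_CPU", "0") == "1"

llm = Llama(
    model_path=os.getenv("MODEL_PATH", "model.gguf"),  # assumed name, not from this commit
    n_gpu_layers=0 if force_cpu else -1,  # -1 offloads all layers when a GPU build is available
    n_ctx=4096,
)
```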
 
interaction_history.log CHANGED
@@ -14,4 +14,32 @@ You are an AI model expert in computational biology performing autonomous real r
  <think>
  Okay, so I'm trying to figure out how to discover something new in the field of nerve regeneration. I'm not very familiar with this area, but I know it's related to stem cell research. Let me break down what I know and try to understand the problem.

- First, I know that nerve regeneration
+ First, I know that nerve regenerationSystem prompt:
+
+ You are an AI model expert in computational biology performing autonomous real research.
+ Your goal is to discover something new and relevant in the field of nerve regeneration.
+ You can execute shell commands in the Docker container the app is being executed.
+ With access to the following tools:
+
+ - **shell**:
+ - Description: Execute shell commands.
+ - Input: {"command": "ls -l"}
+ - Output: {"stdout": "...", "stderr": "..."}
+
+ 🚀 Autonomous computational biology research initiated!
+ <think>
+ Okay, so I'm trying to figure out how to discover something new in the field of nerve regeneration.System prompt:
+
+ You are an AI model expert in computational biology performing autonomous real research.
+ Your goal is to discover something new and relevant in the field of nerve regeneration.
+ You can execute shell commands in the Docker container the app is being executed.
+ With access to the following tools:
+
+ - **shell**:
+ - Description: Execute shell commands.
+ - Input: {"command": "ls -l"}
+ - Output: {"stdout": "...", "stderr": "..."}
+
+ 🚀 Autonomous computational biology research initiated!
+ <think>
+ Okay, so I'm trying to figure out how to discover something new in the field of nerve regeneration. I'm not very familiar with this area, but I know it's related to
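The logged system prompt describes a single `shell` tool whose input is `{"command": "..."}` and whose output is `{"stdout": "...", "stderr": "..."}`. A minimal sketch of a handler matching that contract (the `run_shell_tool` name is illustrative, not taken from this commit):

```python
import json
import subprocess

def run_shell_tool(arguments: str) -> str:
    """Run the shell command from a tool-call arguments JSON string and
    return the {"stdout": ..., "stderr": ...} payload the prompt promises."""
    command = json.loads(arguments)["command"]
    result = subprocess.run(
        command, shell=True, capture_output=True, text=True, timeout=60
    )
    return json.dumps({"stdout": result.stdout, "stderr": result.stderr})

# Example: run_shell_tool('{"command": "ls -l"}')
```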
main.py CHANGED
@@ -197,19 +197,16 @@ async def async_generator_from_sync(sync_gen_func, *args, **kwargs):
  # Background response generator without requiring a WebSocket.
  #
  async def generate_response_background(conversation):
-     """Generate a model response asynchronously."""
-     #template = Template(CHAT_TEMPLATE)
-     #prompt = template.render(messages=conversation)
-     #logging.info(f"Prompt: {prompt}")
+     logging.info(f"Starting generation with conversation: {conversation}")
      async for token_chunk in async_generator_from_sync(
-         llm.create_chat_completion,
-         messages=conversation,
+         llm.create_chat_completion,
+         messages=conversation,
          stream=True,
          tools=tools_list,
          max_tokens=2048
      ):
-         # Extract token from OpenAI-compatible format
-         yield token_chunk  # Yield the token string directly
+         logging.debug(f"Raw token chunk: {json.dumps(token_chunk, indent=2)}")
+         yield token_chunk
          await asyncio.sleep(0)

  #
@@ -225,18 +222,28 @@ async def run_research_forever():
      while True:
          full_response = ""
          try:
-             # Generate the model response and accumulate the full text.
-             logging.info["Before generate_response_background"]
              async for token in generate_response_background(conversation):
-                 logging.info(f"generate_response_background yields: {token}")
-                 token_text = token["choices"][0]["delta"].get("content", "")
+                 # Safely extract delta
+                 delta = token["choices"][0].get("delta", {})
+
+                 # Handle text content
+                 token_text = delta.get("content", "")  # Default to empty string
                  full_response += token_text
-                 # Log each token individually
+
+                 # Handle tool calls (critical for container environment)
+                 if "tool_calls" in delta:
+                     tool_calls = delta["tool_calls"]
+                     # Process tool call deltas here (append to full_response or log)
+                     tool_call_str = json.dumps(tool_calls)
+                     full_response += f"\n🔧 Tool Call: {tool_call_str}\n"
+
+                 # Logging remains the same
                  with open(log_path, "a") as f:
                      f.write(token_text)
                      f.flush()
-                 # Optionally, check if a finish reason is provided
-                 if token['choices'][0].get("finish_reason", "") is not None:
+
+                 # Check for finish reason
+                 if token['choices'][0].get("finish_reason"):

                  # The presence of a finish reason (like "stop") indicates that generation is complete.
                  # Append the assistant's response to the conversation log.
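For context, the chunks yielded by `llm.create_chat_completion(..., stream=True)` follow the OpenAI-compatible streaming format: each chunk carries `choices[0]["delta"]`, which may hold `content` and, when tools are passed, `tool_calls`, and the final chunk sets `finish_reason`. A minimal standalone sketch of consuming that stream the way the new loop does, without the app's `async_generator_from_sync` wrapper (the model path and prompt are placeholders, not taken from this commit):

```python
import json
from llama_cpp import Llama

# Placeholder model path; the real app constructs `llm` elsewhere in main.py.
llm = Llama(model_path="model.gguf", n_ctx=4096)

full_response = ""
for chunk in llm.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
    max_tokens=64,
):
    delta = chunk["choices"][0].get("delta", {})
    full_response += delta.get("content", "") or ""

    # Tool-call fragments, when present, arrive in the same delta.
    if delta.get("tool_calls"):
        full_response += f"\n🔧 Tool Call: {json.dumps(delta['tool_calls'])}\n"

    if chunk["choices"][0].get("finish_reason"):
        break

print(full_response)
```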
req.txt ADDED
@@ -0,0 +1,49 @@
+ annotated-types==0.7.0
+ anyio==4.8.0
+ black==25.1.0
+ certifi==2025.1.31
+ charset-normalizer==3.4.1
+ click==8.1.8
+ colorama==0.4.6
+ cssbeautifier==1.15.2
+ diskcache==5.6.3
+ djlint==1.36.4
+ EditorConfig==0.17.0
+ fastapi==0.115.8
+ filelock==3.17.0
+ fsspec==2025.2.0
+ h11==0.14.0
+ httptools==0.6.4
+ huggingface-hub==0.28.1
+ idna==3.10
+ Jinja2==3.1.5
+ jinja2-cli==0.8.2
+ jsbeautifier==1.15.2
+ json5==0.10.0
+ llama_cpp_python==0.3.7
+ MarkupSafe==3.0.2
+ mypy-extensions==1.0.0
+ numpy==2.2.2
+ packaging==24.2
+ pathspec==0.12.1
+ platformdirs==4.3.6
+ prettier==0.0.7
+ pydantic==2.10.6
+ pydantic-settings==2.7.1
+ pydantic_core==2.27.2
+ python-dotenv==1.0.1
+ PyYAML==6.0.2
+ regex==2024.11.6
+ requests==2.32.3
+ six==1.17.0
+ sniffio==1.3.1
+ sse-starlette==2.2.1
+ starlette==0.45.3
+ starlette-context==0.3.6
+ tqdm==4.67.1
+ typing_extensions==4.12.2
+ urllib3==2.3.0
+ uvicorn==0.34.0
+ uvloop==0.21.0
+ watchfiles==1.0.4
+ websockets==14.2
requirements.txt CHANGED
@@ -1,4 +1,4 @@
- jinja2
- fastapi
- uvicorn[standard]
- llama.cpp.python[server]
+ jinja2==3.1.5
+ fastapi==0.115.8
+ uvicorn[standard]==0.34.0
+ llama.cpp.python[server]==0.3.7
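The pins now match the versions frozen in req.txt. The unusual `llama.cpp.python[server]` spelling should still resolve to the `llama-cpp-python` project, since pip normalizes distribution names per PEP 503 by collapsing runs of `.`, `-`, and `_` into a single hyphen. A quick way to check the normalization (this snippet is illustrative, not part of the commit):

```python
import re

def normalize(name: str) -> str:
    """PEP 503 name normalization, as used by pip and PyPI's simple index."""
    return re.sub(r"[-_.]+", "-", name).lower()

assert normalize("llama.cpp.python") == normalize("llama_cpp_python") == "llama-cpp-python"
```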