ollama-cpu-llama3.1 / entrypoint.sh
#!/bin/bash
# Ollama has no --port flag; set the listen address via OLLAMA_HOST so the
# server and the ollama CLI calls below both use port 7860
export OLLAMA_HOST=0.0.0.0:7860
# Start the Ollama server in the background
ollama serve &
# Wait for the server to be ready
while ! nc -z localhost 7860; do
echo "Waiting for Ollama server to start..."
sleep 1
done
# Pull the model
echo "Pulling the model..."
ollama pull steamdj/llama3.1-cpu-only
# Keep the container running
wait
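
A minimal way to exercise this entrypoint (a sketch, assuming the script is set as the ENTRYPOINT of an image built from an ollama/ollama base with nc available; the image tag and prompt below are illustrative, not part of the repo):

# Build and run the container, publishing the port chosen above
docker build -t ollama-cpu-llama3.1 .
docker run -p 7860:7860 ollama-cpu-llama3.1
# After the pull completes, Ollama's generate endpoint should respond on 7860
curl http://localhost:7860/api/generate \
  -d '{"model": "steamdj/llama3.1-cpu-only", "prompt": "Hello", "stream": false}'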