#!/bin/bash
# Start the Ollama server, wait until it accepts TCP connections, pull the
# models this container needs, then block so the container stays alive.
#
# BUG FIXED: the original file had all commands collapsed onto one line after
# the shebang, so the first inline comment commented out the entire script
# and nothing ever executed.
set -euo pipefail

# Start Ollama server in the background; keep its PID so we can wait on it.
ollama serve &
server_pid=$!

# Wait for the server to be ready.
# NOTE(review): port 7860 is not Ollama's default (11434) — presumably
# OLLAMA_HOST is set to 0.0.0.0:7860 in the container environment
# (7860 is the conventional Hugging Face Spaces port); confirm.
while ! nc -z localhost 7860; do
  echo "Waiting for Ollama server to start..."
  sleep 1
done

# Pull the models (blocking; under `set -e` a failed pull aborts the script).
echo "Pulling the model..."
ollama pull steamdj/llama3-cpu-only
ollama pull smollm:1.7b-instruct-v0.2-q6_K
# hf.co/unsloth/phi-4-GGUF:Q8_0

# Keep the container running: block on the backgrounded server process so the
# script's lifetime (and exit status) tracks the server's.
wait "$server_pid"