在 Linux 上安装并运行 Llama 3 的简明步骤
一、准备与环境
二、安装 Ollama 并启动服务
# Install Ollama via the official install script.
curl -fsSL https://ollama.com/install.sh | sh

# Optional: run the server manually in the foreground (skip this if you
# manage Ollama with systemd as below — the two would conflict on port 11434).
# ollama serve

# Create a systemd unit so Ollama starts automatically on boot.
sudo tee /etc/systemd/system/ollama.service >/dev/null <<'EOF'
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=/usr/local/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"

[Install]
WantedBy=default.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable --now ollama

# Verify the installation.
ollama --version

# The server listens on port 11434 by default.
三 拉取并运行 Llama 3
# Run Llama 3 interactively ("llama3" resolves to the 8B model by default).
ollama run llama3
ollama run llama3:8b
ollama run llama3:70b

# Download a model without starting a chat session.
ollama pull llama3:8b

# List installed models.
ollama list

# Remove a model you no longer need.
ollama rm llama3:8b

# 四、可选 Docker 与 GPU 运行
# Run Ollama in Docker (CPU only). Model data is persisted on the host
# under /opt/ai/ollama.
docker run -d \
  -v /opt/ai/ollama:/root/.ollama \
  -p 11434:11434 \
  --name ollama \
  ollama/ollama

# Run with all GPUs instead (requires the NVIDIA Container Toolkit).
# NOTE: remove the CPU container first — both commands use --name ollama,
# so they cannot run at the same time.
docker run --gpus all -d \
  -v /opt/ai/ollama:/root/.ollama \
  -p 11434:11434 \
  --name ollama \
  ollama/ollama

# Start a model inside the running container.
docker exec -it ollama ollama run llama3:8b

# Optional web UI for the Ollama API.
docker run -d \
  -p 8080:8080 \
  --add-host=host.docker.internal:host-gateway \
  --name ollama-webui \
  --restart always \
  ghcr.io/ollama-webui/ollama-webui:main

# Then open http://<server-ip>:8080 in a browser.
五、常用配置与 API 调用
# Change the model storage directory.
sudo mkdir -p /home/ollama/.ollama

# BUGFIX: the original wrote a bare [Service] section directly over
# /etc/systemd/system/ollama.service, which would wipe out ExecStart and
# break the unit. Use a systemd drop-in override instead — it is merged
# with the main unit file rather than replacing it.
sudo mkdir -p /etc/systemd/system/ollama.service.d
sudo tee /etc/systemd/system/ollama.service.d/models.conf >/dev/null <<'EOF'
[Service]
Environment="OLLAMA_MODELS=/home/ollama/.ollama/models"
EOF
sudo systemctl daemon-reload
sudo systemctl restart ollama

# Expose the API on all interfaces and allow cross-origin requests
# (needed when a web UI or remote client talks to the server).
sudo tee /etc/systemd/system/ollama.service.d/network.conf >/dev/null <<'EOF'
[Service]
Environment="OLLAMA_HOST=0.0.0.0"
Environment="OLLAMA_ORIGINS=*"
EOF
sudo systemctl daemon-reload
sudo systemctl restart ollama

# Test the REST API with a non-streaming generate request.
curl http://localhost:11434/api/generate -d '{
  "model": "llama3",
  "prompt": "请用中文介绍 Llama 3。",
  "stream": false
}'