Concise Steps to Install and Run Llama 3 on Linux
1. Preparation and Environment Requirements
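Before installing, confirm the basics: a 64-bit Linux host, enough free disk space for the model files (the default llama3 8B download is a few GB), and optionally a working GPU driver. A quick sanity check, as a minimal sketch (Ollama falls back to the CPU when no GPU is visible):

# CPU architecture, free memory, free disk space
uname -m
free -h
df -h /

# GPU driver visibility (optional)
nvidia-smi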
2. Method 1: One-Click Install and Run with Ollama (Recommended)
Option A: install with the official script (it also sets up a systemd service automatically):

curl -fsSL https://ollama.com/install.sh | sh

Option B: manual install from the release tarball:

curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama.tgz
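The systemd unit below runs the server as a dedicated ollama user and group. The Option A script creates these automatically; on the manual route you likely need to create them yourself. A minimal sketch, with names matching the unit file:

# System account with no login shell; -U creates a matching group
sudo useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama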
sudo mkdir -p /opt/ollama
sudo tar -xzf ollama.tgz -C /opt/ollama

For the manual install, create a systemd unit file at /etc/systemd/system/ollama.service with the following content:

[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=/opt/ollama/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
# Allow remote access (restrict the allowed sources in production)
Environment="OLLAMA_HOST=0.0.0.0"
# CORS origins, e.g. when calling from a browser front end
Environment="OLLAMA_ORIGINS=*"
# Custom model storage path (optional)
Environment="OLLAMA_MODELS=/opt/ollama/models"
[Install]
WantedBy=multi-user.target

Reload systemd, then enable and start the service:

sudo systemctl daemon-reload
sudo systemctl enable ollama
sudo systemctl start ollama

Run a model interactively (plain llama3 resolves to the 8B instruct build; the 70B variant requires substantially more RAM/VRAM). To download a model without opening a chat, use pull, then list what is installed locally:

ollama run llama3
ollama run llama3:70b
ollama pull llama3:8b
ollama list
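Before moving on, it is worth confirming that the service answers. The root endpoint simply reports liveness:

curl http://localhost:11434/
# expected output: Ollama is running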
3. Method 2: Docker and Open WebUI for a Visual Interface

With the Ollama service running on the host, start Open WebUI in Docker; --add-host lets the container reach the host's Ollama at host.docker.internal:
docker run -d -p 3000:8080 \
--add-host=host.docker.internal:host-gateway \
-v open-webui:/app/backend/data \
--restart always \
ghcr.io/open-webui/open-webui:main

Then open http://localhost:3000 in a browser. To also expose the host's Ollama model directory inside the container, add a bind mount:

docker run -d -p 3000:8080 \
--add-host=host.docker.internal:host-gateway \
-v /opt/ollama/models:/app/backend/models \
-v open-webui:/app/backend/data \
--restart always \
ghcr.io/open-webui/open-webui:main
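If Ollama runs on a different machine or port, Open WebUI can be pointed at it via the OLLAMA_BASE_URL environment variable. A sketch, with a placeholder address:

docker run -d -p 3000:8080 \
-e OLLAMA_BASE_URL=http://192.168.1.10:11434 \
-v open-webui:/app/backend/data \
--restart always \
ghcr.io/open-webui/open-webui:main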
4. Verification and API Calls

Quick check from the CLI:

ollama run llama3 "Introduce Llama 3 in one sentence"

The HTTP API listens on port 11434 by default. One-shot completion via /api/generate:

curl http://localhost:11434/api/generate -d '{
"model": "llama3:8b",
"prompt": "用一句话介绍 Llama 3",
"stream": false
}'

Multi-turn conversation via /api/chat:

curl http://localhost:11434/api/chat -d '{
"model": "llama3:8b",
"messages": [
{"role": "system", "content": "你是一名专业翻译家。"},
{"role": "user", "content": "Hello, world!"}
],
"stream": false
}'

With "stream": false each endpoint returns a single JSON object: the reply is in the response field for /api/generate and in message.content for /api/chat.

5. Common Issues and Optimization
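First-line checks when something misbehaves, assuming the systemd setup above:

# Service state and recent logs
systemctl status ollama
journalctl -u ollama -e

# Which models are loaded, and whether they sit on CPU or GPU
ollama ps

# GPU visibility from the driver's point of view
nvidia-smi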