docker run -d -p 3000:3000 \
-e CUSTOM_MODELS=-all,+Qwen1.5-110B-Chat-AWQ \
-e BASE_URL=http://192.168.100.142:8200 \
yidadaa/chatgpt-next-web
docker run -d -p 3001:3000 -e CUSTOM_MODELS=-all,+qwen2.5-72b-instruct-awq -e BASE_URL=http://192.168.100.160:8200 --name chatgpt-next-web --restart=always yidadaa/chatgpt-next-web
docker run -d -p 3002:3000 --name chatgpt-next-web-custom --restart=always yidadaa/chatgpt-next-web
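# Quick sanity check (a minimal sketch; assumes one of the containers above is running
# on this host — adjust the port to the instance you started: 3000, 3001, or 3002):
curl -I http://localhost:3000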
docker run -d \
-p 8080:8080 \
-v /home/free/python_project/Stirling-PDF/trainingData:/usr/share/tessdata \
-v /home/free/python_project/Stirling-PDF/extraConfigs:/configs \
-v /home/free/python_project/Stirling-PDF/logs:/logs \
-v /home/free/python_project/Stirling-PDF/customFiles:/customFiles \
-e DOCKER_ENABLE_SECURITY=true \
-e INSTALL_BOOK_AND_ADVANCED_HTML_OPS=true \
-e LANGS=zh_CN \
-e http_proxy=socks5://192.168.20.21:1080 \
-e https_proxy=socks5://192.168.20.21:1080 \
--name stirling-pdf-nologin \
frooodle/s-pdf:latest-fat
docker run -d \
-p 8080:8080 \
-v /home/free/python_project/Stirling-PDF/trainingData:/usr/share/tessdata \
-v /home/free/python_project/Stirling-PDF/extraConfigs:/configs \
-v /home/free/python_project/Stirling-PDF/logs:/logs \
-v /home/free/python_project/Stirling-PDF/customFiles:/customFiles \
-e DOCKER_ENABLE_SECURITY=true \
-e INSTALL_BOOK_AND_ADVANCED_HTML_OPS=true \
-e SECURITY_ENABLE_LOGIN=true \
-e SECURITY_INITIALLOGIN_USERNAME=admin \
-e SECURITY_INITIALLOGIN_PASSWORD=123 \
-e LANGS=zh_CN \
--name stirling-pdf-login \
frooodle/s-pdf:latest-fat
docker run -d -p 3002:8080 -v E:/temp/Stirling-PDF/trainingData:/usr/share/tessdata -v E:/temp/Stirling-PDF/extraConfigs:/configs -v E:/temp/Stirling-PDF/logs:/logs -v E:/temp/Stirling-PDF/customFiles:/customFiles -e DOCKER_ENABLE_SECURITY=true -e INSTALL_BOOK_AND_ADVANCED_HTML_OPS=true -e LANGS=zh_CN --name stirling-pdf-nologin --restart=always frooodle/s-pdf:0.27.0-fat
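# Optional smoke test (hedged; uses port 8080 as published above — use 3002 for the Windows variant):
curl -I http://localhost:8080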
docker run -d -v F:/.xinference:/root/.xinference \
-v F:/.xinference/.cache/huggingface:/root/.cache/huggingface \
-v F:/.xinference/.cache/modelscope:/root/.cache/modelscope \
-v F:/.xinference/logs:/workspace/xinference/logs \
-e XINFERENCE_MODEL_SRC=modelscope \
-p 9997:9997 \
--gpus all \
--name xinference \
--restart=always \
xprobe/xinference:v0.15.2 \
xinference-local -H 0.0.0.0 \
--log-level debug
docker run -d -v F:/.xinference:/root/.xinference -v F:/.xinference/.cache/huggingface:/root/.cache/huggingface -v F:/.xinference/.cache/modelscope:/root/.cache/modelscope -v F:/.xinference/logs:/workspace/xinference/logs -p 9997:9997 --gpus all --name xinference --restart=always xprobe/xinference:v0.15.2 xinference-local -H 0.0.0.0 --log-level debug
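# Xinference exposes an OpenAI-compatible API on the published port. A minimal check,
# assuming the server above is up (models themselves still have to be launched via the UI or CLI):
curl http://localhost:9997/v1/models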
docker run -d -p 3210:3210 \
-e OPENAI_API_KEY=sk-123 \
-e OPENAI_PROXY_URL=http://192.168.100.143:8090/v1 \
--name lobe-chat \
lobehub/lobe-chat:v1.19.33
docker run -d -p 3210:3210 -e OPENAI_API_KEY=sk-123 -e OPENAI_PROXY_URL=http://192.168.100.143:8090/v1 --name lobe-chat lobehub/lobe-chat:v1.19.33
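# OPENAI_PROXY_URL must point at an OpenAI-compatible /v1 endpoint. A hedged pre-check
# against the upstream configured above (sk-123 is the placeholder key from the command;
# assumes the endpoint is reachable from this host):
curl http://192.168.100.143:8090/v1/models -H "Authorization: Bearer sk-123"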
docker pull vllm/vllm-openai:latest
# Flags before the image name configure Docker; arguments after the image name are passed to vLLM
docker run -d \
--runtime nvidia \
--gpus all \
--name vllm_Qwen2.5-7B-Instruct \
-v F:/.vllm/model:/root/model \
-v F:/.vllm/vllm:/root/.cache/vllm \
-v F:/.vllm/huggingface:/root/.cache/huggingface \
--env "HUGGING_FACE_HUB_TOKEN=<secret>" \
-p 8888:8000 \
--ipc=host \
vllm/vllm-openai:v0.6.3.post1 \
--model /root/model/Qwen2.5-7B-Instruct \
--gpu-memory-utilization 0.8 \
--tensor-parallel-size 2 \
--max-model-len 8129 \
--served-model-name Qwen2.5-7B-Instruct
docker run -d --runtime nvidia --gpus "device=1" --name vllm_Qwen2.5-7B-Instruct -v F:/.vllm/model:/root/model -v F:/.vllm/vllm:/root/.cache/vllm -v F:/.vllm/huggingface:/root/.cache/huggingface --env "HUGGING_FACE_HUB_TOKEN=<secret>" -p 8888:8000 --ipc=host vllm/vllm-openai:v0.6.6.post1 --model /root/model/Qwen2.5-7B-Instruct --gpu-memory-utilization 0.8 --tensor-parallel-size 1 --max-model-len 16384 --served-model-name Qwen2.5-7B-Instruct
docker run -d --runtime nvidia --gpus all --name vllm_Qwen2.5-7B-Instruct-lora -v F:/.vllm/model:/root/model -v F:/.vllm/vllm:/root/.cache/vllm -v F:/.vllm/huggingface:/root/.cache/huggingface --env "HUGGING_FACE_HUB_TOKEN=<secret>" -p 8888:8000 --ipc=host vllm/vllm-openai:v0.6.3.post1 --model /root/model/outputs-Qwen2.5-7B-Instruct-lora --gpu-memory-utilization 0.8 --tensor-parallel-size 2 --max-model-len 8129 --served-model-name Qwen2.5-7B-Instruct-lora
# Launch via vllm.entrypoints.openai.api_server
CUDA_VISIBLE_DEVICES=2 python -m vllm.entrypoints.openai.api_server \
--model /home/ubuntu/luo/Corrective_fintune_v1/models/Qwen/outputs-Qwen2.5-7B-Instruct-lora \
--tokenizer /home/ubuntu/luo/Corrective_fintune_v1/models/Qwen/outputs-Qwen2.5-7B-Instruct-lora \
--max-model-len 2048 \
--gpu-memory-utilization 1 \
--enforce-eager \
--dtype half \
--port 8888
# DeepSeek-R1-Distill-Qwen-7B
docker run -d --runtime nvidia --gpus "device=1" --name vllm-DeepSeek-R1-Distill-Qwen-7B -v F:/.vllm/model:/root/model -v F:/.vllm/vllm:/root/.cache/vllm -v F:/.vllm/huggingface:/root/.cache/huggingface --env "HUGGING_FACE_HUB_TOKEN=<secret>" -p 8888:8000 --ipc=host vllm/vllm-openai:v0.6.6.post1 --model /root/model/DeepSeek-R1-Distill-Qwen-7B --gpu-memory-utilization 0.8 --tensor-parallel-size 1 --max-model-len 16384 --served-model-name DeepSeek-R1-Distill-Qwen-7B
curl http://192.168.100.50:8000/v1/chat/completions -H "Content-Type: application/json" -d '{
"model": "/home/data/models/Qwen1___5-7B-Chat",
"messages": [
{"role": "system", "content": "你是一个有用的助手."},
{"role": "user", "content": "告诉我关于尼米兹号航母的的一些知识."}
]
}'
# Build the image
docker build -t omniparse .
# Start the container
docker run -d --runtime nvidia --gpus "device=1" -p 8001:8001 -v F:/.omniparse:/tmp --name omniparse omniparse:latest
docker run \
-d \
--name mysql_9.1.0 \
-e MYSQL_ROOT_PASSWORD=1qaz2wsx \
-p 33306:3306 \
-v F:/.mysql/conf:/etc/mysql/conf.d \
-v F:/.mysql/data:/var/lib/mysql \
--restart=always \
mysql:9.1.0
# Note: MySQL 9.1.0 removed the following server options and system variables:
# --mysql-native-password server option
# --mysql-native-password-proxy-users server option
# default_authentication_plugin server system variable
# MySQL 9.x recommends caching_sha2_password for remote password authentication
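# A minimal sketch of creating a remote account with caching_sha2_password on the 9.1.0
# container above ('app', 'appdb', and the password are hypothetical placeholders):
docker exec -it mysql_9.1.0 mysql -uroot -p -e \
  "CREATE USER 'app'@'%' IDENTIFIED WITH caching_sha2_password BY 'S3curePass!'; GRANT ALL ON appdb.* TO 'app'@'%';"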
docker run -d --name mysql_8.0 -e MYSQL_ROOT_PASSWORD=1qaz2wsx -p 33306:3306 -v F:/.mysql/conf:/etc/mysql/conf.d -v F:/.mysql/data:/var/lib/mysql --restart=always mysql:8.0
docker exec -it mysql_8.0 /bin/bash
docker run -d \
--privileged \
--runtime nvidia --gpus all \
-p 98080:8080 \
-p 90022:22 \
-p 97000:7000 \
-p 98000:8000 \
-v F:/pythonProject:/home/pythonProject \
--restart=always \
--name dev_env \
dev_env_cuda12.5:latest
docker run -d --privileged --runtime nvidia --gpus "device=1" -p 18080:8080 -p 10022:22 -p 17000:7000 -p 18000:8000 -v F:/pythonProject:/home/pythonProject --restart=always --name dev_env dev_env_conda_ssh:12.6.3-cudnn-devel-ubuntu22.04
# Install the SSH server inside the container yourself
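# A minimal sketch of installing sshd by hand (assumes the Ubuntu 22.04 base implied by the image tag):
docker exec -it dev_env bash -c "apt-get update && apt-get install -y openssh-server && service ssh start"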
docker build -t datatransfer:v1 .
docker run -d --restart=always --name DataTransfer datatransfer:v1
docker build -t notes_synchronization:v1 .
docker run -d -v D:/MyNotes:/app/MyNotes --restart=always --name notes_synchronization notes_synchronization:v1
sudo docker run -i -t -d -p 80:80 --restart=always \
-v F:/.onlyoffice/DocumentServer/logs:/var/log/onlyoffice \
-v F:/.onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data \
-v F:/.onlyoffice/DocumentServer/lib:/var/lib/onlyoffice \
-v F:/.onlyoffice/DocumentServer/db:/var/lib/postgresql \
-e JWT_ENABLED=false \
onlyoffice/documentserver:8.3.0
docker run -i -t -d -p 80:80 --restart=always -v F:/.onlyoffice/DocumentServer/logs:/var/log/onlyoffice -v F:/.onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data -v F:/.onlyoffice/DocumentServer/lib:/var/lib/onlyoffice -v F:/.onlyoffice/DocumentServer/db:/var/lib/postgresql -e JWT_ENABLED=false onlyoffice/documentserver:8.3.0
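# Document Server exposes a health endpoint on the published port; it should return "true"
# once startup finishes (startup can take a minute or two):
curl http://localhost/healthcheck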