安裝
在線下載
# Online download.
# NOTE: Compose v2 release assets are lowercase ("docker-compose-linux-x86_64"),
# while `uname -s` prints "Linux" — lowercase it or the download 404s.
curl -L "https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m)" -o /usr/local/bin/docker-compose
# Download through an HTTP proxy
curl -L "https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m)" -x http://192.168.1.4:1080 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
離線下載
- 訪問:https://github.com/docker/compose/releases
- 下載:docker-compose-linux-x86_64(v2 版本的發行檔案名稱爲小寫)
- 重命名文件爲:
docker-compose
- 拷貝至Linux:
/usr/local/bin/docker-compose
- 執行腳本:
chmod +x /usr/local/bin/docker-compose
常用文檔
常用
查看版本
docker-compose --version
Run容器
# Automatically looks for docker-compose.yml in the current directory
docker-compose up -d
# Install using a specific yml file
docker-compose -f docker-compose.yml up -d
# Start with an environment variable passed in
sudo env ASPNETCORE_ENVIRONMENT=${ASPNETCORE_ENVIRONMENT} docker-compose -f ./docker-compose.yml up -d
停止
docker-compose stop
查看服務運行狀態
docker-compose ps
移除容器和網絡
docker-compose down
Yml
案例
# Prometheus single-service example (indentation restored — required for valid YAML)
version: '3.7'
services:
  prometheus:
    image: prom/prometheus:v2.36.2
    container_name: prometheus
    volumes:
      - ./prometheus/:/etc/prometheus/
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
    environment:
      - ASPNETCORE_ENVIRONMENT=${ASPNETCORE_ENVIRONMENT}
      - TZ=Asia/Shanghai
    ports:
      # Quote port mappings so YAML never misparses them as numbers
      - "9090:9090"
語法
version: "3"  # docker-compose file format version
services:  # list of service definitions follows
  server_name:  # replace server_name with your own name, e.g. mysql/php
    container_name: container_name  # name of the instantiated container; replace with your own
    image: xxx:latest  # image name and tag to use
    build:  # use this when there is no ready-made image and you build your own
      context: /xxx/xxx  # build context: the DIRECTORY containing the Dockerfile (not the file itself)
      dockerfile: Dockerfile  # Dockerfile name, resolved relative to context
    ports:
      - "00:00"  # host_port:container_port — keep quoted so YAML reads a string
      - "00:00"  # multiple mappings may be listed
    volumes:
      - test1:/xx/xx  # managed volume: maps a container directory to the host for easy management
      - test2:/xx/xx  # left side is the named volume, right side is the container directory
      - test3:/xx/xx  # each name must also be declared under top-level `volumes` at the end of the file
    volumes_from:  # mount all volumes from another container
      - volume_container_name  # the volume container's name
    restart: always  # restart the container on any failure (key is `restart`, not `restarts`)
    depends_on:  # startup ordering: this service starts after the listed services
      - server_name  # the other service's key (server_name) in this file
      - server_name1  # started in the listed order
    links:  # companion to depends_on: depends_on orders startup, links wires containers together
      - mysql  # value is a service name; `mysql` can then be used inside this service instead of its IP
    networks:  # join the named networks (similar to attaching a network interface)
      - my_net  # a bridge-type network name
      - myapp_net  # created automatically if missing, but creating it beforehand is recommended
    environment:  # define variables, like ENV in a Dockerfile
      - TZ=Asia/Shanghai  # sets the container timezone to Asia/Shanghai, fixing the
                          # timezone of containers started through compose
      - VAR_NAME=value    # list form is NAME=value; do not mix with "NAME: value" map form
    command:  # `command` overrides the default command the container runs on start
      - '--character-set-server=utf8mb4'        # database character set
      - '--collation-server=utf8mb4_unicode_ci' # database collation
      - '--default-time-zone=+8:00'  # sets the MySQL server timezone — NOT the container timezone
  server_name2:  # a second service starts here
    stdin_open: true  # like docker run -i (keep STDIN open)
    tty: true  # like docker run -t (allocate a pseudo-TTY)
volumes:  # every named volume mounted above must be declared here once
  test1:
  test2:
  test3:
networks:  # declare networks; to pin an IP range, create them beforehand and reference here
  my_net:
    driver: bridge  # network driver type
  myapp_net:
    driver: bridge
Build 鏡像
# ELK stack built from local Dockerfiles (indentation restored — required for valid YAML)
version: '3.7'
services:
  setup:
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - setup:/state:Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,z
      - elasticsearch:/usr/share/elasticsearch/data:z
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      # Bootstrap password.
      # Used to initialize the keystore during the initial startup of
      # Elasticsearch. Ignored on subsequent runs.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk
  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - "5044:5044"
      - "50000:50000/tcp"
      - "50000:50000/udp"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - "5601:5601"
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      # Fleet plugin
      KIBANA_FLEET_SETUP: '1'
    networks:
      - elk
    depends_on:
      - elasticsearch
networks:
  elk:
    driver: bridge
volumes:
  setup:
  elasticsearch:
Pull鏡像
# ELK + APM stack from prebuilt images (indentation restored — required for valid YAML)
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    restart: always
    container_name: elasticsearch
    hostname: elasticsearch
    environment:
      - discovery.type=single-node
    ports:
      # Quote port mappings so YAML never misparses them as numbers
      - "9200:9200"
      - "9300:9300"
  kibana:
    image: docker.elastic.co/kibana/kibana:7.13.2
    restart: always
    container_name: kibana
    hostname: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
  apm_server:
    image: docker.elastic.co/apm/apm-server:7.13.2
    restart: always
    container_name: apm_server
    hostname: apm_server
    command: --strict.perms=false -e
    environment:
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    ports:
      - "8200:8200"
    depends_on:
      - kibana
      - elasticsearch