
· One min read

Reference: https://runnerlee.com/2017/08/18/influxdb-telegraf-grafana-monitor

Dump the full default configuration

docker run --rm telegraf:1.4-alpine telegraf config
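The dumped config is usually redirected to a file, edited, and mounted back into the container; a minimal sketch (file name and mount path are assumptions):

docker run --rm telegraf:1.4-alpine telegraf config > telegraf.conf
# edit [[outputs.influxdb]] urls / database, then run with the config mounted
docker run -d --name telegraf -v "$PWD/telegraf.conf":/etc/telegraf/telegraf.conf:ro telegraf:1.4-alpine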

InfluxDB CRUD

curl -XPOST "http://influxdb:8086/query" --data-urlencode "q=CREATE DATABASE telegraf"

curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=mydb" \
--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d"

curl -G "influxdb:8086/query?pretty=true" --data-urlencode "db=telegraf" \
--data-urlencode "q=SELECT * FROM nginx"
curl -G "influxdb:8086/query?pretty=true" --data-urlencode "db=telegraf" \
--data-urlencode "q=show MEASUREMENTS"

· 3 min read

using

    logging:
      driver: syslog
      options:
        syslog-address: 'tcp://10.1.0.123:5000'

Change the password

cat /etc/kibana/kibana.yml | grep -B 2 password

sed -ie 's/#elasticsearch.username: "user"/elasticsearch.username: "admin"/g' /etc/kibana/kibana.yml
sed -ie 's/#elasticsearch.password: "pass"/elasticsearch.password: "12341234"/g' /etc/kibana/kibana.yml
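After changing the credentials, restart Kibana so it picks them up (assuming the standard package install with systemd):

systemctl restart kibana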

Full ELK stack

login

http://elk.bothub.ai/elk/
user
iruVkQ7L

sender: filebeat

install

https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html
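A typical package install on Debian/Ubuntu looks like this (the version number is only an example; check the link above for the current one):

curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.2.4-amd64.deb
sudo dpkg -i filebeat-6.2.4-amd64.deb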

conf

vi /etc/filebeat/filebeat.yml

filebeat.prospectors:

- input_type: log
  document_type: js_error
  paths:
    - /var/log/nginx/tracking.log

- input_type: log
  document_type: laravel_error
  paths:
    - /var/www/rapture-api/storage/logs/laravel-error-*.log

output.logstash:
  enabled: true
  hosts: ["10.140.0.3:5044"]

restart

# test the config first
filebeat.sh -configtest -e
/etc/init.d/filebeat restart

receiver: elk

docker

# raise the kernel max memory map areas (required by Elasticsearch)
sysctl -w vm.max_map_count=262144
sysctl vm.max_map_count

# docker image sebp/elk:540
docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --name elk sebp/elk:540

server {
    listen 80;

    location / {
        ## generate the HTTP basic auth password file
        # htpasswd -c .espasswd user
        # cat .espasswd
        # user:$apr1$Siq.2MpE$GREX96Q0RgpAYBnB67kKf0
        auth_basic "Protected Kibana";
        auth_basic_user_file /.espasswd;
        proxy_pass http://kibana:5601;
    }
}

query

type:nginx_access AND agent: '' -GoogleHC
type:js_error AND err_json.project:"rapture-admin-fe"
fields.appid: 'bitdata-web_php' AND fields.scope:'error'

conf

cd /etc/logstash/conf.d
/opt/bitnami/ctlscript.sh restart logstash

input
{
  beats
  {
    ssl => false
    host => "0.0.0.0"
    port => 5044
  }
  gelf
  {
    host => "0.0.0.0"
    port => 12201
  }
  http
  {
    ssl => false
    host => "0.0.0.0"
    port => 8888
  }
  tcp
  {
    mode => "server"
    host => "0.0.0.0"
    port => 5010
  }
  udp
  {
    host => "0.0.0.0"
    port => 5000
  }
}

filter {
  if [type] == "nginx_access" {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
  if [type] == "laravel_error" {
    grok {
      match => { "message" => "\[%{TIMESTAMP_ISO8601:my_logdate}\] %{DATA:env}\.%{DATA:severity}: %{DATA:message_old}$" }
    }
    mutate {
      rename => {
        "@timestamp" => "read_timestamp"
        "message_old" => "message"
      }
    }
    date {
      match => [ "my_logdate", "yyyy-MM-dd HH:mm:ss" ]
      remove_field => "my_logdate"
      timezone => "Asia/Shanghai"
    }
  }
  if [type] == "js_error" {
    grok {
      match => { "message" => "\] \"%{DATA:request}\" \"%{DATA:agent}\" \"%{DATA:extra_fields}\"$" }
    }
    mutate {
      gsub => [
        "extra_fields", "\"","",
        "extra_fields", "\\x0A","",
        "extra_fields", "\\x22",'\"',
        "extra_fields", "(\\)",""
      ]
    }
    json {
      source => "extra_fields"
      target => "err_json"
      remove_field => ["message", "extra_fields"]
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
}

output
{
  # file { path => "/log_test/test-%{type}-%{+YYYY.MM.dd}.log" }  # for debugging
  if "_grokparsefailure" in [tags] {
    file { path => "/log_test/error-%{type}-%{+YYYY.MM.dd}.log" }
  }

  elasticsearch
  {
    hosts => ["localhost"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
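Before restarting logstash, the pipeline can be syntax-checked; a minimal sketch (the binary path below is the standard package location, adjust it for a bitnami install):

/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/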

· 2 min read
docker create \
--name=letsencrypt \
-v "$PWD/lets":/config \
-e EMAIL=825407762@qq.com \
-e URL=825407762.com \
-e SUBDOMAINS=www \
-e VALIDATION=http \
-p 80:80 -p 443:443 \
-e TZ=PRC \
linuxserver/letsencrypt
## pfx => pem => key crt
# pfx => pem
openssl pkcs12 -in a.pfx -nodes -out a.pem
openssl rsa -in a.pem -out a.key
openssl x509 -in a.pem -out a.crt
kubectl create secret tls ccm-https --key a.key --cert a.crt --namespace=gim-uat
## k8s https, 3-layer cert chain
# pfx => crt
openssl pkcs12 -in a.pfx -nokeys -out a -passin 'pass:Welcome123'
# pfx => key
openssl pkcs12 -in a.pfx -nocerts -out b -nodes -passin 'pass:Welcome123'
cat a | base64 -w 0
cat b | base64 -w 0
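The two base64 strings are what end up in the data section of a TLS Secret manifest; a minimal sketch (name and namespace follow the kubectl example above, everything else is a placeholder):

apiVersion: v1
kind: Secret
metadata:
  name: ccm-https
  namespace: gim-uat
type: kubernetes.io/tls
data:
  tls.crt: <output of "cat a | base64 -w 0">
  tls.key: <output of "cat b | base64 -w 0">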

Version 2018/05/31 - Changelog: https://github.com/linuxserver/docker-letsencrypt/commits/master/root/defaults/ssl.conf

session settings

ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;

Diffie-Hellman parameter for DHE cipher suites

ssl_dhparam /config/nginx/dhparams.pem;

ssl certs

ssl_certificate /config/keys/letsencrypt/fullchain.pem;
ssl_certificate_key /config/keys/letsencrypt/privkey.pem;

protocols

ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';

HSTS, remove # from the line below to enable HSTS

#add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;

OCSP Stapling

ssl_stapling on; ssl_stapling_verify on;
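For stapling to actually reach the OCSP responder, nginx also needs a resolver and a trusted chain; a hedged sketch (the DNS server is an example, the path mirrors the certs above):

resolver 8.8.8.8 valid=300s;
ssl_trusted_certificate /config/keys/letsencrypt/fullchain.pem;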

certbot

certbot-auto certonly --webroot -w /alidata/www -d 825407762.com -d www.825407762.com

docker

docker run -it -v "$PWD":/mk -w /mk -v "$PWD"/__cicd__/ssl:/etc/letsencrypt certbot/certbot certonly \
--webroot --agree-tos --work-dir /mk --email 82547762@qq.com -d www.825407762.com -d 825407762.com -d hub.825407762.com

location /.well-known {
    root /www/certbot;
}
docker run -it -v "$PWD":/mk -w /mk -v "$PWD"/ssl:/etc/letsencrypt certbot/certbot certonly \
--webroot --agree-tos --work-dir /mk --email 82547762@qq.com \
-d ws.bitdata.com.cn \
-d admin.bitdata.com.cn
docker run -it -v "$PWD":/mk -w /mk -v "$PWD"/ssl:/etc/letsencrypt certbot/certbot:v0.25.1 certonly \
--webroot --agree-tos --work-dir /mk --email 82547762@qq.com \
-d m.bitdata.com.cn \
-d api.bitdata.com.cn \
-d www.bitdata.com.cn \
-d download.bitdata.com.cn \
-d admin.bitdata.com.cn
docker run -it -v "$PWD":/mk -w /mk -v "$PWD"/ssl:/etc/letsencrypt certbot/certbot:v0.25.1 certonly \
--webroot --agree-tos --work-dir /mk --email 82547762@qq.com \
-d bitdata.com.cn \
-d s1.bitdata.com.cn

# when prompted for the webroot, enter:
/mk
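Renewal can reuse the same volumes and webroot; a minimal sketch under the same assumptions as the commands above:

docker run -it -v "$PWD":/mk -w /mk -v "$PWD"/ssl:/etc/letsencrypt certbot/certbot renew --webroot -w /mk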

· 2 min read

doc

Reverse proxy

server {
    listen 80 default_server;
    server_name _;
    root /;
}
server {
    listen 80;
    server_name a.test.zx5435.com;

    location / {
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://127.0.0.1:666;
    }
}

location matching priority

https://www.cnblogs.com/IPYQ/p/7889399.html
location ^~ /images/ {
    # matches any URI starting with /images/; once it matches, stop searching regex locations and use this block
}
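As a rough sketch of the overall precedence (exact match, then ^~ prefix, then regex in file order, then the longest plain prefix), with made-up paths:

location = /exact { }          # 1. exact match wins outright
location ^~ /static/ { }       # 2. prefix that suppresses regex matching
location ~* \.(gif|jpg)$ { }   # 3. regex locations, checked in file order
location /docs/ { }            # 4. plain prefix, used only if no regex matched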

Aliyun best-practice config

Config explained: http://blog.csdn.net/tjcyjd/article/details/50695922. Line-by-line explanation: https://segmentfault.com/a/1190000016385662

log_format  main  '$remote_addr - $remote_user [$time_local] "$request" $http_host '
'$status $request_length $body_bytes_sent "$http_referer" '
'"$http_user_agent" $request_time $upstream_response_time';

SSL best-practice config

https://gist.github.com/fotock/9cf9afc2fd0f813828992ebc4fdaad6f

user  www www;
worker_processes 1;

error_log /alidata/log/nginx/error.log crit;
pid /alidata/server/nginx/logs/nginx.pid;

worker_rlimit_nofile 65535;

events
{
    use epoll;
    worker_connections 65535;
}


http {
    include mime.types;
    default_type application/octet-stream;

    #charset gb2312;

    server_names_hash_bucket_size 128;
    client_header_buffer_size 32k;
    large_client_header_buffers 4 32k;
    client_max_body_size 8m;

    sendfile on;
    tcp_nopush on;

    keepalive_timeout 60;

    tcp_nodelay on;

    fastcgi_connect_timeout 300;
    fastcgi_send_timeout 300;
    fastcgi_read_timeout 300;
    fastcgi_buffer_size 64k;
    fastcgi_buffers 4 64k;
    fastcgi_busy_buffers_size 128k;
    fastcgi_temp_file_write_size 128k;

    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_http_version 1.0;
    gzip_comp_level 2;
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;
    #limit_zone crawler $binary_remote_addr 10m;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
        '$status $body_bytes_sent "$http_referer" '
        '"$http_user_agent" "$http_x_forwarded_for"';
    include /alidata/server/nginx/conf/vhosts/*.conf;
}

· One min read

rancher

sudo docker run -d --restart=unless-stopped -p 8080:8080 rancher/server:stable

sudo docker run --rm --privileged \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/rancher:/var/lib/rancher \
rancher/agent:v1.2.9 http://192.168.199.7:8080/v1/scripts/DBB33093FAD05C390C97:1514678400000:g38ksyRrU9Badhb36BjQmckY1j8

· 2 min read

docker run --restart=unless-stopped --name influxdb-1 -d -p 8086:8086 -v $PWD:/var/lib/influxdb influxdb

docker run --restart=unless-stopped --name influxdb-1 -d \
  -p 8086:8086 \
  -p 8083:8083 \
  -e INFLUXDB_ADMIN_ENABLED=true \
  -v $PWD:/var/lib/influxdb influxdb

svc-influxdb:
  image: influxdb:1.7.11
  ports:
    - 8086:8086
  environment:
    - TZ=utc-8
    - INFLUXDB_ADMIN_USER=root
    - INFLUXDB_ADMIN_PASSWORD=root
    - INFLUXDB_DB=iothub
    - INFLUXDB_HTTP_ENABLED=true
    - INFLUXDB_HTTP_AUTH_ENABLED=true

Common commands

influx
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -database gimc-perf
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -database gimc-perf -precision rfc3339

auth
show users
show databases
# show tables
show MEASUREMENTS
SHOW MEASUREMENTS ON "gimc-perf"

# set the time output format
precision rfc3339

# sql https://archive.docs.influxdata.com/influxdb/v1.2/query_language/data_exploration/#the-basic-select-statement
SELECT * FROM "temperature"
SELECT * FROM /.*/ LIMIT 1
SELECT * FROM sensor where "deviceId"='sensor1'
# tz https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
select * from sensor_0s tz('Asia/Shanghai')
select * from sensor_0s tz('Etc/GMT-8')

select

# list all tag keys
show tag keys from sensor_0s;
# list the values under a tag key
show tag values from sensor_0s with key="SNO";
# list series
SHOW SERIES ON "gimc-perf" from sensor_0s


# =~/pattern/ matches values containing the given pattern
select * from test where monitor_name=~/^app/;

# fill fill(100) fill(previous) fill(linear)
SELECT MAX("water_level") FROM "h2o_feet" WHERE location = 'coyote_creek' GROUP BY time(12m) fill(previous);

# export
influxd backup -database gimc-perf -host ts-uf668p5xos953ygfo.influxdata.tsdb.aliyuncs.com:8088 -username grundfos -password Ab123456 -start 2023-08-15T20:00:00Z -end 2023-08-15T20:10:00Z ts
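A matching restore would look roughly like this (hedged sketch: assumes a portable-format backup and the InfluxDB 1.x influxd restore flags; adjust names and paths):

influxd restore -portable -db gimc-perf -newdb gimc-perf-restore ts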

copy into

select SNO,c,d,e from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;
select SNO,c,d,e into sensor_test from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;

select SNO,c,d,e from sensor_test where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 1h;

select SNO='hahahah',c,d,e into sensor_test from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;
select SNO,c,d,e from sensor_test where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 1h;

ddl

# delete table
drop measurement sensor_test

show measurements

import csv

cat /z/data/${tt}.csv | awk -F',' '{printf "sensor_0s,SNO=%s %s=%s %s\n", $3, $1, $4, $2}' >> /z/data/${tt}.sql
# cat /z/data/${tt}.txt | awk -F',' '{printf "sensor_0s,SNO=%s value=%s %s\n", $3, $1, $4, $2}' > /z/data/${tt}.sql
# cat /z/data/${tt}.txt | awk -F',' '{gsub(/value/, $1); printf "sensor_0s,%s\n", $2}' >> /z/data/${tt}.sql
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -import -precision=s -path=datarrr.txt
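Note that influx -import expects a small DML header before the generated line-protocol rows; a minimal sketch of the top of the import file (database name is an example):

# DML
# CONTEXT-DATABASE: gimc-perf
sensor_0s,SNO=sensor1 value=1.23 1565000000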

· 2 min read

infrastructure

portainer (management UI)

docker run -d --restart=unless-stopped --name portainer -p 1234:9000 -v "/var/run/docker.sock:/var/run/docker.sock" portainer/portainer-ce:2.6.0
docker run -d --restart=unless-stopped --name portainer -p 1234:9000 -v "/var/run/docker.sock:/var/run/docker.sock" portainer/portainer:1.23.2
# //./pipe/docker_engine
# "hosts": ["tcp://0.0.0.0:2375"]
server {
    listen 80;
    charset utf-8;
    server_name uat.docker.manager;
    location / {
        proxy_pass http://127.0.0.1:1234;

        proxy_http_version 1.1;
        proxy_set_header Connection "";

        proxy_set_header Host $host;
        proxy_set_header Scheme $scheme;
    }
}

registry

DOCKER_OPTS="--insecure-registry 192.168.1.19:5000"
docker run -d -p 5000:5000 --restart=always --name registry registry:2.6.2

docker push 192.168.199.115:5000/r1
docker rmi 192.168.199.115:5000/r1
docker pull 192.168.199.115:5000/r1

# /var/lib/registry/docker/registry/v2 # tree -L 4
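The registry contents can also be listed over the v2 HTTP API, for example:

curl http://192.168.199.115:5000/v2/_catalog
curl http://192.168.199.115:5000/v2/r1/tags/list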

cadvisor

sudo docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/dev/disk/:/dev/disk:ro \
--publish=2345:8080 \
--detach=true \
--name=cadvisor \
google/cadvisor:v0.28.3

db

mysql

docker run --restart=unless-stopped --name mysql-1 -it -d \
-v "$PWD":/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -e TZ=Asia/Shanghai mysql:8.0.15 \
--character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci
# --character-set-server=utf8 --collation-server=utf8_general_ci
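To open a client shell inside the running container (credentials as set above):

docker exec -it mysql-1 mysql -uroot -ppassword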

redis

docker run --restart=unless-stopped --name redis-1 -d -p 6379:6379 redis:6.0.3-alpine
docker run --restart=unless-stopped --name redis-1 -d -p 6379:6379 redis:3.2.9-alpine
docker run --name some-redis -d redis:alpine
docker run -it --link some-redis:redis --rm redis:alpine redis-cli -h 139.196.14.14 -p 6379

docker run --restart=unless-stopped -v "$PWD/redis.conf":/usr/local/etc/redis/redis.conf -v "$PWD":/data --name redis-2 -d -p 6379:6379 redis:3.2.9-alpine redis-server /usr/local/etc/redis/redis.conf

redis-server --requirepass 12345

mongodb

docker run --restart=unless-stopped --name mongo-1 -d -p 27017:27017 -v "$PWD":/etc/mongo mongo:3.6.4
# common commands
show dbs # list all databases
use test # switch to a database
db # show the current database
db.stats(); # show stats for the current database
db.dropDatabase(); # drop the current database

# users
show users; # list users in the current database
db.createUser({user:"ynh-test",pwd:"ynh-test",roles:[{role:"userAdmin",db:"ynh-test"}]}); # create a user
db.removeUser("userName"); # remove a user

db.tb_test.insert({"_id":"520","name":"xiaoming"})
db.tb_test.find();

phpmyadmin

docker run --restart=unless-stopped --name pmd -d -p 33060:80 \
  -e PMA_HOST=139.196.14.10 \
  phpmyadmin/phpmyadmin:4.7
vi /etc/phpmyadmin/config.user.inc.php
supervisorctl restart all

other

ZenTao (禅道)

docker run -d -p 8880:80 \
-e USER="root" -e PASSWD="password" \
-e BIND_ADDRESS="false" \
-e SMTP_HOST="163.177.90.125 smtp.exmail.qq.com" \
-v "$PWD":/opt/zbox/ \
--name zentao-server \
idoop/zentao:latest

· One min read

docker-compose

docker-compose ps

docker-compose logs --tail=100 -f
docker-compose logs --tail=100 -f svc-web

docker-compose restart svc-web

install

# https://docs.docker.com/compose/install/#install-compose
yum install -y docker-compose
sudo curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
docker-compose --version

tpl

java + redis + influxdb

version: "3"

services:

svc-web:
image: ghcr.io/wolanx/iothub-echo
ports:
- 1883:1883
- 18830:8080
entrypoint: java -cp /app/resources:/app/classes:/app/libs/* com.wolanx.echo.iothub.IotHubApplication
volumes:
- .:/root
environment:
- TZ=utc-8
- JVM=-XX:+UseContainerSupport
- REDIS_HOST=svc-redis
- INFLUXDB_HOST=svc-influxdb
networks:
- mynet

svc-redis:
image: redis:6.0.3-alpine
ports:
- 6379:6379
networks:
- mynet

svc-influxdb:
image: influxdb:1.7.11
ports:
- 8086:8086
environment:
- TZ=utc-8
- INFLUXDB_ADMIN_USER=root
- INFLUXDB_ADMIN_PASSWORD=root
- INFLUXDB_DB=iothub
- INFLUXDB_HTTP_ENABLED=true
- INFLUXDB_HTTP_AUTH_ENABLED=true
networks:
- mynet

networks:
mynet:

loki + grafana

version: "3"

services:

svc-loki:
image: grafana/loki:2.4.0
ports:
- "3100:3100"
- "9095:9095"
command: -config.file=/etc/loki/local-config.yaml
networks:
- loki

svc-grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
networks:
- loki

networks:
loki:

· 2 min read

doc

install

centos

sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
sudo yum install -y docker-ce

debian

# Debian Bullseye 11 (stable)
# Debian Buster 10 (oldstable)
# https://docs.docker.com/engine/install/debian/
apt-get install ca-certificates curl gnupg lsb-release
mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
apt update
apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin

Start on boot

systemctl status docker
systemctl enable docker

service docker restart
kill -SIGHUP $(pidof dockerd)

config

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": [
    "https://registry.docker-cn.com"
  ],
  "log-opts": {"max-size": "500m", "max-file": "2"}
}
EOF
{
  "debug": true,
  "registry-mirrors": [
    "https://registry.docker-cn.com"
  ],
  "log-driver": "loki",
  "log-opts": {
    "max-size": "500m",
    "max-file": "2",
    "loki-url": "http://192.168.2.238:3100/loki/api/v1/push"
  }
}
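daemon.json changes only take effect after the daemon restarts; the mirror setting can then be checked with docker info:

systemctl restart docker
docker info | grep -A 3 -i mirror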

root

docker exec -it --user=root 114 sh

timezone issues

# docker-compose.yml
environment:
  - TZ=utc-8

# k8s.yml
env:
  - name: TZ
    value: "utc-8"

# dpkg-reconfigure -f noninteractive tzdata
apk add tzdata --no-cache \
&& ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone

ops - maintain

log

# check container log sizes
docker ps -q | xargs docker inspect --format="{{.LogPath}}" | xargs ls -lh

# nginx forward
RUN ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log

prune

docker system prune -f

docker container prune --filter "until=72h"
docker volume prune --filter "label!=keep"
docker network prune --filter "until=24h"

docker image prune
docker image prune --filter "dangling=true"
docker image prune -a --filter "until=72h"
docker rmi $(docker images | grep "gimc-code" | tail -n +20 | awk '{print $3}')

image proxy

echo $CR_PAT | docker login ghcr.io -u zx5435 --password-stdin
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.16.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.16.0 gcr.io/kubernetes-helm/tiller:v2.16.0

network

  • iptables -t nat -L DOCKER -n --line-numbers
  • iptables -nL -t nat

tools

ctop - container-top

# https://github.com/bcicen/ctop
sudo wget https://github.com/bcicen/ctop/releases/download/v0.7.7/ctop-0.7.7-linux-amd64 -O /usr/local/bin/ctop
sudo chmod +x /usr/local/bin/ctop

· 2 min read

2019-04-04 zst php:7.1.11

docker pull zx5435/php:7.1.10

All versions are small, around 100 MB, quick to download and fast to run. They cover my everyday work; give them a try. Included:

  • mysql
  • postgres
  • redis
  • mongo
  • apcu
  • gd
  • xdebug
  • bcmath
  • zip
FROM php:7.1.11-fpm-alpine

RUN apk add --no-cache freetype libpng libjpeg-turbo freetype-dev libpng-dev libjpeg-turbo-dev \
&& apk add --no-cache --virtual .build-deps autoconf g++ libssh2 openssl openssl-dev make pcre-dev tree curl \
&& apk add --no-cache postgresql-dev \
&& docker-php-ext-configure gd \
--with-gd \
--with-freetype-dir=/usr/include/ \
--with-png-dir=/usr/include/ \
--with-jpeg-dir=/usr/include/ \
&& pecl install mongodb-1.5.2 redis apcu xdebug \
&& docker-php-ext-enable mongodb redis apcu xdebug \
&& docker-php-ext-install gd pdo_mysql opcache bcmath pgsql pdo_pgsql zip sockets \
&& apk del .build-deps \
&& pecl clear-cache \
&& docker-php-source delete

RUN curl https://getcomposer.org/composer.phar -o /usr/local/bin/composer \
&& chmod +x /usr/local/bin/composer \
&& mkdir -p /var/runtime && chmod -R 777 /var/runtime \
&& alias ll='ls -l'

# COPY __cicd__/php/php.ini /usr/local/etc/php/
# COPY __cicd__/php/www.conf /usr/local/etc/php-fpm.d/
# docker build -f Dockerfile.php -t zx5435/php:7.1.11 .

2019-03-07 amqp error

# requires librabbitmq (the amqp extension links against it)
pecl install amqp
composer config -g repo.packagist composer https://packagist.phpcomposer.com

2018-10-09 bitdata php:7.1.10

FROM php:7.1.10-fpm-alpine

RUN apk add --no-cache freetype libpng libjpeg-turbo freetype-dev libpng-dev libjpeg-turbo-dev \
&& apk add --no-cache --virtual .build-deps autoconf g++ libssh2 openssl openssl-dev make pcre-dev \
&& apk add --no-cache postgresql-dev \
&& docker-php-ext-configure gd \
--with-gd \
--with-freetype-dir=/usr/include/ \
--with-png-dir=/usr/include/ \
--with-jpeg-dir=/usr/include/ \
&& pecl install mongodb-1.5.2 redis apcu xdebug \
&& docker-php-ext-enable mongodb redis apcu xdebug \
&& docker-php-ext-install gd pdo_mysql opcache bcmath pgsql pdo_pgsql zip \
&& apk del .build-deps \
&& pecl clear-cache \
&& docker-php-source delete

RUN curl https://getcomposer.org/composer.phar -o /usr/local/bin/composer \
&& chmod +x /usr/local/bin/composer \
&& mkdir -p /var/runtime && chmod -R 777 /var/runtime

# COPY __cicd__/php/php.ini /usr/local/etc/php/
# COPY __cicd__/php/www.conf /usr/local/etc/php-fpm.d/
# docker build -f __cicd__/php/Dockerfile.runtime -t zx5435/php:7.1.10 .