
kafka

quick demo

# session 1
kafka-console-producer --broker-list localhost:32773 --topic chat
# session 2, tails session 1
kafka-console-consumer --bootstrap-server localhost:32773 --topic chat --from-beginning
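
If the broker does not auto-create topics, create chat first. A sketch assuming a recent Kafka CLI (older releases take --zookeeper instead of --bootstrap-server):

kafka-topics --create --bootstrap-server localhost:32773 --topic chat --partitions 1 --replication-factor 1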

deploy

docker run -d --name zookeeper -p 2181:2181 zookeeper
docker run -d --name kafka -p 9092:9092 \
  --link zookeeper \
  --env KAFKA_ZOOKEEPER_CONNECT=192.168.31.229:2181 \
  --env KAFKA_ADVERTISED_HOST_NAME=192.168.31.229 \
  --env KAFKA_ADVERTISED_PORT=9092 \
  wurstmeister/kafka
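
To sanity-check the containers, list topics through the broker container. The script path inside the wurstmeister image is an assumption here:

# /opt/kafka/bin is an assumed path for the wurstmeister image
docker exec kafka /opt/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.31.229:2181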

old

# Step 2: start the services
bin/zookeeper-server-start.sh config/zookeeper.properties
bin/kafka-server-start.sh config/server.properties

# Step 3: create a topic
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
bin/kafka-topics.sh --list --zookeeper localhost:2181

# Step 4: produce messages
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

# Step 5: consume messages
bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

# info
bin/kafka-topics.sh --describe --zookeeper localhost:2181
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test


Reference: https://runnerlee.com/2017/08/18/influxdb-telegraf-grafana-monitor

Dump the full default config

docker run --rm telegraf:1.4-alpine telegraf config

influxdb CRUD

curl -XPOST "http://influxdb:8086/query" --data-urlencode "q=CREATE DATABASE telegraf"

curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=mydb" \
--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d"

curl -G "influxdb:8086/query?pretty=true" --data-urlencode "db=telegraf" \
--data-urlencode "q=SELECT * FROM nginx"
curl -G "influxdb:8086/query?pretty=true" --data-urlencode "db=telegraf" \
--data-urlencode "q=show MEASUREMENTS"
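
The calls above only create and query; writes go through /write with the line protocol. Measurement and tag names below are made up for illustration:

# insert one point into the telegraf database
curl -XPOST "http://influxdb:8086/write?db=telegraf" \
  --data-binary 'cpu_test,host=server01 value=0.64'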


usage (in a docker-compose service definition)

    logging:
      driver: syslog
      options:
        syslog-address: 'tcp://10.1.0.123:5000'

Change the password; check the current settings first:

cat /etc/kibana/kibana.yml | grep -B 2 password

sed -ie 's/#elasticsearch.username: "user"/elasticsearch.username: "admin"/g' /etc/kibana/kibana.yml
sed -ie 's/#elasticsearch.password: "pass"/elasticsearch.password: "12341234"/g' /etc/kibana/kibana.yml

ELK, full setup

login

http://elk.bothub.ai/elk/
user
iruVkQ7L

sender: filebeat

install

https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html

conf

vi /etc/filebeat/filebeat.yml

filebeat.prospectors:

- input_type: log
  document_type: js_error
  paths:
    - /var/log/nginx/tracking.log

- input_type: log
  document_type: laravel_error
  paths:
    - /var/www/rapture-api/storage/logs/laravel-error-*.log

output.logstash:
  enabled: true
  hosts: ["10.140.0.3:5044"]

restart

# test the config first
filebeat.sh -configtest -e
/etc/init.d/filebeat restart

receiver: ELK

docker

# raise the mmap count limit (required by Elasticsearch)
sysctl -w vm.max_map_count=262144
sysctl vm.max_map_count

# docker image sebp/elk:540
docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --name elk sebp/elk:540

server {
    listen 80;

    location / {
        ## HTTP basic auth; generate the password file with:
        # htpasswd -c .espasswd user
        # cat .espasswd
        # user:$apr1$Siq.2MpE$GREX96Q0RgpAYBnB67kKf0
        auth_basic "Protected Kibana";
        auth_basic_user_file /.espasswd;
        proxy_pass http://kibana:5601;
    }
}

query

type:nginx_access AND agent: '' -GoogleHC
type:js_error AND err_json.project:"rapture-admin-fe"
fields.appid: 'bitdata-web_php' AND fields.scope:'error'

conf

cd /etc/logstash/conf.d
/opt/bitnami/ctlscript.sh restart logstash

input {
  beats {
    ssl => false
    host => "0.0.0.0"
    port => 5044
  }
  gelf {
    host => "0.0.0.0"
    port => 12201
  }
  http {
    ssl => false
    host => "0.0.0.0"
    port => 8888
  }
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 5010
  }
  udp {
    host => "0.0.0.0"
    port => 5000
  }
}

filter {
  if [type] == "nginx_access" {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
  if [type] == "laravel_error" {
    grok {
      match => { "message" => "\[%{TIMESTAMP_ISO8601:my_logdate}\] %{DATA:env}\.%{DATA:severity}: %{DATA:message_old}$" }
    }
    mutate {
      rename => {
        "@timestamp" => "read_timestamp"
        "message_old" => "message"
      }
    }
    date {
      match => [ "my_logdate", "yyyy-MM-dd HH:mm:ss" ]
      remove_field => "my_logdate"
      timezone => "Asia/Shanghai"
    }
  }
  if [type] == "js_error" {
    grok {
      match => { "message" => "\] \"%{DATA:request}\" \"%{DATA:agent}\" \"%{DATA:extra_fields}\"$" }
    }
    mutate {
      gsub => [
        "extra_fields", "\"", "",
        "extra_fields", "\\x0A", "",
        "extra_fields", "\\x22", '\"',
        "extra_fields", "(\\)", ""
      ]
    }
    json {
      source => "extra_fields"
      target => "err_json"
      remove_field => ["message", "extra_fields"]
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
}

output {
  # file { path => "/log_test/test-%{type}-%{+YYYY.MM.dd}.log" }  # for debugging
  if "_grokparsefailure" in [tags] {
    file { path => "/log_test/error-%{type}-%{+YYYY.MM.dd}.log" }
  }

  elasticsearch {
    hosts => ["localhost"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
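
Before restarting, the pipeline can be syntax-checked. The binary path is a guess for this Bitnami install; adjust to yours:

# -t is --config.test_and_exit
/opt/bitnami/logstash/bin/logstash -t -f /etc/logstash/conf.d/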


rancher

sudo docker run -d --restart=unless-stopped -p 8080:8080 rancher/server:stable

sudo docker run --rm --privileged \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/rancher:/var/lib/rancher \
rancher/agent:v1.2.9 http://192.168.199.7:8080/v1/scripts/DBB33093FAD05C390C97:1514678400000:g38ksyRrU9Badhb36BjQmckY1j8


(figure: gpio-pin.png, GPIO pin layout)

gpio.py
# pip install rpi.gpio
import time

import RPi.GPIO as GPIO

# https://blog.csdn.net/guzhong10/article/details/80119322

# gpio mode 0 out
# gpio write 0 1



GPIO.setwarnings(False)

GPIO_PIN = 26

GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.OUT)

# L_Motor = GPIO.PWM(GPIO_PIN, 100)
# L_Motor.start(0)

while True:
    GPIO.output(GPIO_PIN, GPIO.HIGH)
    time.sleep(.5)
    print(GPIO.input(GPIO_PIN))

# while True:
#     GPIO.output(i, 1)
#     time.sleep(1)
#     GPIO.output(i, 0)
#     time.sleep(1)
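
For a quick test without Python, the same pin can be toggled from the shell through the legacy sysfs interface (BCM numbering, as in the script above):

echo 26 > /sys/class/gpio/export             # expose BCM pin 26
echo out > /sys/class/gpio/gpio26/direction  # configure as output
echo 1 > /sys/class/gpio/gpio26/value        # drive high
cat /sys/class/gpio/gpio26/value             # read back the level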
run.py
import time

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)  # use BCM pin numbering

delay = 5  # step delay in ms

pin_1 = 6
pin_2 = 13
pin_3 = 19
pin_4 = 26


def init():
    GPIO.setwarnings(False)
    GPIO.setup(pin_1, GPIO.OUT)
    GPIO.setup(pin_2, GPIO.OUT)
    GPIO.setup(pin_3, GPIO.OUT)
    GPIO.setup(pin_4, GPIO.OUT)


def forward(delay):
    setStep(1, 0, 0, 0)
    time.sleep(delay)
    setStep(0, 1, 0, 0)
    time.sleep(delay)
    setStep(0, 0, 1, 0)
    time.sleep(delay)
    setStep(0, 0, 0, 1)
    time.sleep(delay)


def setStep(w1, w2, w3, w4):
    # print(w1, w2, w3, w4)
    GPIO.output(pin_1, w1)
    GPIO.output(pin_2, w2)
    GPIO.output(pin_3, w3)
    GPIO.output(pin_4, w4)


init()
while True:
    forward(delay / 1000.0)
test.py
import time

import RPi.GPIO as GPIO

delay = 5  # step delay in ms

pin_1 = 6
pin_2 = 13
pin_3 = 19
pin_4 = 26
# override: reversed pin order, steps the motor in the opposite direction
pin_1 = 26
pin_2 = 19
pin_3 = 13
pin_4 = 6

GPIO.setmode(GPIO.BCM)  # use BCM pin numbering


def init():
    GPIO.setwarnings(False)
    GPIO.setup(pin_1, GPIO.OUT)
    GPIO.setup(pin_2, GPIO.OUT)
    GPIO.setup(pin_3, GPIO.OUT)
    GPIO.setup(pin_4, GPIO.OUT)


def forward(delay):
    setStep(1, 0, 0, 0)
    time.sleep(delay)
    setStep(0, 1, 0, 0)
    time.sleep(delay)
    setStep(0, 0, 1, 0)
    time.sleep(delay)
    setStep(0, 0, 0, 1)
    time.sleep(delay)


def setStep(w1, w2, w3, w4):
    # print(w1, w2, w3, w4)
    GPIO.output(pin_1, w1)
    GPIO.output(pin_2, w2)
    GPIO.output(pin_3, w3)
    GPIO.output(pin_4, w4)


def main():
    init()
    while True:
        forward(delay / 1000.0)


main()  # call main

docker-compose exec php ps -ef | grep crond  # check that crond is running
docker-compose exec -T php crond -l 0 -L /var/runtime/crontab.log

docker-compose exec php crontab environments/$1/test/crontab

docker-compose exec php crontab -l

base

# min   hour    day     month   weekday command
*/15 * * * * run-parts /etc/periodic/15min
0 * * * * run-parts /etc/periodic/hourly
0 2 * * * run-parts /etc/periodic/daily
0 3 * * 6 run-parts /etc/periodic/weekly
0 5 1 * * run-parts /etc/periodic/monthly

Frequently used

# prod
YII=/www/yii
LOG_DIR=/www/console/runtime/crontab

# min hour day month weekday command
*/1 * * * * php $YII zhao/test/hello >> $LOG_DIR/test.log 2>&1
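
To confirm the job actually fires, tail its log (path from the variables above):

docker-compose exec php tail -f /www/console/runtime/crontab/test.log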


Performance metrics

Suspect the CPU?

  • sar -u
  • sar -q

Suspect memory?

  • sar -B
  • sar -r
  • sar -W

Suspect I/O?

  • sar -b
  • sar -u
  • sar -d

install

sudo apt-get -y install sysstat
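
On Debian/Ubuntu the collector ships disabled; flipping the flag (file location assumed for those distros) lets sar accumulate history:

# enable periodic collection, then restart the service
sudo sed -i 's/ENABLED="false"/ENABLED="true"/' /etc/default/sysstat
sudo service sysstat restart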

sar -u 1 5

Report CPU utilization statistics

Linux 4.4.0-1022-aws (aws-sandbox)  12/28/2017  _x86_64_    (4 CPU)

07:15:23 AM CPU %user %nice %system %iowait %steal %idle
07:15:24 AM all 46.95 0.00 3.05 0.00 0.00 50.00
07:15:25 AM all 15.86 0.00 2.30 0.00 0.00 81.84
07:15:26 AM all 10.66 0.00 0.51 0.00 0.00 88.83
07:15:27 AM all 20.67 0.00 0.52 0.00 0.00 78.81
07:15:28 AM all 12.12 0.00 0.00 0.00 0.25 87.63
Average: all 21.25 0.00 1.27 0.00 0.05 77.42
CPU         "all" means the statistics are averaged over all CPUs
%user       percentage of CPU time spent at the user level (applications)
%nice       percentage of CPU time spent at the user level with nice priority
%system     percentage of CPU time spent at the kernel level
%iowait     percentage of time the CPUs were idle while waiting on disk I/O
%steal      percentage of time spent in involuntary wait while the hypervisor serviced another virtual CPU
%idle       percentage of time the CPU was idle
If %iowait is high, the disk is an I/O bottleneck.
If %idle is high but the system responds slowly, the CPU may be waiting on memory allocation; add RAM.
If %idle stays below 10, CPU capacity is low and the CPU is the resource most in need of attention.

sar -q 1 5

Check the load averages

Linux 4.4.0-1022-aws (aws-sandbox)  12/28/2017  _x86_64_    (4 CPU)

07:26:52 AM runq-sz plist-sz ldavg-1 ldavg-5 ldavg-15 blocked
07:26:53 AM 6 2121 2.00 1.97 1.56 0
07:26:54 AM 0 2117 2.48 2.07 1.60 0
07:26:55 AM 0 2117 2.48 2.07 1.60 0
07:26:56 AM 1 2117 2.48 2.07 1.60 0
07:26:57 AM 3 2117 2.48 2.07 1.60 0
Average: 2 2118 2.38 2.05 1.59 0
runq-sz     run queue length (number of tasks waiting for run time)
plist-sz    number of tasks (processes and threads) in the task list
ldavg-1     system load average over the last 1 minute
ldavg-5     system load average over the last 5 minutes
ldavg-15    system load average over the last 15 minutes

sar -r 1 5

Memory and swap monitoring

Linux 4.4.0-1022-aws (aws-sandbox)  12/28/2017  _x86_64_    (4 CPU)

07:31:24 AM kbmemfree kbmemused %memused kbbuffers kbcached kbcommit %commit kbactive kbinact kbdirty
07:31:25 AM 604560 15826960 96.32 2490348 2803236 25069280 152.57 11571660 2561176 268
07:31:26 AM 604440 15827080 96.32 2490348 2803236 25069280 152.57 11571984 2561176 268
07:31:27 AM 604440 15827080 96.32 2490348 2803236 25069280 152.57 11571984 2561176 268
07:31:28 AM 604440 15827080 96.32 2490348 2803236 25069280 152.57 11572008 2561176 268
07:31:29 AM 604316 15827204 96.32 2490348 2803236 25069280 152.57 11572308 2561176 268
Average: 604439 15827081 96.32 2490348 2803236 25069280 152.57 11571989 2561176 268
kbmemfree   matches "free" from the free command; it excludes buffer and cache space
kbmemused   matches "used" from the free command; it includes buffer and cache space
%memused    kbmemused as a percentage of total memory (excluding swap)
kbbuffers / kbcached    the buffer and cache values from the free command
kbcommit    memory the current workload is guaranteed to need, i.e. what is required to avoid overflow (RAM + swap)
%commit     kbcommit as a percentage of total memory (including swap)

sar -b 1 5

Report I/O and transfer-rate statistics

Linux 4.4.0-1022-aws (aws-sandbox)  12/28/2017  _x86_64_    (4 CPU)

07:21:50 AM tps rtps wtps bread/s bwrtn/s
07:21:51 AM 4.00 0.00 4.00 0.00 72.00
07:21:52 AM 0.00 0.00 0.00 0.00 0.00
07:21:53 AM 7.00 0.00 7.00 0.00 320.00
07:21:54 AM 0.00 0.00 0.00 0.00 0.00
07:21:55 AM 1.00 0.00 1.00 0.00 32.00
Average: 2.40 0.00 2.40 0.00 84.63
tps         total I/O transfers per second issued to physical devices
rtps        read requests per second issued to physical devices
wtps        write requests per second issued to physical devices
bread/s     data read from physical devices, in blocks per second
bwrtn/s     data written to physical devices, in blocks per second
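
Beyond live sampling, sar can replay collected history; the data directory differs by distro (/var/log/sysstat/saDD on Debian/Ubuntu, /var/log/sa/saDD on RHEL):

# CPU history for the 28th, read from the daily binary file
sar -u -f /var/log/sysstat/sa28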


sh

Handy everyday tricks

#!/bin/bash
set -o nounset # error on unset variables
set -o errexit # same as set -e

# bash -n xxx # syntax check only
set -o verbose # same as bash -v xxx
set -o xtrace  # same as bash -x xxx

## wrap logic in a function
log () {
    local prefix="[$(date +%Y/%m/%d\ %H:%M:%S)]:"
    echo "${prefix} $@" >&2
}
log "INFO" "a message"

## wrap a filter in a function
ExtractBashComments() {
    egrep "^#"
}
cat /etc/hosts | ExtractBashComments | wc
comments=$(ExtractBashComments < /etc/hosts)

## readonly with a default value
readonly DEFAULT_VAL=${DEFAULT_VAL:--99}
echo $DEFAULT_VAL # -99

## if: use -gt for numeric comparison (> inside [[ ]] compares strings)
if [[ 100 -gt "${DEFAULT_VAL}" ]]; then
    echo 222
fi
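
One more trick in the same spirit, a minimal sketch: pair errexit with an ERR trap so aborts report where they happened:

#!/bin/bash
set -o errexit
## print the line that triggered the abort
trap 'echo "failed at line ${LINENO}" >&2' ERR
false   # demo: trips the trap, script exits here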

tsdb => csv => sql => influx => oss

# cd /z/wolanx/GiMC/src/backend/temp/mig
# chcp.com 65001
# sh ./db2csv.sh
# nohup bash all.sh > ~/1.log 2>&1 &
set -e

tt="20201231"
tz="20200101"
while [[ "${tt}" -ge "${tz}" ]]; do
    #tt="20220502"
    echo "start ${tt}"

    python3 db2csv.py $tt
    python2 /root/datax-wolanx/bin/datax.py /root/datax-wolanx/mig/${tt}.json > /dev/null
    rm ${tt}.json

    cat /z/data/${tt}__* > /z/data/${tt}.csv
    cat /z/data/${tt}.csv | wc -l
    rm /z/data/${tt}__*

    echo -e "# DML\n# CONTEXT-DATABASE: gimc-hk" > /z/data/${tt}.sql
    cat /z/data/${tt}.csv | awk -F',' '{printf "sensor_0s,SNO=%s %s=%s %s\n", $3, $1, $4, $2}' >> /z/data/${tt}.sql
    rm /z/data/${tt}.csv

    sed -i '/checkhitsdb/d' /z/data/${tt}.sql
    sed -i '/= /d' /z/data/${tt}.sql
    sed -i 's/ _/_/g' /z/data/${tt}.sql
    influx -ssl -host ts-xxx.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password password -database gimc-hk -import -path=/z/data/${tt}.sql -precision=s

    ossutil cp /z/data/${tt}.sql oss://oss-dcc-gimc-tsdb-hk/ds-hk-sql/
    rm /z/data/${tt}.sql

    echo "end ${tt}"

    tt=$(date -d "${tt} -1day" +%Y%m%d)
done

Batch file submission (Baidu URL push)

for ((a=10; a<=245; a++)); do
    n=$(printf "%03d" $a)
    echo $n
    curl -H 'Content-Type:text/plain' --data-binary @seofile_${n} "http://data.zz.baidu.com/urls?site=www.app-echo.com&token=3iyzwDoYB6IQAMKL"
done

Run a command on a list of machines

#!/bin/bash

ips="
172.16.30.13
172.16.30.14
172.16.30.15
172.16.30.25
172.16.30.26
172.16.30.27
"
for ip in $ips
do
    echo "do @$ip"
    echo "=============================="
    # ssh root@$ip "pwd"

    ssh root@$ip '#!/bin/sh
echo $LANG
'

    # echo $doStr
    # ssh root@$ip $doStr

    echo -e "\n\n"
done

Code deployment at 久游 (Jiuyou)

rsync -avl --delete --exclude "log" --exclude "cli" --exclude "admin" --exclude "caches" --exclude "yaf_config.php" /usr/db/htdocs/au3/beta/ maintain@114.141.159.7:/usr/db/htdocs/au3/

#!/bin/bash
weball="
192.168.1.5
192.168.1.6
192.168.1.7
"
for ip in $weball
do
    rsync -avl --delete --exclude "log" --exclude "cli" --exclude "caches" --exclude "upload" --exclude "fileupload" -e ssh /usr/db/htdocs/yaf_aushop/preproduct/ maintain@$ip:/usr/db/htdocs/shop_with_yaf/

    echo "maintain@$ip is ok"
done

with ssh

#!/bin/bash

ssh root@172.16.30.15 "pwd"

ssh www@172.16.45.87 '#!/bin/sh
export LC_ALL=C
hostname
cd /srv/wwwroot/app
git branch
git pull
git submodule init
git submodule sync
git submodule update
git status
'


Installation walkthrough: https://www.jianshu.com/p/7de00c73a2bb

.bash_profile

alias l='ls -la'
alias dc='docker-compose'
alias gs='git status'
alias gd='git diff'

mintty

vi ~/.bash_profile

alias ..='cd ..'
alias tfa='terraform apply --auto-approve'

export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache"

export PATH=$PATH:/d/Toolbox/apps/IDEA-U/ch-0/192.7142.36/jbr/bin

tee /etc/locale.conf <<-'EOF'
LANG="en_US.utf8"
#LC_ALL="en_US.utf8"
LC_ALL="C"
EOF

tee /root/.vimrc <<-'EOF'
set encoding=utf-8
set fileencoding=utf-8
EOF

repo: switch to a mirror

# mv CentOS-Base.repo CentOS-Base.repo.bak
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache # build the local metadata cache
# yum-update

EPEL adds many extra RPMs

yum install -y epel-release

Must-install basics

yum install -y yum-utils    # yum-config-manager --add-repo
yum install -y htop
yum install -y tree
yum install -y net-tools # netstat -ntlp

Misc

stuck yum transactions

yum-complete-transaction

wifi

dmesg | grep firmware
ip addr show wlo1
wpa_supplicant -B -i wlo1 -c <(wpa_passphrase "iPhone 8" "1234567890")
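
wpa_supplicant only associates; the interface still needs an address (which DHCP client is present depends on the distro):

dhclient wlo1        # or: dhcpcd wlo1
ip addr show wlo1    # confirm an address was leased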

ssh remote

yum list installed | grep openssh-server
yum install openssh-server

/etc/ssh/sshd_config
PermitRootLogin yes
PasswordAuthentication yes

sudo service sshd start
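
To keep it across reboots and confirm it is listening (netstat comes from net-tools, installed earlier):

sudo systemctl enable sshd
netstat -ntlp | grep sshd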