
9 posts tagged with "db"


· 2 min read

postgres

docker run --restart=unless-stopped --name pg-1 -it -d -v "$PWD":/var/lib/postgresql/data -p 5432:5432 -e POSTGRES_PASSWORD=12341234 postgres:10.5
docker exec -it --user=postgres -w /var/lib/postgresql/data pg-1 sh
psql -d at-dev -U postgres -f 1.sql

pgadmin

docker run --restart=unless-stopped --name pgadmin -d -p 54320:80 -e "PGADMIN_DEFAULT_EMAIL=admin" -e "PGADMIN_DEFAULT_PASSWORD=12341234" dpage/pgadmin4

timescale

docker run --restart=unless-stopped --name timescaledb -it -d -v "$PWD":/var/lib/postgresql/data -p 5432:5432 -e POSTGRES_PASSWORD=12341234 timescale/timescaledb:0.11.0-pg10

connect

docker exec -it timescaledb psql -U postgres

Configuration

max_connections = 200
fsync = off
shared_buffers = 1GB
work_mem = 10MB
effective_cache_size = 2GB
maintenance_work_mem = 512MB
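
These are postgresql.conf settings; on PostgreSQL 9.4+ a sketch of applying them from psql instead of editing the file (several of them still require a restart):

ALTER SYSTEM SET max_connections = 200;
ALTER SYSTEM SET shared_buffers = '1GB';
SELECT pg_reload_conf(); -- reloads config; settings like shared_buffers only take effect after a restart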

\x toggles expanded display (one field per line)

Create a database

postgres=# CREATE DATABASE exampledb OWNER dbuser;
postgres=# GRANT ALL PRIVILEGES ON DATABASE exampledb to dbuser;
postgres=# \c exampledb
postgres=# ALTER SCHEMA public OWNER to dbuser;
postgres=# GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO dbuser;
postgres=# GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO dbuser;
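
These statements assume the role dbuser already exists; a minimal sketch creating it first (the password is a placeholder):

postgres=# CREATE USER dbuser WITH PASSWORD '12341234';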

ALTER TABLE kline_a RENAME TO kline_b;

Emergency operations

Check connections

show max_connections;
SELECT COUNT(*)
from pg_stat_activity;
select min_val, max_val
from pg_settings
where name = 'max_connections';

Terminate connections

-- pg_stat_activity.procpid was renamed to pid in PostgreSQL 9.2+
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = 'your_database';
show max_connections;
SELECT *
from pg_stat_activity;
select pg_cancel_backend(93); -- cancel the backend's current query
select pg_terminate_backend(93); -- terminate the backend connection (kills DML too)

Switch database: \c gis

Backup

https://docs.timescale.com/v0.12/using-timescaledb/backup

-- save
pg_dump -U postgres -s --table kline_1d -N _timescaledb_internal | grep -v _timescaledb_internal > kline_1d.sql
psql -U postgres -c "\COPY (SELECT * FROM kline_1d) TO kline_1d.csv DELIMITER ',' CSV"

-- import
psql -U postgres -c "\COPY kline_1d FROM kline_1d.csv CSV"

explain

explain analyze select * from a;

· 3 min read

install

docker run -d -p 9200:9200 -p 9300:9300 --name es-1 elasticsearch:5.6.16-alpine
# query UI
docker run -d -p 5601:5601 --name kibana --link es-1:elasticsearch kibana:5.6.16
# query UI that converts SQL to ES queries
docker run -d -p 9800:9800 --name es-hd --link es-1:demo containerize/elastichd

doc

http://how2j.cn/k/search-engine/search-engine-index-manage/1694.html

http api

http://<url>:9200/<index>/<type>/<id>
curl http://localhost:9200/db_name/table_name/a
curl http://localhost:9200/db_name/table_name/_search?q=sql

htop

# disk usage
curl http://localhost:9200/_cat/shards?v
curl http://localhost:9200/_cat/allocation?v

help

/_cat/
/_cat/indices?v

Kibana operations

# query with sorting
GET /how2java/_search
{
  "query": { "match_all": {} },
  "sort": [
    { "price": "desc" }
  ]
}
# return only some fields
GET /how2java/_search
{
  "query": { "match_all": {} },
  "_source": ["name", "price"]
}
# match condition
GET /how2java/_search
{
  "query": { "match": { "name": "时尚连衣裙" } }
}

# aggregation: select count(*),place from product group by place limit 0,3
GET /how2java/_search
{
  "size": 0,
  "aggs": {
    "group_by_place": {
      "terms": {
        "field": "place.keyword",
        "size": 3
      }
    }
  }
}

curl operations

# 1. create an index (roughly a database)
curl -X PUT http://172.16.30.13:9200/how2java?pretty    # create
curl http://172.16.30.13:9200/_cat/indices?v            # 1. list indices
curl -X DELETE http://172.16.30.13:9200/how2java?pretty # 1. delete the index

# 2. install the IK analysis plugin (Chinese tokenizer)
elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v5.6.16/elasticsearch-analysis-ik-5.6.16.zip
## verify the plugin
curl -s http://172.16.30.13:9200/_analyze -H 'content-type: application/json' -d '{
"analyzer":"ik_max_word",
"text":"护眼带光源"
}' | jq

# insert a document
curl -s -X PUT http://172.16.30.13:9200/how2java/product/1?pretty -H 'content-type: application/json' -d '{
"name": "蜡烛"
}' | jq
# get
curl http://172.16.30.13:9200/how2java/product/1?pretty
# update (PUT overwrites; for a partial update:)
## POST /how2java/product/1/_update
# delete a single document
curl -X DELETE http://172.16.30.13:9200/how2java/product/1?pretty

# bulk insert
curl -s -X POST http://172.16.30.13:9200/_bulk -H 'content-type: application/json' -d '
{"index":{"_index":"how2java","_type":"product","_id":10001}}
{"code":"540785126782","price":398,"name":"房屋卫士自流平美缝剂瓷砖地砖专用双组份真瓷胶防水填缝剂镏金色","place":"上海","category":"品质建材"}
{"index":{"_index":"how2java","_type":"product","_id":10002}}
{"code":"24727352473","price":21.799999237060547,"name":"艾瑞泽手工大号小号调温热熔胶枪玻璃胶枪硅胶条热溶胶棒20W-100W","place":"山东青岛","category":"品质建材"}
{"index":{"_index":"how2java","_type":"product","_id":10003}}'

# bulk import from a file http://how2j.cn/k/search-engine/search-engine-curl-batch/1704.html
## pan.baidu source_java/products_16w条elasticsearch.json.rar
curl -X POST http://172.16.30.13:9200/how2java/product/_bulk?refresh -H 'content-type: application/json' --data-binary "@products.json"

# search
## some fields only: "_source": ["name","price"]
## condition: "query": { "match_all": {} },
## condition: "query": { "match": { "name": "时尚连衣裙" } },
## paging: "from": 1, "size": 3,
curl -s http://172.16.30.13:9200/how2java/_search -d '{
  "query": { "match": { "name": "时尚连衣裙" } },
  "sort": [
    { "price": "desc" }
  ],
  "_source": ["name","price"]
}' | jq

# delete all indices
curl -X DELETE http://localhost:9200/_all

# _delete_by_query must target an index (here: how2java)
curl -X POST http://localhost:9200/how2java/_delete_by_query -d '{
  "query": {
    "match_all": {}
  }
}'

· One min read

redis

stream

redis-cli -h 10.231.9.159 -p 32621
xrange x:topic:test - + count 10
xadd x:topic:test * name youming age 60

# cap the stream length
xadd x:topic:test maxlen ~ 100 * name youming age 60
xtrim x:topic:test maxlen ~ 100

xlen x:topic:test
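
The Java consumer below reads through a consumer group; for reference, a CLI sketch of the same flow (group gggn and consumer cccn mirror the code below, and the group must exist before xreadgroup):

# one-time: create the group at the stream tail, creating the stream if needed
xgroup create x:topic:test gggn $ mkstream
# read up to 20 never-delivered entries as consumer cccn
xreadgroup group gggn cccn count 20 streams x:topic:test >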

java stream

import redis.clients.jedis.Jedis;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.params.XReadGroupParams;

import java.util.Collections;
import java.util.List;
import java.util.Map;

public class StreamConsumer {
    public static void main(String[] args) {
        Jedis r = new Jedis("x.x.x.x", 6379);
        r.select(11);

        // read up to 20 entries without requiring an explicit XACK
        XReadGroupParams p = XReadGroupParams.xReadGroupParams().count(20).noAck();
        // UNRECEIVED_ENTRY (">"): only entries never delivered to this group
        Map<String, StreamEntryID> entry = Collections.singletonMap("jdddddd", StreamEntryID.UNRECEIVED_ENTRY);

        List<Map.Entry<String, List<StreamEntry>>> entries = r.xreadGroup("gggn", "cccn", p, entry);

        if (entries != null) {
            for (Map.Entry<String, List<StreamEntry>> stringListEntry : entries) {
                for (StreamEntry sEntry : stringListEntry.getValue()) {
                    Map<String, String> fields = sEntry.getFields();
                    System.out.println(fields);
                }
            }
        }
    }
}
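
Note that xreadGroup fails with NOGROUP until the group exists; a one-time setup sketch on the same Jedis connection r (stream key and group name match the code above):

// create the consumer group at the stream's current tail ("$"),
// creating the stream itself if missing (makeStream = true)
r.xgroupCreate("jdddddd", "gggn", StreamEntryID.LAST_ENTRY, true);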

· 2 min read

docker run --restart=unless-stopped --name influxdb-1 -d -p 8086:8086 -v $PWD:/var/lib/influxdb influxdb

docker run --restart=unless-stopped --name influxdb-1 -d \
  -p 8086:8086 \
  -p 8083:8083 \
  -e INFLUXDB_ADMIN_ENABLED=true \
  -v $PWD:/var/lib/influxdb influxdb

svc-influxdb:
  image: influxdb:1.7.11
  ports:
    - 8086:8086
  environment:
    - TZ=utc-8
    - INFLUXDB_ADMIN_USER=root
    - INFLUXDB_ADMIN_PASSWORD=root
    - INFLUXDB_DB=iothub
    - INFLUXDB_HTTP_ENABLED=true
    - INFLUXDB_HTTP_AUTH_ENABLED=true

Common commands

influx
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -database gimc-perf
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -database gimc-perf -precision rfc3339

auth
show users
show databases
# show tables
show MEASUREMENTS
SHOW MEASUREMENTS ON "gimc-perf"

# set the time display format
precision rfc3339

# sql https://archive.docs.influxdata.com/influxdb/v1.2/query_language/data_exploration/#the-basic-select-statement
SELECT * FROM "temperature"
SELECT * FROM /.*/ LIMIT 1
SELECT * FROM sensor where "deviceId"='sensor1'
# tz https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
select * from sensor_0s tz('Asia/Shanghai')
select * from sensor_0s tz('Etc/GMT-8')

select

# list all tag keys
show tag keys from sensor_0s;
# list the values under a tag key
show tag values from sensor_0s with key="SNO";
# list series
SHOW SERIES ON "gimc-perf" from sensor_0s


# =~/regex/ matches values containing the given pattern
select * from test where monitor_name=~/^app/;

# fill fill(100) fill(previous) fill(linear)
SELECT MAX("water_level") FROM "h2o_feet" WHERE location = 'coyote_creek' GROUP BY time(12m) fill(previous);

# export
influxd backup -database gimc-perf -host ts-uf668p5xos953ygfo.influxdata.tsdb.aliyuncs.com:8088 -username grundfos -password Ab123456 -start 2023-08-15T20:00:00Z -end 2023-08-15T20:10:00Z ts

copy into

select SNO,c,d,e from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;
select SNO,c,d,e into sensor_test from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;

select SNO,c,d,e from sensor_test where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 1h;

select SNO='hahahah',c,d,e into sensor_test from sensor_0s where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 5m;
select SNO,c,d,e from sensor_test where SNO = 'iot-echo-changqing-heatex_1bu' and time > now() - 1h;

ddl

# delete table
drop measurement sensor_test

show measurements

import csv

cat /z/data/${tt}.csv | awk -F',' '{printf "sensor_0s,SNO=%s %s=%s %s\n", $3, $1, $4, $2}' >> /z/data/${tt}.sql
# cat /z/data/${tt}.txt | awk -F',' '{printf "sensor_0s,SNO=%s value=%s %s\n", $3, $1, $4, $2}' > /z/data/${tt}.sql
# cat /z/data/${tt}.txt | awk -F',' '{gsub(/value/, $1); printf "sensor_0s,%s\n", $2}' >> /z/data/${tt}.sql
influx -ssl -host ts-uf68z3on142991o8b.influxdata.tsdb.aliyuncs.com -port 8086 -username grundfos -password Ab123456 -import -precision=s -path=datarrr.txt

· 2 min read

doc

Time zone

select now();
set global time_zone = '+8:00';

commit: view uncommitted transactions

https://blog.csdn.net/u013235478/article/details/68062939

select trx_state, trx_started, trx_mysql_thread_id, trx_query
from information_schema.innodb_trx;
show global variables LIKE '%wait_timeout%';

processlist

docker

docker run --restart=unless-stopped --name mysql-1 -it -d \
-v "$PWD":/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=12341234 -e TZ=Asia/Shanghai mysql:8.0.15 \
--character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci
docker run --restart=unless-stopped --name mysql-1 -it -d \
-v "$PWD":/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=12341234 -e TZ=Asia/Shanghai mysql:8.0.15 \
--character-set-server=utf8 --collation-server=utf8_general_ci
docker run --restart=unless-stopped --name mysql-1 -it -d \
-v "$PWD":/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=12341234 -e TZ=Asia/Shanghai mysql:5.7.25 \
--character-set-server=utf8 --collation-server=utf8_general_ci

# import a SQL dump
docker exec -i gimclocal_svc-mysql-1_1 sh -c 'exec mysql -uroot -p"1234@1234"' < config/20221027.sql

# add a user https://www.cnblogs.com/xujishou/p/6306765.html
CREATE DATABASE IF NOT EXISTS `{$bundle}-app-test` default charset utf8 COLLATE utf8_general_ci;
grant all privileges on `{$bundle}-app-test`.* to `{$bundle}-app`@'%' identified by 'Abc.1234';
grant select on `gimc-prod`.* to `gtest`@'%';
grant select on `gimc-prod\_%`.* to `gtest`@'%';

# change the time zone
cp /usr/share/zoneinfo/GMT /etc/localtime
cp /usr/share/zoneinfo/PRC /etc/localtime
docker cp /etc/localtime mysql-1:/etc/localtime
docker cp /usr/share/zoneinfo/PRC mysql-1:/etc/localtime

SHOW VARIABLES LIKE 'character%';

# cnf
/etc/mysql/mysql.conf.d/mysqld.cnf
/etc/mysql/my.cnf

show tables;
show table status; # size, row count, and more

table

CREATE TABLE `table_demo`
(
    `id`         int(11)    NOT NULL AUTO_INCREMENT,
    `status`     tinyint(1) NOT NULL DEFAULT '1' COMMENT 'status',
    `created_at` timestamp  NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
    `updated_at` timestamp  NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
    PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8 COMMENT ='reference';

optimize + repair

#!/bin/bash

host_name=localhost
user_name=haohe_club
user_pwd=iy9nPKJV
database=club
need_optmize_table=true
tables=$(mysql -h$host_name -u$user_name -p$user_pwd $database -A -Bse "show tables")

for table_name in $tables
do
    check_result=$(mysql -h$host_name -u$user_name -p$user_pwd $database -A -Bse "check table $table_name" | awk '{ print $4 }')
    if [ "$check_result" = "OK" ]
    then
        echo "No need to repair table $table_name"
    else
        echo $(mysql -h$host_name -u$user_name -p$user_pwd $database -A -Bse "repair table $table_name")
    fi

    # optimizing tables can improve performance
    if [ $need_optmize_table = true ]
    then
        echo $(mysql -h$host_name -u$user_name -p$user_pwd $database -A -Bse "optimize table $table_name")
    fi
done

· 10 min read
  • Analysis of MySQL Explain output (MySQL 之 Explain 输出分析): https://mp.weixin.qq.com/s/yOZ3WglZMZJaV9H7ruzA6g

id            identifier of the query statement
select_type   type of the query
table         table the current row refers to
partitions    matching partitions
type          access type
possible_keys indexes the query might use
key           index MySQL actually chose to optimize the query
key_len       length of the chosen index key
ref           columns or constants compared against the index named in key
rows          number of rows scanned; an estimate, not necessarily accurate
filtered      estimated percentage of table rows remaining after filtering
extra         additional information about how the query is resolved
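
A minimal usage sketch that produces the columns above (the user table is a placeholder):

mysql> EXPLAIN SELECT * FROM user WHERE id = 1;
mysql> EXPLAIN FORMAT=JSON SELECT * FROM user WHERE id = 1; -- richer detail, including cost estimates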

About

MySQL has four types of logs: the Error Log, General Query Log, Binary Log, and Slow Query Log. The error log records ERROR, WARNING, and NOTE messages produced while MySQL runs; check it when the system fails or a record goes wrong. The general query log records every request the server handles. The binary log contains events describing changes to the database, such as table creation and data modification, as well as potential changes; it is mainly used for backup, restore, and rollback. The slow query log is used for MySQL performance tuning.

Error Log

The MySQL error log is stored as hostname.err in the MySQL log directory by default. If you don't know the current error log location, query it:

mysql> show variables like 'log_error';
+---------------+--------------------------------------+
| Variable_name | Value                                |
+---------------+--------------------------------------+
| log_error     | /usr/local/var/mysql/mysql-error.log |
+---------------+--------------------------------------+

To change the error log location, add the --log-error[=file_name] option in /etc/my.cnf.
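
A minimal my.cnf sketch for this (the path matches the query output above):

[mysqld]
log_error = /usr/local/var/mysql/mysql-error.log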

The error log records the details of every MySQL Server start and shutdown, plus all serious warnings and errors encountered while running.

Knowing the error log location, we can inspect it:

2015-09-12 16:03:20 2624 [ERROR] InnoDB: Unable to lock ./ibdata1, error: 35
2015-09-12 16:03:20 2624 [Note] InnoDB: Check that you do not already have another mysqld process using the same InnoDB data or log files.
2015-09-13 00:03:21 2183 [Note] InnoDB: Shutdown completed; log sequence number 426783897

InnoDB: Unable to lock ./ibdata1, error: 35 indicates the resource is already claimed, likely because multiple mysqld processes are running.

General Query Log

Records everyday SQL requests. Enable it the same way, by adding general-log-file[=file_name] to /etc/my.cnf.

Check status

show global variables like "%genera%";

For performance reasons the general log is usually left off: the slow log pinpoints SQL with performance problems, while the general log records every statement. In MySQL 5.0, enabling the slow log or general log required a restart; since MySQL 5.1.6 both can be written to a file or a database table, and both the on/off switch and the output destination can be changed dynamically at the Global level.

root@(none) 09:40:33>
select version();
+------------+
| version()  |
+------------+
| 5.1.37-log |
+------------+
1 row in set (0.02 sec)

Output destination

Set the log output to a file (with log_output=table the log goes to the general_log table instead, whose default engine is CSV):

root@(none) 09:41:11>
set global log_output = file;
Query OK, 0 rows affected (0.00 sec)

Set the log file path:

root@(none) 09:45:06>
set global general_log_file = '/tmp/general.log';
Query OK, 0 rows affected (0.00 sec)

Turn on the general log:

root@(none) 09:45:22>
set global general_log = on;
Query OK, 0 rows affected (0.02 sec)

After a while, turn the general log back off:

root@(none) 09:45:31>
set global general_log = off;
Query OK, 0 rows affected (0.02 sec)

Inspecting /tmp/general.log shows roughly which queries/updates/deletes/inserts are most frequent. For example, a table that rarely changes but is read heavily is a perfect cache candidate; for tables that tolerate master-replica lag, reads can be moved to a replica; and so on.

Binary Log

Enable the binlog

Edit /etc/my.cnf:

binlog_format = STATEMENT
binlog_cache_size = 2M
max_binlog_cache_size = 4M
max_binlog_size = 512M
log-bin = master-bin
log-bin-index = master-bin.index

log-bin-index points at master-bin.index, the file that records the names of the binlog segments. log-bin sets the binlog filename prefix; a numeric suffix is appended and incremented.

Binlog formats

The binlog has 3 formats: STATEMENT, ROW, and MIXED. https://dev.mysql.com/doc/refman/5.1/en/binary-log-mixed.html

Mixed format switches between row and statement format as appropriate; statement format is simply the literal SQL text.
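
binlog_format is also a dynamic variable, so the format can be switched at runtime; a quick sketch:

mysql> SET GLOBAL binlog_format = 'MIXED'; -- or 'ROW' / 'STATEMENT'
mysql> SHOW VARIABLES LIKE 'binlog_format';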

Analyze the binlog

MySQL's bundled mysqlbinlog command decodes the binlog directly. mysqlbinlog /usr/local/var/mysql/master-bin.000117 gives:

# at 335
#150913  0:05:12 server id 1  end_log_pos 366 CRC32 0xa31b50db  Xid = 151
COMMIT/*!*/;
DELIMITER ;
# End of log file

ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;

The first line contains the byte offset of the event in the log file (335).

The second line contains:

- the event timestamp, which MySQL uses to generate SET TIMESTAMP
- the server id of the originating server
- end_log_pos, the byte offset of the next event
- the event type (here Xid; other common types include Intvar, Query, Stop, Format_desc)
- the thread id that executed the statement on the origin server, used for auditing and CONNECTION_ID()
- exec_time, which for a master's binlog is the time spent executing the event
- the error code produced on the origin server

The binlog data can also be obtained from the client:

mysql> show binlog events;

| master-bin.000002 | 3861 | Query | 1 | 3954 | BEGIN|
| master-bin.000002 | 3954 | Intvar | 1 | 3986 | INSERT_ID=5|
| master-bin.000002 | 3986 | Query | 1 | 4475 | use `dropbox`; INSERT INTO `UserLog` (`uid`, `fids`, `nids`, `msg`, `log`, `from`, `type`, `ctime`) VALUES ('1', '[\"35\",\"33\",\"21\"]', '[\"22\",\"21\",\"11\",\"4\",\"3\"]', '从垃圾箱恢复: 恢复文件 \'[\"35\",\"33\",\"21\"]\' 恢复文件夹 \'[\"22\",\"21\",\"11\",\"4\",\"3\"]\'', '[[\"35\",\"33\",\"21\"],[\"22\",\"21\",\"11\",\"4\",\"3\"]]', 'cloud.jue.so', 'recover_by_trash', '2015-09-07 00:51:31')|
| master-bin.000002 | 4475 | Xid | 1 | 4506 | COMMIT /* xid=423 */

View binlog variables

mysql> show variables like '%binlog%';
+------------------------------------------+----------------------+
| Variable_name                            | Value                |
+------------------------------------------+----------------------+
| binlog_cache_size                        | 2097152              |
| binlog_checksum                          | CRC32                |
| binlog_direct_non_transactional_updates  | OFF                  |
| binlog_error_action                      | IGNORE_ERROR         |
| binlog_format                            | STATEMENT            |
| binlog_gtid_simple_recovery              | OFF                  |
| binlog_max_flush_queue_time              | 0                    |
| binlog_order_commits                     | ON                   |
| binlog_rows_query_log_events             | OFF                  |
| binlog_stmt_cache_size                   | 32768                |
| binlogging_impossible_mode               | IGNORE_ERROR         |
| innodb_api_enable_binlog                 | OFF                  |
| innodb_locks_unsafe_for_binlog           | OFF                  |
| max_binlog_cache_size                    | 4194304              |
| max_binlog_size                          | 536870912            |
| max_binlog_stmt_cache_size               | 18446744073709547520 |
| simplified_binlog_gtid_recovery          | OFF                  |
+------------------------------------------+----------------------+

Slow Query Log

Enable the slow query log

Edit /etc/my.cnf:

slow-query-log = 1
slow-query-log-file = /usr/local/var/mysql/mysql-slow.log
long_query_time = 1 # threshold in seconds for a query to count as slow
log-queries-not-using-indexes

Slow query tools

There are many tools for reading the slow log: MySQL's own mysqldumpslow, mysqlsla, and the widely used pt-query-digest; you can also feed the slow query data to zabbix for display and analysis.

Here I exported the slow query data with pt-query-digest /usr/local/var/mysql/mysql-slow.log, for example:

# Query 1: 0.02 QPS, 0.55x concurrency, ID 0xFC19E4D04D8E60BF at byte 12547
# This item is included in the report because it matches --limit.
# Scores: V/M = 118.26
# Time range: 2015-09-12 05:52:03 to 05:57:54
# Attribute pct total min max avg 95% stddev median
# ============ === ======= ======= ======= ======= ======= ======= =======
# Count 1 7
# Exec time 78 194s 250ms 169s 28s 167s 57s 992ms
# Lock time 0 901us 111us 158us 128us 152us 18us 119us
# Rows sent 0 5 0 1 0.71 0.99 0.45 0.99
# Rows examine 7 545.01k 14.18k 97.66k 77.86k 97.04k 32.08k 97.04k
# Query size 0 868 123 125 124 124.25 1 118.34
# String:
# Databases mysqltest
# Hosts localhost
# Users root
# Query_time distribution
# 1us
# 10us
# 100us
# 1ms
# 10ms
# 100ms ################################################################
# 1s ##########################################
# 10s+ ##########################################
# Tables
# SHOW TABLE STATUS FROM `mysqltest` LIKE 'File'\G
# SHOW CREATE TABLE `mysqltest`.`File`\G
# SHOW TABLE STATUS FROM `mysqltest` LIKE 'User'\G
# SHOW CREATE TABLE `mysqltest`.`User`\G
# EXPLAIN /*!50100 PARTITIONS*/
SELECT count(*)
FROM `File`
LEFT JOIN `User` ON `User`.`name` = `File`.`name`
WHERE `User`.`name` LIKE '%r%'
order by `last`\G

We can see this SQL was called 7 times, averaging 28s per call. Slow... Each call examined 77.86k rows on average.

Now look at the SQL statement:

SELECT count(*)
FROM File
LEFT JOIN User ON User.name = File.name
WHERE User.name LIKE '%r%'
order by last

It looks slow at a glance: ON User.name = File.name, with no index, forces a string comparison of the name field across all rows.

This database has 150k User rows and 100k File rows, so that's roughly 150k x 100k comparisons.

This is exactly what MySQL's slow log is for: finding the slow queries to optimize.
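
A plausible first fix, assuming the schemas allow it, is to index the join column on both sides; note that the leading-wildcard LIKE '%r%' still cannot use an index:

-- hypothetical index names; speeds up the join ON User.name = File.name
ALTER TABLE `User` ADD INDEX idx_name (`name`);
ALTER TABLE `File` ADD INDEX idx_name (`name`);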

References:

1. 《高性能MySQL》 (High Performance MySQL)

2. Analyse slow-query-log using mysqldumpslow & pt-query-digest: https://rtcamp.com/tutorials/mysql/slow-query-log/

3. 初探 MySQL 的 Binlog

· 7 min read
mysql> show processlist;
+-----+------+-------------------+--------+---------+------+--------+------------------------------------------------------+
| Id  | User | Host              | db     | Command | Time | State  | Info                                                 |
+-----+------+-------------------+--------+---------+------+--------+------------------------------------------------------+
| 207 | root | 192.168.0.2:51621 | mytest | Sleep   | 5    |        | NULL                                                 |
| 208 | root | 192.168.0.2:51622 | mytest | Sleep   | 5    |        | NULL                                                 |
| 220 | root | 192.168.0.2:51676 | mytest | Query   | 84   | locked | select name,culture,value,type from book where id=1 |
+-----+------+-------------------+--------+---------+------+--------+------------------------------------------------------+
Meaning and use of each column:
id: an identifier, useful when you want to kill a statement.
user: the current user; unless you are root, this command only shows statements within your own privileges.
host: the IP and port the statement came from, useful for tracing which user issued a problem statement.
db: the database this connection is currently using.
command: what the connection is executing, typically sleep, query, or connect.
time: how long the connection has stayed in this state, in seconds.
state: the state of the SQL statement on this connection; a very important column, with all states described below. Note that state is only one stage of execution: a query, for example, may pass through copying to tmp table, Sorting result, and Sending data before it completes.
info: the SQL statement itself; long statements get truncated, but it is still a key clue when diagnosing a problem statement.
The most important column of this command is state; the main states MySQL reports are:
Checking table
Checking the table (happens automatically).
Closing tables
Flushing changed table data to disk and closing the used tables. This should be a very fast operation; if not, check whether the disk is full or under heavy load.
Connect Out
A replication slave is connecting to its master.
Copying to tmp table on disk
The temporary result set is larger than tmp_table_size, so the temporary table is being moved from memory to disk to save memory.
Creating tmp table
Creating a temporary table to hold partial query results.
deleting from main table
The server is running the first part of a multi-table delete and has just deleted from the first table.
deleting from reference tables
The server is running the second part of a multi-table delete, deleting records from the other tables.
Flushing tables
Executing FLUSH TABLES and waiting for other threads to close their tables.
Killed
A kill request was sent to the thread; the thread checks the kill flag and aborts at the next opportunity. MySQL checks the flag on every iteration of its main loop, but in some cases the thread may still take a short while to die. If the thread is locked by another thread, the kill takes effect as soon as that lock is released.
Locked
Locked by another query.
Sending data
Processing the rows of a SELECT query and sending results to the client.
Sorting for group
Sorting for a GROUP BY.
Sorting for order
Sorting for an ORDER BY.
Opening tables
Trying to open a table. This should be very fast unless something gets in the way; for example, a table cannot be opened by other threads until an ALTER TABLE or LOCK TABLE statement has finished.
Removing duplicates
Running a SELECT DISTINCT query where MySQL could not optimize away the duplicates in an earlier stage, so it must remove duplicate records before sending the result to the client.
Reopen table
The thread obtained a lock on a table, but the lock could only be granted after the table structure changed; it has released the lock, closed the table, and is trying to reopen it.
Repair by sorting
The repair is building indexes by sorting.
Repair with keycache
The repair is building new indexes one by one through the key cache; slower than Repair by sorting.
Searching rows for update
Finding the matching records before updating them; this must finish before the UPDATE can modify the related records.
Sleeping
Waiting for the client to send a new request.
System lock
Waiting to acquire an external system lock. If you are not running multiple mysqld servers requesting the same tables, external system locking can be disabled with the --skip-external-locking option.
Upgrading lock
INSERT DELAYED is trying to acquire a table lock to insert new records.
Updating
Searching for matching records and modifying them.
User Lock
Waiting on GET_LOCK().
Waiting for tables
The thread was notified that the table structure has changed and it must reopen the table to get the new structure; to reopen it, it must wait until all other threads have closed the table. This notification is produced by FLUSH TABLES tbl_name, ALTER TABLE, RENAME TABLE, REPAIR TABLE, ANALYZE TABLE, or OPTIMIZE TABLE.
waiting for handler insert
INSERT DELAYED has processed all pending inserts and is waiting for new requests.
Most states correspond to very fast operations; if a thread stays in the same state for several seconds, something may be wrong and is worth checking. There are other states not listed above, but most of them are only useful for spotting server errors.
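
For example, to kill the locked query with Id 220 from the processlist output above:

mysql> KILL 220;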

· One min read

auth

update user
set host = '10.%.%.%'
where user = 'root';
select host, user
from user;

-- read-only account
GRANT SELECT ON *.* TO 'read'@'%' IDENTIFIED BY '12341234';

-- 8.0 bug fix mysql_native_password
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '12341234';

Daily registrations from the user table

SELECT from_unixtime(regdate, '%Y-%m-%d') AS regymd,
count(1) AS num
FROM v9_member
GROUP BY regymd
ORDER BY regymd ASC;

Daily data rolled up by month

SELECT from_unixtime(time, '%Y%m') AS M,
count(1) AS day_num,
sum(payment_total) AS payment_m
FROM echo_register_payment_info
GROUP BY M
ORDER BY m DESC;

Copy a table

CREATE TABLE `echo_total_databank`.`echo_controller2cname`
(
    `id`    INT(11) NOT NULL,
    `ename` VARCHAR(100) DEFAULT NULL,
    `cname` VARCHAR(100) DEFAULT NULL,
    `type`  VARCHAR(10) DEFAULT NULL
) ENGINE = MyISAM
  DEFAULT CHARSET = utf8 COMMENT = 'controller name mapping for echo-last-page-info';

ALTER TABLE `echo_total_databank`.`echo_controller2cname`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `un` (`ename`) USING BTREE;

ALTER TABLE `echo_total_databank`.`echo_controller2cname`
MODIFY `id` INT(11) NOT NULL AUTO_INCREMENT;

SET SQL_MODE = 'NO_AUTO_VALUE_ON_ZERO';

INSERT INTO `echo_total_databank`.`echo_controller2cname`
SELECT *
FROM `databank`.`echo_controller2cname`;

· One min read

docker exec mysql-1 sh -c 'mysqldump --all-databases -uroot -p12341234' > /www/1.sql
docker exec mysql-1 sh -c 'mysqldump -uroot -p12341234 gfTest' > /www/1.sql

Export script

Now=$(date +"%Y%m%d_%H%M%S");
File=gaojian_$Now.sql;

/alidata/server/mysql-5.6.21/bin/mysqldump -h127.0.0.1 -ugaojian -p'gaojian123' gaojian > /alidata/www/gaojian/caches/bakup/sql/$File;

mysqldump -hlocalhost -uhaohe_test -p'iy9nPKJV' --skip-lock-tables pcmoto > ./a.sql

echo "Your Database Backup Successfully Completed";

Import

source d:\1.sql

docker exec mysql-1 sh -c 'mysqldump --all-databases -uroot -pbitdata' > /www/11_23.sql
docker exec mysql-1 sh -c 'mysqldump --databases coin -uroot -pbitdata' > /www/2.sql

mysqldump -u root -p --compatible=postgresql --default-character-set=utf8 at-dev > 1.sql

docker exec mysql-1 sh -c 'mysql -uroot -pbitdata' < /www/11_23.sql

create database coin default character set utf8mb4;

docker run --restart=unless-stopped --name mysql-2 -it -d -v "$PWD":/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=bitdata mysql:5.6.39 --character-set-server=utf8 --collation-server=utf8_general_ci

Speed up source

-- before the import
SET GLOBAL foreign_key_checks=0;
SET GLOBAL unique_checks=0;
SET GLOBAL innodb_flush_log_at_trx_commit=0;
SET GLOBAL sync_binlog=0;

-- after the import
SET GLOBAL foreign_key_checks=1;
SET GLOBAL unique_checks=1;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
SET GLOBAL sync_binlog=1;