Initialize the database
By default this is done by creating a ConfigMap, but ConfigMaps have a size limit, so the database is initialized manually instead.
Put the SQL files into /mnt/imwl/initdb (/mnt/imwl is a shared directory) and load them with a Job:
```yaml
apiVersion: batch/v1
```
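A minimal sketch of such a Job, assuming the shared directory is reachable as a hostPath and that the postgres superuser password lives in a PGO-generated secret named imwl-pguser-postgres (both the volume type and the secret name are assumptions; adjust to your environment):

```bash
# Hypothetical init Job: mounts the shared directory and feeds every SQL file to psql.
# The service name follows the imwl-ha service used elsewhere in this document.
kubectl apply -n postgres-operator -f - <<'EOF'
apiVersion: batch/v1
kind: Job
metadata:
  name: imwl-initdb
spec:
  backoffLimit: 0
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: initdb
        image: registry.test.datacanvas.com:55443/test/crunchy-postgres:ubi8-13.9-2
        command: ["/bin/bash", "-c"]
        args:
          - |
            for f in /mnt/imwl/initdb/*.sql; do
              psql -h imwl-ha.postgres-operator.svc.cluster.local -U postgres -f "$f" || exit 1
            done
        env:
        - name: PGPASSWORD
          valueFrom:
            secretKeyRef:
              name: imwl-pguser-postgres   # assumed secret name
              key: password
        volumeMounts:
        - name: initdb
          mountPath: /mnt/imwl
      volumes:
      - name: initdb
        hostPath:
          path: /mnt/imwl   # assumed: shared directory exposed on the node
EOF
```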
Backup
Scheduled backups were configured earlier. To run a one-off backup:

```bash
kubectl annotate -n postgres-operator postgrescluster imwl postgres-operator.crunchydata.com/pgbackrest-backup="$(date)"
```

To back up again later, re-apply the annotation with --overwrite:

```bash
kubectl annotate -n postgres-operator postgrescluster imwl --overwrite postgres-operator.crunchydata.com/pgbackrest-backup="$(date)"
```
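To confirm the backup actually ran, the backup Jobs and pgBackRest's own repository info can be checked; the cluster label is standard on PGO-managed resources, while the imwl-repo-host-0 pod name follows the usual `<cluster>-repo-host` convention and is an assumption here:

```bash
# backup Jobs the operator created for this cluster
kubectl -n postgres-operator get jobs -l postgres-operator.crunchydata.com/cluster=imwl
# pgBackRest's view of the repository (repo-host pod name assumed)
kubectl -n postgres-operator exec -it imwl-repo-host-0 -- pgbackrest info
```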
Restore
Create a new cluster named new-imwl, cloned from the backup of imwl:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: new-imwl
  namespace: postgres-operator
spec:                        # clone from the imwl cluster
  dataSource:
    postgresCluster:
      clusterName: imwl
      repoName: repo1
      # options:             # point-in-time recovery
      # - --type=time
      # - --target="2022-06-09 14:15:11-04"
  image: registry.test.datacanvas.com:55443/test/crunchy-postgres:ubi8-13.9-2
  postgresVersion: 13
  instances:
    - dataVolumeClaimSpec:
        storageClassName: "rook-ceph-block-retain"
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 20Gi
  backups:
    pgbackrest:
      image: registry.test.datacanvas.com:55443/test/crunchy-pgbackrest:ubi8-2.41-2
      repos:
        - name: repo1
          volume:
            volumeClaimSpec:
              storageClassName: "rook-ceph-block-retain"
              accessModes:
                - "ReadWriteOnce"
              resources:
                requests:
                  storage: 40Gi
```
Verify that the clone came up and the data is intact:

```bash
[root@imwl-01 high-availability]# kubectl get pvc -n postgres-operator | grep new-imwl
```
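A couple of further checks, using the same cluster/role labels as the selectors later in this document (the database container name is an assumption based on PGO v5 conventions):

```bash
# pods of the cloned cluster
kubectl -n postgres-operator get pods -l postgres-operator.crunchydata.com/cluster=new-imwl
# run a quick query on the clone's primary
kubectl -n postgres-operator exec -it \
  $(kubectl -n postgres-operator get pods \
      -l postgres-operator.crunchydata.com/cluster=new-imwl,postgres-operator.crunchydata.com/role=master \
      -o name) -c database -- psql -c '\l'
```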
To restore the data only, without creating a new cluster, modify the existing cluster's configuration; only a few lines need to be added under spec:

```yaml
spec:
```
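Per the PGO v5 documentation, the lines in question are the pgBackRest restore settings; a sketch that patches them onto the running cluster (repo1 comes from the manifest above, everything else here is an assumption):

```bash
# enable an in-place restore from repo1; add an "options" list for point-in-time recovery if needed
kubectl -n postgres-operator patch postgrescluster imwl --type merge \
  -p '{"spec":{"backups":{"pgbackrest":{"restore":{"enabled":true,"repoName":"repo1"}}}}}'
```

Once the restore has finished, the upstream docs suggest setting enabled back to false.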
Then trigger the restore:

```bash
kubectl annotate -n postgres-operator postgrescluster imwl --overwrite postgres-operator.crunchydata.com/pgbackrest-restore=id1
```
Basic operations

```bash
# restart
```
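A sketch of the restart and a few other routine commands (the label selectors follow PGO v5 conventions; the secret name is an assumption):

```bash
# restart: deleting a Postgres pod lets the operator/Patroni recreate it
kubectl -n postgres-operator delete pod -l postgres-operator.crunchydata.com/cluster=imwl,postgres-operator.crunchydata.com/role=replica

# inspect the cluster object, its conditions and events
kubectl -n postgres-operator describe postgrescluster imwl

# read the generated connection password (secret name assumed)
kubectl -n postgres-operator get secret imwl-pguser-imwl -o jsonpath='{.data.password}' | base64 -d
```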
Find the master database pod:

```bash
kubectl -n postgres-operator get pods --selector=postgres-operator.crunchydata.com/role=master -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}'
```

Find the replicas:

```bash
kubectl -n postgres-operator get pods --selector=postgres-operator.crunchydata.com/role=replica -o jsonpath='{.items[*].metadata.labels.postgres-operator\.crunchydata\.com/instance}'
```
Manual switchover between primary and replica:

```bash
kubectl annotate -n postgres-operator postgrescluster imwl postgres-operator.crunchydata.com/trigger-switchover="$(date)"
# to switch over again later, re-apply the annotation with --overwrite
kubectl annotate -n postgres-operator postgrescluster imwl postgres-operator.crunchydata.com/trigger-switchover="$(date)" --overwrite
```
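Per the PGO v5 docs, the trigger-switchover annotation only takes effect when switchover is enabled in the cluster spec; a hedged sketch of enabling it:

```bash
kubectl -n postgres-operator patch postgrescluster imwl --type merge \
  -p '{"spec":{"patroni":{"switchover":{"enabled":true}}}}'
```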
Additional SQL backups
A standalone pg_dump script, run from a host with Docker through a helper postgres container (see the commented docker run line):

```bash
#!/bin/bash
# helper container used for pg_dump:
# docker run --name some-postgres -e POSTGRES_PASSWORD=example_2021! -v /opt:/opt -d postgres:13.9
PSQL_HOST="imwl-ha.postgres-operator.svc.cluster.local"
PSQL_PORT="5432"
PSQL_USER="postgres"
PSQL_PASS='password!'
BACKUP_DIRECTORY="/opt/backups/postgresql/imwl"
DEFAULT_DATE="$(date +%Y%m%d)"
BACKUP_LOGDIR="/opt/backups/logs"
REMOTE_BACKUP_DIRECTORY="/opt/backups/postgresql/imwl"
REMOTE_HOST="172.20.19.57"
REMOTE_HOST_PASS="passwd"
REMOTE_HOST2="172.20.19.59"
REMOTE_HOST_PASS2="passwd"

# write a timestamped log line to stdout and to the daily log file
logger(){
    [ ! -d "${BACKUP_LOGDIR}" ] && mkdir -p ${BACKUP_LOGDIR}
    local logfile="${BACKUP_LOGDIR}/pgbackup-${DEFAULT_DATE}.log"
    local logtype=$1; local msg=$2
    datetime=`date +'%F %H:%M:%S'`
    format1="${datetime} [line: `caller 0 | awk '{print$1}'`]"
    format2="${logtype}: ${msg}"
    logformat="$format1 $format2"
    echo -e "${logformat}" | tee -a $logfile
}

# dump every database listed below
backup_database(){
    local databases=(
        hive
        test
    )
    if [ ! -d "${BACKUP_DIRECTORY}" ];then
        mkdir -p ${BACKUP_DIRECTORY}
    fi
    for db in ${databases[@]}
    do
        postgres_dump $db
    done
    logger INFO "Databases backed up to ${BACKUP_DIRECTORY} directory"
}

# pg_dump a single database through the helper container and gzip the result
postgres_dump(){
    local database=$1
    logger INFO "Dumping database ${database} ..."
    export PGPASSWORD=${PSQL_PASS}
    docker exec -i -e PGPASSWORD=$PSQL_PASS some-postgres /usr/bin/pg_dump -h ${PSQL_HOST} -p ${PSQL_PORT} -U ${PSQL_USER} ${database} | gzip > ${BACKUP_DIRECTORY}/${DEFAULT_DATE}-${database}.sql.gz
    # docker exec -i -e PGPASSWORD=$PSQL_PASS some-postgres /usr/bin/pg_dumpall -h ${PSQL_HOST} -p ${PSQL_PORT} -U ${PSQL_USER} ${database} | gzip > ${BACKUP_DIRECTORY}/${DEFAULT_DATE}-full.sql.gz
    # docker run --rm -i -e PGPASSWORD=$PSQL_PASS postgres:13.9 /usr/bin/pg_dumpall -h ${PSQL_HOST} -p ${PSQL_PORT} -U ${PSQL_USER} ${database} | gzip > ${BACKUP_DIRECTORY}/${DEFAULT_DATE}-full.sql.gz
    if [ ${PIPESTATUS[0]} -ne 0 -o ${PIPESTATUS[1]} -ne 0 ];then
        logger ERROR "Failed to dump database ${database}!"
        exit 1
    fi
    logger INFO "Database ${database} dump complete"
}

backup_database

# remote copies, kept for 3 days
sshpass -p $REMOTE_HOST_PASS scp $BACKUP_DIRECTORY/$DEFAULT_DATE* root@$REMOTE_HOST:$REMOTE_BACKUP_DIRECTORY
logger INFO "Databases backed up to remote directory: ${REMOTE_BACKUP_DIRECTORY}"
sshpass -p $REMOTE_HOST_PASS2 scp $BACKUP_DIRECTORY/$DEFAULT_DATE* root@$REMOTE_HOST2:$REMOTE_BACKUP_DIRECTORY
logger INFO "Databases backed up to remote directory: ${REMOTE_BACKUP_DIRECTORY}"

# if more than 9 backup files exist locally, delete files older than 3 days
file_num=`ls -l $BACKUP_DIRECTORY | grep "^-" | wc -l`
if [ $file_num -gt 9 ];then
    find $BACKUP_DIRECTORY -mtime +2 -exec rm -rfv {} \;
    logger INFO "Deleted backup files older than three days"
fi
```
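The script lends itself to a daily cron entry, for example (the script path is an assumption):

```bash
# added via `crontab -e`: run the dump script every day at 02:00
0 2 * * * /opt/backups/pg_backup.sh >> /opt/backups/logs/cron.log 2>&1
```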
To restore a dump:

```bash
psql -f xxxx.sql -h ${PSQL_HOST} -p ${PSQL_PORT} -U ${PSQL_USER} ${database}
```
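Since the script gzips its dumps, a concrete invocation would look something like this (variables as defined in the script; the file name is illustrative):

```bash
# decompress on the fly and feed the dump to psql
gunzip -c ${BACKUP_DIRECTORY}/20230101-hive.sql.gz | psql -h ${PSQL_HOST} -p ${PSQL_PORT} -U ${PSQL_USER} hive
```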
Recovering from the raw data files
Not recommended, because pg_resetwal -f discards WAL and can lose or corrupt committed data:

```bash
[root@k8s-60 pg13]# docker run --rm -it --name postgres -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=example_2021! -v /root/pg13:/var/lib/postgresql/data -v /etc/localtime:/etc/localtime -p 15432:5432 postgres:13.9 su - postgres -c '/usr/lib/postgresql/13/bin/pg_resetwal -f /var/lib/postgresql/data'
Write-ahead log reset
[root@k8s-60 pg13]# docker run -d --name postgres --restart=always -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=example_2021! -v /root/pg13:/var/lib/postgresql/data -v /etc/localtime:/etc/localtime -p 15432:5432 postgres:13.9
dccddceec900708489f10e4c9462d0e62d0d4340aabb5804360253f523cc994b
```
Recommended: restore a base backup, then replay the WAL logs to recover the database. Reference:
https://blog.csdn.net/zimu312500/article/details/124283358
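A rough sketch of that procedure on a plain PostgreSQL 13 data directory such as the /root/pg13 one above (the WAL archive path and target time are assumptions; the linked article has the full steps):

```bash
# 1. stop PostgreSQL and replace the data directory with the restored base backup
# 2. tell PostgreSQL where the archived WAL lives and how far to recover
cat >> /root/pg13/postgresql.conf <<'EOF'
restore_command = 'cp /opt/wal_archive/%f %p'
recovery_target_time = '2022-06-09 14:15:11'
EOF
touch /root/pg13/recovery.signal
# 3. start PostgreSQL; it replays WAL up to the target time and then follows recovery_target_action
```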
Close idle connections

```sql
select * from pg_stat_activity;
```
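pg_stat_activity lists every session; to actually terminate the idle ones, something like the following works (the 10-minute threshold is an arbitrary assumption, and PGPASSWORD is expected to be set as in the backup script):

```bash
psql -h imwl-ha.postgres-operator.svc.cluster.local -U postgres -c "
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE state = 'idle'
  AND pid <> pg_backend_pid()
  AND state_change < now() - interval '10 minutes';"
```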