Commit: fix backup of tables and improve error logging
Admire Nyakudya committed Nov 5, 2024
1 parent c596be5 commit de6cf7f
Showing 5 changed files with 107 additions and 61 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -18,7 +18,7 @@ ENV \

ADD build_data /build_data
ADD scripts /backup-scripts
- RUN chmod 0755 /backup-scripts/*.sh
+ RUN echo ${POSTGRES_MAJOR_VERSION} > /tmp/pg_version.txt && chmod 0755 /backup-scripts/*.sh
RUN sed -i 's/PostGIS/PgBackup/' ~/.bashrc

WORKDIR /backup-scripts
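The version file written at build time is what lets the runtime scripts locate version-matched client tools without guessing. A minimal sketch of the round trip, assuming the standard Debian/Ubuntu PostgreSQL layout used by the image:

    # Build time: freeze the major version into the image.
    echo "${POSTGRES_MAJOR_VERSION}" > /tmp/pg_version.txt

    # Run time: read it back and resolve the matching binaries.
    POSTGRES_MAJOR_VERSION=$(cat /tmp/pg_version.txt)
    BIN_DIR="/usr/lib/postgresql/${POSTGRES_MAJOR_VERSION}/bin/"
    "${BIN_DIR}pg_restore" --version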
3 changes: 2 additions & 1 deletion docker-compose.yml
@@ -30,8 +30,9 @@ services:
- POSTGRES_PORT=5432
- RUN_AS_ROOT=true
- CRON_SCHEDULE="*/5 * * * *"
- - CONSOLE_LOGGING=TRUE
+ #- CONSOLE_LOGGING=TRUE
#- DB_DUMP_ENCRYPTION=true
+ #- DB_TABLES=TRUE
restart: on-failure
depends_on:
db:
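For reference, the new toggle is wired the same way as the existing ones. A hypothetical override enabling per-table dumps — the service name here is an assumption, not taken from this commit:

    services:
      dbbackups:
        environment:
          - DB_TABLES=TRUE        # one dump per table instead of one per database
          - CONSOLE_LOGGING=TRUE  # mirror backup logs to the container's stdout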
122 changes: 75 additions & 47 deletions scripts/backups.sh
@@ -2,17 +2,25 @@

source /backup-scripts/pgenv.sh

+ # Env variables
+ MYDATE=$(date +%d-%B-%Y)
+ MONTH=$(date +%B)
+ YEAR=$(date +%Y)
+ MYBASEDIR=/${BUCKET}
+ MYBACKUPDIR=${MYBASEDIR}/${YEAR}/${MONTH}
+ mkdir -p ${MYBACKUPDIR}
+ pushd ${MYBACKUPDIR} || exit

function s3_config() {
- if [[ ! -f /root/.s3cfg ]]; then
- # If it doesn't exists, copy from ${EXTRA_CONFIG_DIR} directory if exists
- if [[ -f ${EXTRA_CONFIG_DIR}/s3cfg ]]; then
- cp -f ${EXTRA_CONFIG_DIR}/s3cfg /root/.s3cfg
- else
- # default value
- envsubst < /build_data/s3cfg > /root/.s3cfg
- fi
- fi
+ # If it doesn't exist, copy from the ${EXTRA_CONFIG_DIR} directory if present
+ if [[ -f ${EXTRA_CONFIG_DIR}/s3cfg ]]; then
+ cp -f ${EXTRA_CONFIG_DIR}/s3cfg /root/.s3cfg
+ else
+ # default value
+ envsubst < /build_data/s3cfg > /root/.s3cfg
+ fi
+
+
}
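Since the function now regenerates /root/.s3cfg on every run, a changed environment is always reflected in the file. The envsubst call simply substitutes ${VAR} references from the environment; a sketch with assumed credential values (the real template ships as /build_data/s3cfg):

    export ACCESS_KEY_ID=AKIAEXAMPLE
    export SECRET_ACCESS_KEY=wJalrEXAMPLE
    envsubst < /build_data/s3cfg > /root/.s3cfg   # every ${VAR} in the template becomes its env value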

# Cleanup S3 bucket
@@ -38,37 +46,56 @@ function clean_s3bucket() {
}

function dump_tables() {
- DATABASE=$1
- DATABASE_DUMP_OPTIONS=$2
- TIME_STAMP=$3
- DATA_PATH=$4
- array=($(PGPASSWORD=${POSTGRES_PASS} psql ${PG_CONN_PARAMETERS} -d ${DATABASE} -At --field-separator '.' -c "SELECT table_schema,table_name FROM information_schema.tables
- where table_schema not in ('information_schema','pg_catalog','topology') and table_name
- not in ('raster_columns','raster_overviews','spatial_ref_sys', 'geography_columns', 'geometry_columns')
- ORDER BY table_schema,table_name;"))
- for i in "${array[@]}"; do
- IFS='.'
- read -a strarr <<< "$i"
- SCHEMA_NAME="${strarr[0]}"
- TABLE_NAME="${strarr[1]}"
- # names and schema names
- if [[ "${DB_DUMP_ENCRYPTION}" =~ [Tt][Rr][Uu][Ee] ]];then
- PGPASSWORD=${POSTGRES_PASS} pg_dump ${PG_CONN_PARAMETERS} -d ${DATABASE} ${DATABASE_DUMP_OPTIONS} -t ${SCHEMA_NAME}."${TABLE_NAME}" | openssl enc -aes-256-cbc -pass pass:${DB_DUMP_ENCRYPTION_PASS_PHRASE} -pbkdf2 -iter 10000 -md sha256 -out $DATA_PATH/${DATABASE}_${SCHEMA_NAME}_"${TABLE_NAME}"_${TIME_STAMP}.dmp
- else
- PGPASSWORD=${POSTGRES_PASS} pg_dump ${PG_CONN_PARAMETERS} -d ${DATABASE} ${DATABASE_DUMP_OPTIONS} -t ${SCHEMA_NAME}."${TABLE_NAME}" >$DATA_PATH/${DATABASE}_${SCHEMA_NAME}_"${TABLE_NAME}"_${TIME_STAMP}.dmp
-
- fi
- done
+ DATABASE=$1
+
+ # Retrieve table names
+ array=($(PGPASSWORD=${POSTGRES_PASS} psql ${PG_CONN_PARAMETERS} -d ${DATABASE} -At -F '.' -c "SELECT table_schema, table_name FROM information_schema.tables WHERE table_schema NOT IN ('information_schema', 'pg_catalog', 'topology') AND table_name NOT IN ('raster_columns', 'raster_overviews', 'spatial_ref_sys', 'geography_columns', 'geometry_columns') ORDER BY table_schema, table_name;"))
+
+ for i in "${array[@]}"; do
+
+ IFS='.' read -r -a strarr <<< "$i"
+ SCHEMA_NAME="${strarr[0]}"
+ TABLE_NAME="${strarr[1]}"
+
+ # Combine schema and table name
+ DB_TABLE="${SCHEMA_NAME}.${TABLE_NAME}"
+ # Check dump format
+ if [[ ${DUMP_ARGS} == '-Fc' ]]; then
+ FORMAT='dmp'
+ else
+ FORMAT='sql'
+ fi
+
+ # Construct filename
+ FILENAME="${DUMPPREFIX}_${DB_TABLE}_${MYDATE}.${FORMAT}"
+
+ # Log the backup start time
+ echo -e "Backup of \e[1;31m ${DB_TABLE} \033[0m from DATABASE \e[1;31m ${DATABASE} \033[0m starting at \e[1;31m $(date) \033[0m" >> ${CONSOLE_LOGGING_OUTPUT}
+
+ export PGPASSWORD=${POSTGRES_PASS}
+
+ # Dump command
+ if [[ "${DB_DUMP_ENCRYPTION}" =~ [Tt][Rr][Uu][Ee] ]]; then
+ # Encrypted backup
+ pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d "${DATABASE}" -t "${DB_TABLE}" | openssl enc -aes-256-cbc -pass pass:${DB_DUMP_ENCRYPTION_PASS_PHRASE} -pbkdf2 -iter 10000 -md sha256 -out "${FILENAME}"
+ if [[ $? -ne 0 ]];then
+ echo -e "Backup of \e[0;32m ${DB_TABLE} \033[0m from DATABASE \e[0;32m ${DATABASE} \033[0m failed" >> ${CONSOLE_LOGGING_OUTPUT}
+ fi
+ else
+ # Plain backup
+ pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d "${DATABASE}" -t "${DB_TABLE}" > "${FILENAME}"
+ if [[ $? -ne 0 ]];then
+ echo -e "Backup of \e[0;32m ${DB_TABLE} \033[0m from DATABASE \e[0;32m ${DATABASE} \033[0m failed" >> ${CONSOLE_LOGGING_OUTPUT}
+ fi
+ fi
+
+ # Log the backup completion time
+ echo -e "Backup of \e[1;33m ${DB_TABLE} \033[0m from DATABASE \e[1;33m ${DATABASE} \033[0m completed at \e[1;33m $(date) \033[0m" >> ${CONSOLE_LOGGING_OUTPUT}
+
+ done
}
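A dump written with DB_DUMP_ENCRYPTION=true can be opened by reversing the same openssl parameters before handing the file to pg_restore. The filename below is illustrative and assumes DUMPPREFIX=PG:

    # Decrypt (cipher, KDF and iteration count must match the backup side).
    openssl enc -d -aes-256-cbc \
      -pass pass:"${DB_DUMP_ENCRYPTION_PASS_PHRASE}" \
      -pbkdf2 -iter 10000 -md sha256 \
      -in PG_public.roads_05-November-2024.dmp -out roads.dmp

    # Restore the single table into the target database.
    pg_restore -d gis roads.dmp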

- # Env variables
- MYDATE=$(date +%d-%B-%Y)
- MONTH=$(date +%B)
- YEAR=$(date +%Y)
- MYBASEDIR=/${BUCKET}
- MYBACKUPDIR=${MYBASEDIR}/${YEAR}/${MONTH}
- mkdir -p ${MYBACKUPDIR}
- pushd ${MYBACKUPDIR} || exit

function backup_db() {
EXTRA_PARAMS=''
@@ -81,22 +108,25 @@ function backup_db() {
else
export FILENAME=${MYBASEDIR}/"${ARCHIVE_FILENAME}.${DB}.dmp"
fi
echo "Backing up $DB" >> ${CONSOLE_LOGGING_OUTPUT}
if [ -z "${DB_TABLES:-}" ]; then

if [[ "${DB_TABLES}" =~ [Ff][Aa][Ll][Ss][Ee] ]]; then
export PGPASSWORD=${POSTGRES_PASS}
echo -e "Backup of \e[1;31m ${DB} \033[0m starting at \e[1;31m $(date) \033[0m" >> ${CONSOLE_LOGGING_OUTPUT}
if [[ "${DB_DUMP_ENCRYPTION}" =~ [Tt][Rr][Uu][Ee] ]];then
- PGPASSWORD=${POSTGRES_PASS} pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d ${DB} | openssl enc -aes-256-cbc -pass pass:${DB_DUMP_ENCRYPTION_PASS_PHRASE} -pbkdf2 -iter 10000 -md sha256 -out ${FILENAME}
+ pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d ${DB} | openssl enc -aes-256-cbc -pass pass:${DB_DUMP_ENCRYPTION_PASS_PHRASE} -pbkdf2 -iter 10000 -md sha256 -out ${FILENAME}
else
- PGPASSWORD=${POSTGRES_PASS} pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d ${DB} > ${FILENAME}
+ pg_dump ${PG_CONN_PARAMETERS} ${DUMP_ARGS} -d ${DB} > ${FILENAME}
fi
echo "Backing up $FILENAME done" >> ${CONSOLE_LOGGING_OUTPUT}
echo -e "Backup of \e[1;33m ${DB} \033[0m completed at \e[1;33m $(date) \033[0m and dump located at \e[1;33m ${FILENAME} \033[0m " >> ${CONSOLE_LOGGING_OUTPUT}
if [[ ${STORAGE_BACKEND} == "S3" ]]; then
- gzip $FILENAME
- echo "Backing up $FILENAME to s3://${BUCKET}/" >> ${CONSOLE_LOGGING_OUTPUT}
+ gzip ${FILENAME}
+ echo -e "Pushing database backup \e[1;31m ${FILENAME} \033[0m to \e[1;31m s3://${BUCKET}/ \033[0m" >> ${CONSOLE_LOGGING_OUTPUT}
${EXTRA_PARAMS}
rm ${MYBACKUPDIR}/*.dmp.gz
fi
else
- dump_tables ${DB} ${DUMP_ARGS} ${MYDATE} ${MYBACKUPDIR}
+
+ dump_tables ${DB}
if [[ ${STORAGE_BACKEND} == "S3" ]]; then
${EXTRA_PARAMS}
rm ${MYBACKUPDIR}/*
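The branch above means DB_TABLES=FALSE keeps the one-file-per-database behaviour, while any other value hands off to dump_tables. Assuming BUCKET=backups, DUMPPREFIX=PG and a run on 5 November 2024, the on-disk layout would look roughly like:

    /backups/2024/November/PG_gis.05-November-2024.dmp            # DB_TABLES=FALSE: whole database
    /backups/2024/November/PG_public.roads_05-November-2024.dmp   # DB_TABLES=TRUE: one file per table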
@@ -129,14 +159,12 @@ elif [[ ${STORAGE_BACKEND} =~ [Ff][Ii][Ll][Ee] ]]; then

fi

echo "Backup running to $MYBACKUPDIR" >> ${CONSOLE_LOGGING_OUTPUT}


if [ "${REMOVE_BEFORE:-}" ]; then
TIME_MINUTES=$((REMOVE_BEFORE * 24 * 60))
if [[ ${STORAGE_BACKEND} == "FILE" ]]; then
echo "Removing following backups older than ${REMOVE_BEFORE} days" >> ${CONSOLE_LOGGING_OUTPUT}
find ${MYBASEDIR}/* -type f -mmin +${TIME_MINUTES} -delete &>> ${CONSOLE_LOGGING_OUTPUT}
elif [[ ${STORAGE_BACKEND} == "S3" ]]; then
# Credits https://shout.setfive.com/2011/12/05/deleting-files-older-than-specified-time-with-s3cmd-and-bash/
clean_s3bucket "${BUCKET}" "${REMOVE_BEFORE} days"
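REMOVE_BEFORE is expressed in days and converted to minutes because find's -mmin test is minute-based; a quick check of the arithmetic:

    REMOVE_BEFORE=30
    TIME_MINUTES=$((REMOVE_BEFORE * 24 * 60))
    echo "${TIME_MINUTES}"                      # 43200
    find /backups -type f -mmin +43200 -print   # dry run of what the script would -delete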
18 changes: 10 additions & 8 deletions scripts/restore.sh
@@ -4,17 +4,18 @@
#!/bin/bash

source /backup-scripts/pgenv.sh
+ POSTGRES_MAJOR_VERSION=$(cat /tmp/pg_version.txt)
+ BIN_DIR="/usr/lib/postgresql/${POSTGRES_MAJOR_VERSION}/bin/"

function s3_config() {
- if [[ ! -f /root/.s3cfg ]]; then
# If it doesn't exist, copy from the ${EXTRA_CONFIG_DIR} directory if present
if [[ -f ${EXTRA_CONFIG_DIR}/s3cfg ]]; then
cp -f ${EXTRA_CONFIG_DIR}/s3cfg /root/.s3cfg
else
# default value
envsubst < /build_data/s3cfg > /root/.s3cfg
fi
- fi
+
+
}

@@ -23,7 +24,7 @@ function s3_restore() {
if [[ ! $1 || "$(date -d "$1" +%Y-%m-%d 2> /dev/null)" = "$3" ]]; then
echo "invalid date"
exit 1
else
MYDATE=$(date -d "$1" +%d-%B-%Y)
MONTH=$(date -d "$1" +%B)
YEAR=$(date -d "$1" +%Y)
@@ -32,15 +33,16 @@
BACKUP_URL=${MYBACKUPDIR}/${DUMPPREFIX}_${2}.${MYDATE}.dmp.gz
if [[ "$(s3cmd ls s3://${BACKUP_URL} | wc -l)" = 1 ]]; then
s3cmd get s3://${BACKUP_URL} /data/dump/$2.dmp.gz
gunzip /data/dump/$2.dmp.gz
echo "delete target DB with if its exists and recreate it"
- PGPASSWORD=${POSTGRES_PASS} dropdb ${PG_CONN_PARAMETERS} --force --if-exists ${2}
- PGPASSWORD=${POSTGRES_PASS} createdb ${PG_CONN_PARAMETERS} -O ${POSTGRES_USER} ${2}
+ export PGPASSWORD=${POSTGRES_PASS}
+ ${BIN_DIR}/dropdb ${PG_CONN_PARAMETERS} --force --if-exists ${2}
+ ${BIN_DIR}/createdb ${PG_CONN_PARAMETERS} -O ${POSTGRES_USER} ${2}
if [[ "${DB_DUMP_ENCRYPTION}" =~ [Tt][Rr][Uu][Ee] ]];then
openssl enc -d -aes-256-cbc -pass pass:${DB_DUMP_ENCRYPTION_PASS_PHRASE} -pbkdf2 -iter 10000 -md sha256 -in /data/dump/$2.dmp -out /tmp/decrypted.dump.gz && ${BIN_DIR}/pg_restore ${PG_CONN_PARAMETERS} /tmp/decrypted.dump.gz -d $2 ${RESTORE_ARGS}
rm -r /tmp/decrypted.dump.gz
else
- PGPASSWORD=${POSTGRES_PASS} pg_restore ${PG_CONN_PARAMETERS} /data/dump/$2.dmp -d $2 ${RESTORE_ARGS}
+ ${BIN_DIR}/pg_restore ${PG_CONN_PARAMETERS} /data/dump/$2.dmp -d $2 ${RESTORE_ARGS}
fi
fi
fi
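Assuming the script passes its positional arguments straight through to s3_restore (a date first, then the database name, which is how the function reads $1 and $2), an invocation would look like:

    # Hypothetical usage: restore database "gis" from the 5 November 2024 backup.
    /backup-scripts/restore.sh 2024-11-05 gis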
23 changes: 19 additions & 4 deletions scripts/start.sh
@@ -41,13 +41,17 @@ file_env 'SECRET_ACCESS_KEY'
if [ -z "${SECRET_ACCESS_KEY}" ]; then
SECRET_ACCESS_KEY=
fi

file_env 'DEFAULT_REGION'
if [ -z "${DEFAULT_REGION}" ]; then
DEFAULT_REGION=us-west-2
fi

file_env 'BUCKET'
if [ -z "${BUCKET}" ]; then
BUCKET=backups
fi

file_env 'HOST_BASE'
if [ -z "${HOST_BASE}" ]; then
HOST_BASE=
@@ -56,12 +60,15 @@ fi
if [ -z "${HOST_BUCKET}" ]; then
HOST_BUCKET=
fi

if [ -z "${SSL_SECURE}" ]; then
SSL_SECURE=True
fi

if [ -z "${DUMP_ARGS}" ]; then
DUMP_ARGS='-Fc'
fi

if [ -z "${RESTORE_ARGS}" ]; then
RESTORE_ARGS='-j 4'
fi
@@ -70,6 +77,7 @@ file_env 'POSTGRES_USER'
if [ -z "${POSTGRES_USER}" ]; then
POSTGRES_USER=docker
fi

file_env 'POSTGRES_PASS'
if [ -z "${POSTGRES_PASS}" ]; then
POSTGRES_PASS=docker
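file_env is the usual Docker secrets idiom: a VAR_FILE variable pointing at a mounted file takes precedence over a plain VAR. The helper itself is not part of this diff; a typical implementation is sketched below, not the repository's exact code:

    file_env() {
      local var="$1"
      local fileVar="${var}_FILE"
      local def="${2:-}"
      if [ -n "${!fileVar:-}" ]; then
        # Read the secret from the mounted file, e.g. POSTGRES_PASS_FILE=/run/secrets/pg_pass
        export "$var"="$(< "${!fileVar}")"
      elif [ -z "${!var:-}" ]; then
        export "$var"="$def"
      fi
    }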
@@ -122,6 +130,10 @@ if [ -z "${CONSOLE_LOGGING}" ]; then
CONSOLE_LOGGING=FALSE
fi

if [ -z "${DB_TABLES}" ]; then
DB_TABLES=FALSE
fi

file_env 'DB_DUMP_ENCRYPTION_PASS_PHRASE'
if [ -z "${DB_DUMP_ENCRYPTION_PASS_PHRASE}" ]; then
STRING_LENGTH=30
@@ -136,11 +148,12 @@ function cron_config() {
envsubst < ${EXTRA_CONFIG_DIR}/backups-cron > /backup-scripts/backups-cron
else
# default value

if [ -z "${CRON_SCHEDULE}" ]; then
- envsubst < /build_data/backups-cron-default > /backup-scripts/backups-cron
- else
- envsubst < /build_data/backups-cron > /backup-scripts/backups-cron
- fi
+ export CRON_SCHEDULE='0 0 * * *'
+ fi
+ envsubst < /build_data/backups-cron > /backup-scripts/backups-cron

fi
}
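cron_config only fills a template, so the generated crontab entry follows the schedule verbatim. Assuming the template contains a single line of the form ${CRON_SCHEDULE} /backup-scripts/backups.sh (an assumption about /build_data/backups-cron, which is not part of this diff), the default expands to:

    CRON_SCHEDULE='0 0 * * *' envsubst < /build_data/backups-cron
    # -> 0 0 * * * /backup-scripts/backups.sh   (daily at midnight)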

@@ -201,13 +214,15 @@ DB_DUMP_ENCRYPTION_PASS_PHRASE=\"${DB_DUMP_ENCRYPTION_PASS_PHRASE}\"
DB_DUMP_ENCRYPTION="${DB_DUMP_ENCRYPTION}"
export PG_CONN_PARAMETERS=\"${PG_CONN_PARAMETERS}\"
export DBLIST=\"${DBLIST}\"
+ export DB_TABLES=\"${DB_TABLES}\"
" > /backup-scripts/pgenv.sh

echo "Start script running with these environment options"
set | grep PG

}
configure_env_variables

if [[ ${CONSOLE_LOGGING} =~ [Tt][Rr][Uu][Ee] ]];then
sed -i 's#${CONSOLE_LOGGING_OUTPUT}#/proc/1/fd/1 2>\&1#g' /backup-scripts/backups.sh
else
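For context, configure_env_variables serialises the resolved settings into /backup-scripts/pgenv.sh, which backups.sh and restore.sh source on startup. With default values the tail of that file would read roughly as follows (host, port, user and database list are assumptions for illustration):

    export PG_CONN_PARAMETERS="-h db -p 5432 -U docker"
    export DBLIST="gis"
    export DB_TABLES="FALSE"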
