Commit 8c7ffc3

Merge pull request #14 from offen/backup-archive
allow local storage of backups
2 parents: 4b59089 + f6b4074 · commit 8c7ffc3

7 files changed

Lines changed: 75 additions & 20 deletions

Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
 
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
 RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
 

README.md

Lines changed: 24 additions & 2 deletions
@@ -1,8 +1,8 @@
 # docker-volume-backup
 
-Backup Docker volumes to any S3 compatible storage.
+Backup Docker volumes locally or to any S3 compatible storage.
 
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
 
 ## Configuration
 
@@ -30,15 +30,33 @@ AWS_S3_BUCKET_NAME="<xxx>"
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
 # Do not set this when working against AWS S3. If you need to set a
 # specific protocol, you will need to use the option below.
+
 # AWS_ENDPOINT="<xxx>"
 
 # The protocol to be used when communicating with your storage server.
 # Defaults to "https". You can set this to "http" when communicating with
 # a different Docker container on the same host for example.
+
 # AWS_ENDPOINT_PROTO="https"
 
+# In addition to backing up you can also store backups locally. Pass in
+# a local path to store your backups here if needed. You likely want to
+# mount a local folder or Docker volume into that location when running
+# the container. Local paths can also be subject to pruning of old
+# backups as defined below.
+
+# BACKUP_ARCHIVE="/archive"
+
 ########### BACKUP PRUNING
 
+# **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
+# The mechanism used for pruning backups is not very sophisticated
+# and applies its rules to **all files in the target directory**,
+# which means that if you are storing your backups next to other files,
+# these might become subject to deletion too. When using this option
+# make sure the backup files are stored in a directory used exclusively
+# for storing them or you might lose data.
+
 # Define this value to enable automatic pruning of old backups. The value
 # declares the number of days for which a backup is kept.
 
@@ -108,6 +126,10 @@ services:
       # to stop the container
       - /var/run/docker.sock:/var/run/docker.sock:ro
      - data:/backup/my-app-backup:ro
+      # If you mount a local directory or volume to `/archive` a local
+      # copy of the backup will be stored there. You can override the
+      # location inside of the container by setting `BACKUP_ARCHIVE`
+      # - /path/to/local_backups:/archive
 volumes:
   data:
 ```
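
With this change, local-only backups work without any S3 configuration. A minimal sketch of running the sidecar against a host folder mounted at the default `/archive` location (the volume name `my_app_data` and the host path `/path/to/local_backups` are placeholders for illustration, not part of this commit):

```sh
# Sketch only: no AWS_* variables are set, so nothing is uploaded; the
# backup tarball is copied into the mounted /archive folder instead.
docker run -d \
  -v my_app_data:/backup/my_app_data:ro \
  -v /path/to/local_backups:/archive \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  offen/docker-volume-backup:latest
```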

src/backup.sh

Lines changed: 33 additions & 12 deletions
@@ -77,13 +77,24 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
   fi
 fi
 
+copy_backup () {
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
+}
+
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi
 
+if [ -d "$BACKUP_ARCHIVE" ]; then
+  info "Copying backup to local archive"
+  echo "Will copy to \"$BACKUP_ARCHIVE\"."
+  copy_backup "$BACKUP_ARCHIVE"
+  echo "Finished copying."
+fi
+
 if [ -f "$BACKUP_FILENAME" ]; then
   info "Cleaning up"
   rm -vf "$BACKUP_FILENAME"
@@ -92,16 +103,12 @@ fi
 info "Backup finished"
 echo "Will wait for next scheduled backup."
 
-if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
-  info "Pruning old backups"
-  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
-  sleep "$BACKUP_PRUNING_LEEWAY"
-  bucket=$AWS_S3_BUCKET_NAME
-
+prune () {
+  target=$1
   rule_applies_to=$(
-    mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
       --older-than "${BACKUP_RETENTION_DAYS}d" \
-      "backup-target/$bucket" \
+      "$target" \
       | wc -l
   )
   if [ "$rule_applies_to" == "0" ]; then
@@ -110,7 +117,7 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 0
   fi
 
-  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
+  total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)
 
   if [ "$rule_applies_to" == "$total" ]; then
     echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -119,7 +126,21 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   fi
 
   mc rm $MC_GLOBAL_OPTIONS \
-    --recursive -force \
-    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+    --recursive --force \
+    --older-than "${BACKUP_RETENTION_DAYS}d" "$target"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
+}
+
+if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
+  info "Pruning old backups"
+  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
+  sleep "$BACKUP_PRUNING_LEEWAY"
+  if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
+    info "Pruning old backups from remote storage"
+    prune "backup-target/$AWS_S3_BUCKET_NAME"
+  fi
+  if [ -d "$BACKUP_ARCHIVE" ]; then
+    info "Pruning old backups from local archive"
+    prune "$BACKUP_ARCHIVE"
+  fi
 fi
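
The refactored pruning relies on `mc rm --fake` as a dry run: it prints what would be deleted without deleting anything, so piping through `wc -l` counts the affected files. A standalone sketch of the same eligibility check against a local archive (the `./local` path and the 7-day retention are illustrative):

```sh
# Dry run first: count files older than 7 days without touching them.
would_prune=$(mc rm --fake --recursive --force --older-than 7d ./local | wc -l)
total=$(mc ls ./local | wc -l)
# Same safety rule as the script: skip when nothing matches, and refuse
# to prune every existing backup at once.
if [ "$would_prune" != "0" ] && [ "$would_prune" != "$total" ]; then
  mc rm --recursive --force --older-than 7d ./local
fi
```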

src/entrypoint.sh

Lines changed: 8 additions & 4 deletions
@@ -12,25 +12,29 @@ set -e
 cat <<EOF > env.sh
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
+BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
+BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
 BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
 AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
 
-BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
-
 MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh
 
-mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
+  mc $MC_GLOBAL_OPTIONS alias set backup-target \
+    "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
+    "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+fi
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
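
The env.sh heredoc relies on `${VAR:-default}` parameter expansion, which is also why the quoting fix for `BACKUP_FILENAME` above matters: quoting the whole expansion keeps overrides containing spaces intact. A small illustration with made-up values:

```sh
unset BACKUP_ARCHIVE
echo "${BACKUP_ARCHIVE:-/archive}"    # unset -> prints the default: /archive
BACKUP_ARCHIVE="/mnt/nightly backups"
echo "${BACKUP_ARCHIVE:-/archive}"    # set -> prints /mnt/nightly backups intact
```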

test/compose/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+local

test/compose/docker-compose.yml

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@ services:
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
     volumes:
+      - ./local:/archive
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
 

test/compose/run.sh

Lines changed: 7 additions & 1 deletion
@@ -4,6 +4,8 @@ set -e
 
 cd $(dirname $0)
 
+mkdir -p local
+
 docker-compose up -d
 sleep 5
 
@@ -13,7 +15,11 @@ docker run --rm -it \
   -v compose_backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
 
-echo "[TEST:PASS] Found relevant files in untared backup."
+echo "[TEST:PASS] Found relevant files in untared remote backup."
+
+tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
+
+echo "[TEST:PASS] Found relevant files in untared local backup."
 
 if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
   echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
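
Assuming `docker` and `docker-compose` are installed, the updated test can be run end to end as below; the invocation is implied by the repository layout rather than stated in the commit:

```sh
./test/compose/run.sh   # creates ./test/compose/local on first run
```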
