This repository has been archived by the owner on Feb 21, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 13
/
Copy pathdo
executable file
·428 lines (369 loc) · 11.8 KB
/
do
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
#!/bin/bash
## Strict mode lives here rather than on the shebang line so it still
## applies when the script is invoked as `bash ./do` (shebang arguments
## are lost in that case). `##` keeps these lines out of the help output,
## which greps for lines starting with `# `.
set -eu
export DJANGO_SETTINGS_MODULE=mrs.settings
export DB_ENGINE=django.db.backends.postgresql
# db.reset Drop and re-create the database
db.reset() {
    sudo systemctl start postgresql
    # Dropping fails harmlessly on a fresh machine where the db does not
    # exist yet: report and continue.
    sudo -u postgres dropdb mrs || echo could not drop db
    sudo -u postgres createdb -E utf8 -O "$USER" mrs
}
# db.reload Drop and re-create the database, load test data
db.reload() {
    # && so test data is only loaded when the reset actually succeeded;
    # the function's status is that of the first failing step.
    db.reset && db.load
}
# db.load Load test data
db.load() {
    # Bring the schema up to date before loading the fixture.
    mrs migrate --noinput
    # Delete rows that migrate auto-creates and that the fixture also
    # contains, so loaddata does not collide on primary keys.
    djcli delete contenttypes.ContentType
    djcli delete auth.Group
    djcli delete mrsuser.User
    # NOTE(review): CI=1 presumably switches mrs into a non-interactive /
    # test-friendly mode -- confirm against the mrs settings module.
    CI=1 mrs loaddata src/mrs/tests/data.json
}
# db.dump Dump data from the database into test data.json
# This command is used to recreate the test dataset used in
# automated tests from the currently connected database.
db.dump() {
    CI=1 mrs mrsstat --refresh
    # Dump exactly the model list already present in the fixture, so the
    # regenerated file covers the same models. The command substitution is
    # deliberately unquoted: each model label must be a separate argument.
    # The sed pattern previously read `model". "`, relying on the `.`
    # wildcard to match the `:` of `"model": "app.name"`; match the JSON
    # key explicitly instead.
    mrs dumpdata --indent=4 \
        $(grep model src/mrs/tests/data.json | sort -u | sed 's/.*"model": "\([^"]*\)",*/\1/') \
        > src/mrs/tests/data.json
}
# runserver DEBUG/development server on all interfaces port 8000
runserver() {
    # 0:8000 binds all interfaces, useful from inside a VM or container.
    django-admin runserver --traceback 0:8000
}
# clean.pyc Find all __pycache__ and delete them recursively
clean.pyc() {
    # NUL-delimited find/xargs so directory names containing whitespace
    # survive the pipeline (plain `find | xargs` word-splits).
    if ! find . -type d -name __pycache__ -print0 | xargs -0 rm -rf; then
        # Some caches may be root-owned (e.g. created by a docker run):
        # take ownership back and retry.
        sudo chown -R "$USER". .
        find . -type d -name __pycache__ -print0 | xargs -0 rm -rf
    fi
}
# venv Setup and activate a venv for a python executable
# Having venv=none makes this job a no-op.
venv() {
    if [ "${venv-}" != "none" ]; then
        python=${python-python3}
        # $path overrides the venv location; the default is derived from
        # the interpreter name ($python is guaranteed set just above, so
        # the previous nested ${python-python3} default was redundant).
        venv=${path-.venv.$python}
        test -d "$venv" || virtualenv --python="$python" "$venv"
        # activate scripts reference unset variables and would trip -u:
        # relax strict mode around sourcing, then restore it.
        set +eux; echo activating "$venv"; source "$venv/bin/activate"; set -eux
    fi
}
# pip.install Install project and runtime dependencies
pip.install() {
    # Editable install of the current project, upgrading when needed.
    pip install --upgrade --editable .
}
# pip.dev Install project development dependencies
pip.dev() {
    # Runtime dependencies first, then the dev-only extras.
    pip.install
    pip install -r requirements-dev.txt
}
# py.test Test executor, supports a $cov and $CODECOV env var.
py.test() {
    # Test-friendly environment: mocked webpack loader, debug on, no
    # basic auth in front of the app.
    export WEBPACK_LOADER=webpack_mock
    export CI=true
    export DEBUG=true
    export BASICAUTH_DISABLE=1
    clean.pyc
    venv
    pip.install
    pip.dev
    # When a codecov token is present, default coverage target to src.
    if [ -n "${CODECOV_TOKEN-}" ]; then
        cov="${cov-src}"
    fi
    # Turn the target into "--cov <target>"; it is expanded unquoted
    # below ON PURPOSE so it splits into two separate arguments.
    if [ -n "${cov-}" ]; then
        cov="--cov $cov"
    fi
    # $(which py.test) resolves the real binary on PATH, not this shell
    # function of the same name (which would recurse). ${@-src} defaults
    # the test target to src and is intentionally left unquoted.
    $(which py.test) -s -vvv --strict -r fEsxXw ${cov-} ${@-src}
    if [ -n "${CODECOV_TOKEN-}" ]; then
        codecov --token $CODECOV_TOKEN
    fi
}
# py.testrewrite Rewrite the autogenerated test code at your own discretion
py.testrewrite() {
    # Same run as py.test, with fixture rewriting enabled for this call.
    FIXTURE_REWRITE=1 py.test
}
# py.qa Flake8 python linter
py.qa() {
    # Lint src only; migrations and settings are excluded as generated /
    # boilerplate code.
    flake8 \
        --show-source \
        --exclude migrations,settings \
        --max-complexity=8 \
        --ignore=E305,W503,N801 \
        src
}
# docker.build Build the docker container image
docker.build() {
    # $image and $CI_COMMIT_SHA are expected from the caller / CI
    # environment; quoted so unset or odd values fail loudly and whole.
    docker build \
        --shm-size 512M \
        -t "$image" \
        --build-arg GIT_COMMIT="$CI_COMMIT_SHA" \
        .
}
# docker.test Run tests in docker containers
docker.test() {
    # NOTE(review): was `db.start`, which is defined nowhere in this
    # file; docker.db.start is the matching helper -- confirm.
    docker.db.start
    # The option lines below were missing their trailing backslashes, so
    # each one was executed as a separate (failing) command.
    docker run -t \
        -v $(pwd):/app \
        -w /app \
        -e DB_HOST=$DB_HOST \
        -e DB_USER=$USER \
        -e rewrite=${rewrite-} \
        --user root \
        ${img-yourlabs/python} ./do py.test
}
# docker.testbuild Build a docker container and test in it
docker.testbuild() {
    # NOTE(review): was `db.start`, which is defined nowhere in this
    # file; docker.db.start is the matching helper -- confirm.
    docker.db.start
    docker.build
    docker.test
}
# docker.dump Dump data into ./dump for remote backup and restore
docker.dump() {
    # Rotate the previous dump out of the way, keeping one generation.
    if test -d dump; then
        rm -rf dump.previous
        mv dump dump.previous || echo Could not move ./dump out of the way
    fi
    mkdir -p dump
    cp do dump
    # Recover the GIT_COMMIT baked into the image, when available.
    # NOTE(review): this used to word-split a string containing a `|`,
    # which passes the pipe as a literal argument to docker instead of
    # forming a pipeline; run the pipeline directly instead.
    if getcommit=$(docker inspect --format='{{.Config.Env}}' betagouv/mrs:master | grep -o 'GIT_COMMIT=[a-z0-9]*'); then
        export $getcommit
    fi
    # Record the image the running container was started from, falling
    # back to the default image name.
    image="$(docker inspect --format='{{.Config.Image}}' mrs-$instance || echo betagouv/mrs:master)"
    echo $image > dump/image
    echo Backing-up container logs before docker shoots them
    docker logs mrs-$instance &> ./log/docker.log || echo "Couldn't get logs from instance"
    if [ -d ./postgres/data ] && docker start mrs-$instance-postgres; then
        # Second start is a no-op on an already-running container.
        docker start mrs-$instance-postgres
        docker logs mrs-$instance-postgres >> ./log/postgres.log
        # The container's /dump is bind-mounted to ./dump (see
        # docker.db.start), so the SQL dump lands in ./dump/data.dump.
        docker exec mrs-$instance-postgres pg_dumpall -U $POSTGRES_USER -c -f /dump/data.dump
    fi
    cp -a log dump
}
# docker.load Load dumped data from ./dump
docker.load() {
    export image=$(<./dump/image)
    # backup current data dir by moving it away, in case of manual restore
    postgres_current=postgres/current
    sudo rm -rf $postgres_current
    docker stop mrs-$instance-postgres
    docker rm -f mrs-$instance || echo could not rm container mrs-$instance
    [ ! -d postgres/data ] || sudo mv postgres/data $postgres_current
    # Fresh postgres container on an empty data dir, then replay the SQL
    # dump into it (./dump is bind-mounted at /dump).
    docker.db.start
    docker exec mrs-$instance-postgres psql -d mrs-$instance -U django -f /dump/data.dump
    # Restore succeeded: the safety copy is no longer needed.
    sudo rm -rf $postgres_current
    docker.start
}
# docker.backup Backup a dump remotely
docker.backup() {
    # Without BACKUP_FORCE, delegate to the systemd oneshot unit, which
    # guarantees single-instance execution.
    if [ -z "$BACKUP_FORCE" ]; then
        cat <<EOF
This script is not safe to run multiple instances at the same time.
You need to set the BACKUP_FORCE env var for the script to continue.
Or even better, use the systemd unit, that will garantee that the
script is not executed multiple times at the same time:
systemctl start --wait backup-mrs-production
systemctl status backup-mrs-production
journalctl -fu backup-mrs-production
Please upgrade to the above. Meanwhile, the script will deal with systemd for
you.
EOF
        set -eux
        # Stream the unit's logs in the background while the oneshot runs.
        journalctl -fu backup-mrs-production &
        journalpid="$!"
        systemctl start --wait backup-mrs-production
        # NOTE(review): under set -e a failing systemctl exits the script
        # before this capture, so retcode only ever sees the success path
        # -- confirm whether failures should be propagated explicitly.
        retcode="$?"
        kill $journalpid
        exit $retcode
    fi
    # Snapshot ./dump and ./mrsattachments into a local restic repo,
    # then mirror that repo to the FTP host.
    export RESTIC_REPOSITORY=./restic
    if [ -f ./.backup_password ]; then
        export RESTIC_PASSWORD_FILE=.backup_password
    fi
    mkdir -p mrsattachments
    restic backup dump mrsattachments --tag $GIT_COMMIT
    lftp -c "set ssl:check-hostname false;connect $FTP_HOST; mkdir -p mrs-$instance; mirror -Rv $(pwd)/restic mrs-$instance/restic"
    rm -rf $(pwd)/postgres/data/data.dump
}
# docker.dumpbackup Backup a dump remotely
docker.dumpbackup() {
    # Dump first, then ship the dump; each step aborts the chain under -e.
    docker.dump
    docker.backup
}
# docker.network Create a docker network
docker.network() {
    # Create the per-instance bridge network unless it already exists.
    if ! docker network inspect mrs-$instance; then
        docker network create --driver bridge mrs-$instance
    fi
}
# docker.db.start Start a docker database instance
docker.db.start() {
    # Only create the container if none exists in any state (-a); data,
    # unix socket and ./dump are bind-mounted from the working directory.
    docker ps -a | grep mrs-$instance-postgres \
    || docker run \
        --detach \
        --name mrs-$instance-postgres \
        --volume $(pwd)/postgres/data:/var/lib/postgresql/data \
        --volume $(pwd)/postgres/run:/var/run/postgresql \
        --volume $(pwd)/dump:/dump \
        --env-file $(pwd)/.env \
        --restart always \
        --log-driver journald \
        --network mrs-$instance \
        postgres:10
    # No-op when already running, (re)starts an existing stopped one.
    docker start mrs-$instance-postgres
    # Poll up to ~5s for postgres to log readiness, then for its unix
    # socket to appear in the bind-mounted run directory.
    for i in {1..5}; do docker logs mrs-$instance-postgres 2>&1 | grep 'ready to accept connections' && break || sleep 1; done
    docker logs mrs-$instance-postgres
    for i in {1..5}; do test -S postgres/run/.s.PGSQL.5432 && break || sleep 1; done
}
# docker.db.stop Stop the docker database instance
docker.db.stop() {
    # Stop only; the container and its data are kept.
    docker stop mrs-$instance-postgres
}
# docker.db.reset Destroy all db data and create a new one with test data.
# Note that it will not execute a data dump prior to wiping
# the data.
docker.db.reset() {
    docker.db.stop
    # Removing the container wholesale drops its state; db.start then
    # recreates it from scratch.
    docker rm -f mrs-$instance-postgres
    docker.db.start
}
# docker.start Start a docker instance
docker.start() {
    docker.network
    docker rm -f mrs-$instance || echo could not rm container
    docker.db.start
    sleep 5 # unfortunate fix for db not ready
    # One-shot migration container (--rm), run to completion before the
    # long-lived application container is started.
    docker run \
        --rm \
        --name mrs-$instance-migrate \
        --volume $(pwd)/log:/app/log \
        --env-file $(pwd)/.env \
        --network mrs-$instance \
        $image \
        mrs migrate
    # ${*-$image} is intentionally unquoted: callers may pass a full
    # "image cmd args..." tail; default is just $image.
    docker run \
        --name mrs-$instance \
        --restart unless-stopped \
        --log-driver journald \
        --network mrs-$instance \
        --volume $(pwd)/spooler:/app/spooler \
        --volume $(pwd)/media:/media \
        --volume $(pwd)/log:/app/log \
        --env-file $(pwd)/.env \
        ${*-$image}
    # Attach to the mailcatcher network when it exists (best-effort).
    (! docker network inspect mailcatcher ) || docker network connect mailcatcher mrs-$instance
    docker logs mrs-$instance
}
# docker.runserver Run a development server on port 8000 with docker
docker.runserver() {
    # NOTE(review): this function was declared as docker.start(), which
    # silently replaced the real docker.start defined above and made it
    # call itself recursively. The help line names it docker.runserver,
    # so declare it under that name.
    docker.start --publish=8000:8000 --volume $(pwd)/src:/app/src betagouv/mrs mrs runserver 0:8000
}
# docker.mount Mount the current directory into /app for development
docker.mount() {
    # Live-mount the working tree over the image's /app.
    docker.start --volume $(pwd):/app $image
}
# docker.stop Stop docker instances
docker.stop() {
    # NOTE(review): this used to anchor `^mrs-...$` against raw
    # `docker ps` output, whose lines start with the container id, so
    # the stop commands could never fire; match the names column exactly
    # instead (grep -x = whole-line match).
    (! docker ps --format '{{.Names}}' | grep -x mrs-$instance) || docker stop mrs-$instance
    (! docker ps --format '{{.Names}}' | grep -x mrs-$instance-postgres) || docker stop mrs-$instance-postgres
}
# docker.rm Remove everything
docker.rm() {
    # Best-effort teardown: report what could not be removed, keep going.
    docker rm -f mrs-$instance-postgres || echo container mrs-$instance-postgres not removed
    docker rm -f mrs-$instance || echo container mrs-$instance not removed
    docker network rm mrs-$instance || echo network mrs-$instance not removed
}
# docker.reset DELETE ALL DATA and start again
docker.reset() {
    # Tear the network down first: containers must be disconnected (or
    # removed) before the network itself can be deleted.
    if docker network inspect mrs-$instance; then
        docker network disconnect mrs-$instance mrs-$instance || echo could not disconnect instance
        docker network disconnect mrs-$instance mrs-$instance-postgres || echo could not disconnect postgres
        if docker ps -a | grep mrs-$instance-postgres; then
            docker rm -f mrs-$instance-postgres
        fi
        docker network rm mrs-$instance
    fi
    # \$ anchors the name at end of line, where docker ps prints NAMES.
    if docker ps -a | grep mrs-$instance\$; then
        docker rm -f mrs-$instance
    fi
    docker.start
}
# docker.ps Show docker process
docker.ps() {
    # List every container (any state) belonging to this instance.
    docker ps --all | grep mrs-$instance
}
# docker.logs Show docker process
docker.logs() {
    # Database logs first, then the application container's.
    docker logs mrs-$instance-postgres
    docker logs mrs-$instance
}
# docker.shell Shell on docker process
docker.shell() {
    # Interactive bash inside the running application container.
    docker exec -it mrs-$instance bash
}
# compose Wrapper for docker-compose
# Adds an extra command: ./do compose apply
compose() {
    # ${1-} keeps a bare `./do compose` from tripping set -u.
    if [ "${1-}" = "apply" ]; then
        compose build
        compose down
        compose up -d
        compose logs
        compose ps
        return
    fi
    # "$@" preserves argument boundaries (quoted args with spaces).
    docker-compose "$@"
}
# vagrant Vagrant wrapper providing ssh-config into .vagrant
# Adds apply sub command to chain destroy and up
vagrant() {
    export VAGRANT_IP=192.168.168.168
    # ${1-} guards against set -u when called without arguments.
    if [ "${1-}" = "apply" ]; then
        vagrant destroy -f
        vagrant up
        return
    fi
    if [ "${1-}" = "bigsudo" ]; then
        shift
        # Make sure the box is up (and .vagrant-ssh exists) first.
        if [ ! -f .vagrant-ssh ]; then
            vagrant up
        fi
        bigsudo "$@" --ssh-common-args="-F .vagrant-ssh" --inventory="default,"
        return
    fi
    # $(which vagrant) targets the real binary on PATH, not this
    # function of the same name (which would recurse).
    $(which vagrant) "$@"
    if [ "${1-}" = "up" ]; then
        $(which vagrant) ssh-config > .vagrant-ssh
    fi
}
# waituntil Wait for a statement until 150 tries elapsed
waituntil() {
    set +x
    # Never use caller data as the printf FORMAT string: a command
    # containing % would previously be interpreted as format directives.
    printf '%s' "$*"
    # $i overrides the attempt budget (default 150, ~one per second).
    i=${i-150}
    success=false
    until [ $i = 0 ]; do
        i=$((i-1))
        printf "\e[31m.\e[0m"
        # $* is intentionally unquoted: callers may pass the command as
        # one string and rely on word-splitting to execute it.
        if $* &> ".waituntil.outerr"; then
            printf "\e[32mSUCCESS\e[0m:\n"
            success=true
            break
        else
            sleep 1
        fi
    done
    # Show the last attempt's output, on success or failure.
    cat ".waituntil.outerr"
    if ! $success; then
        printf "\e[31mFAILED\e[0m:\n"
        # Exits the whole script: waituntil failures are fatal.
        exit 1
    fi
    set -x
}
## Dispatcher: with no argument, print the help text (every top-level
## comment starting with '# ' above doubles as a usage line); otherwise
## run the named function with the remaining arguments. "$fun" "$@"
## preserves argument boundaries instead of re-splitting via $*.
if [ -z "${1-}" ]; then
    grep '^# ' "$0"
else
    fun=$1
    shift
    "$fun" "$@"
fi