
Use ansible tags to properly run post-restore tasks

pull/16/head
Kristijan Mitrovic, 4 years ago
parent commit ebe9f90558
6 changed files with 71 additions and 59 deletions
  1. +9 -12   automation/roles/algo/tasks/main.yml
  2. +10 -6   automation/roles/bubble/files/bubble_restore_monitor.sh
  3. +4 -1    automation/roles/mitmproxy/tasks/main.yml
  4. +41 -38  automation/roles/mitmproxy/tasks/route.yml
  5. +6 -1    bubble-server/src/main/resources/ansible/install_local.sh.hbs
  6. +1 -1    bubble-server/src/main/resources/bubble/node_progress_meter_ticks.json
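At a glance, the flow this change introduces: any task that has to run again after a backup is restored is tagged post_restore, the initial install skips that tag when a restore is pending, and bubble_restore_monitor.sh later re-runs exactly those tasks (plus anything tagged always) once the restored data is in place. A minimal sketch of the pattern, with an illustrative task name and script path that are not taken from the playbook:

    # Tag work that depends on restored data (e.g. the CA key) so it can be deferred and replayed.
    - name: Example post-restore task                 # illustrative name only
      shell: /usr/local/bin/do-post-restore-step      # hypothetical script
      tags: post_restore

    # Fresh install (no restore pending): run the playbook as usual, post_restore tasks included.
    #   ansible-playbook --inventory ./hosts ./playbook.yml
    # Install with a restore pending: defer the tagged tasks.
    #   ansible-playbook --skip-tags post_restore --inventory ./hosts ./playbook.yml
    # After the backup has been restored: replay only the deferred tasks and the always-tagged ones.
    #   ansible-playbook --tags 'post_restore,always' --inventory ./hosts ./playbook.yml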

+9 -12  automation/roles/algo/tasks/main.yml

@@ -48,18 +48,15 @@
     src: supervisor_wg_monitor_connections.conf
     dest: /etc/supervisor/conf.d/wg_monitor_connections.conf
 
-# Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
-- name: Run algo playbook to install algo for brand new nodes
-  shell: /root/ansible/roles/algo/algo/install_algo.sh
-  when: restore_key is not defined
+- name: Run algo playbook to install algo
+  block:
+    - name: Run install algo script including playbook
+      shell: /root/ansible/roles/algo/algo/install_algo.sh
 
-# Don't start monitors when in restore mode, bubble_restore_monitor.sh will start it after algo is installed
-- name: Restart algo_refresh_users_monitor and wg_monitor_connections for brand new nodes
-  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor && supervisorctl restart wg_monitor_connections"
-  when: restore_key is not defined
+    - name: Restart algo related services
+      shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor && supervisorctl restart wg_monitor_connections"
 
-- name: Stop algo monitors
-  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl stop algo_refresh_users_monitor && supervisorctl stop wg_monitor_connections"
-  when: restore_key is defined
-- include: algo_firewall.yml
+  # Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
+  tags: post_restore
 
+- include: algo_firewall.yml
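The restructuring above leans on tag inheritance: a tags: entry on a task that wraps a block: applies to every task inside that block, so the single post_restore tag covers both the algo install and the monitor restarts. A minimal sketch of that behaviour, using placeholder task names and commands:

    - name: Parent task carrying the tag        # placeholder name
      block:
        - name: First step                      # inherits post_restore from the parent
          shell: echo "step one"
        - name: Second step                     # inherits post_restore as well
          shell: echo "step two"
      tags: post_restore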

+10 -6  automation/roles/bubble/files/bubble_restore_monitor.sh

@@ -111,17 +111,21 @@ CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
 if [[ ! -f ${CONFIGS_BACKUP} ]] ; then
   log "Warning: Algo VPN configs backup not found: ${CONFIGS_BACKUP}, not installing algo"
 else
-  ALGO_BASE=/root/ansible/roles/algo/algo
+  ANSIBLE_HOME="/root"
+  ANSIBLE_DIR="${ANSIBLE_HOME}/ansible"
+  ID_FILE="${ANSIBLE_HOME}/.ssh/bubble_rsa"
+  SSH_OPTIONS="--ssh-extra-args '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=publickey -i ${ID_FILE}'"
+
+  ALGO_BASE=${ANSIBLE_DIR}/roles/algo/algo
   if [[ ! -d ${ALGO_BASE} ]] ; then
     die "Error restoring Algo VPN: directory ${ALGO_BASE} not found"
   fi
   cd ${ALGO_BASE} && tar xzf ${CONFIGS_BACKUP} || die "Error restoring algo VPN configs"
 
-  # install/configure algo
-  ${ALGO_BASE}/install_algo.sh || die "Error configuring or installing algo VPN"
-
-  # ensure user monitor is running
-  supervisorctl restart algo_refresh_users_monitor
+  cd "${ANSIBLE_DIR}" && \
+    . ./venv/bin/activate && \
+    bash -c "ansible-playbook ${SSH_OPTIONS} --tags 'post_restore,always' --inventory ./hosts ./playbook.yml" \
+      || die "Error running ansible in post-restore. journalctl -xe = $(journalctl -xe | tail -n 50)"
 fi
 
 # restart mitm proxy service


+4 -1  automation/roles/mitmproxy/tasks/main.yml

@@ -88,13 +88,16 @@
     state: link
 
 - name: Restart dnscrypt-proxy
-  shell: service dnscrypt-proxy restart
+  service:
+    name: dnscrypt-proxy
+    state: restarted
+  tags: post_restore
 
 - name: restart supervisord
   service:
     name: supervisor
     enabled: yes
     state: restarted
   tags: always
 
 - import_tasks: route.yml
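Worth noting for the tasks above: always is a built-in Ansible tag, so the supervisord restart runs in every pass, both when the install script calls the playbook with --skip-tags post_restore and when bubble_restore_monitor.sh calls it with --tags 'post_restore,always'; it would only be skipped by an explicit --skip-tags always. A minimal sketch of that behaviour:

    - name: Task that runs in both the install and the restore pass   # illustrative name
      service:
        name: supervisor
        state: restarted
      tags: always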



+41 -38  automation/roles/mitmproxy/tasks/route.yml

@@ -14,45 +14,48 @@
     value: 0
     sysctl_set: yes
 
-- name: "Allow MITM private port"
-  iptables:
-    chain: INPUT
-    action: insert
-    rule_num: 10
-    protocol: tcp
-    destination_port: "{{ mitm_port }}"
-    ctstate: NEW
-    syn: match
-    jump: ACCEPT
-    comment: Accept new local TCP DNS connections on private port
-  become: yes
+- name: Setup for MITM and save iptables
+  block:
+    - name: "Allow MITM private port"
+      iptables:
+        chain: INPUT
+        action: insert
+        rule_num: 10
+        protocol: tcp
+        destination_port: "{{ mitm_port }}"
+        ctstate: NEW
+        syn: match
+        jump: ACCEPT
+        comment: Accept new local TCP DNS connections on private port
+      become: yes
 
-- name: Route port 80 through mitmproxy
-  iptables:
-    table: nat
-    chain: PREROUTING
-    action: insert
-    rule_num: 1
-    protocol: tcp
-    destination_port: 80
-    jump: REDIRECT
-    to_ports: "{{ mitm_port }}"
+    - name: Route port 80 through mitmproxy
+      iptables:
+        table: nat
+        chain: PREROUTING
+        action: insert
+        rule_num: 1
+        protocol: tcp
+        destination_port: 80
+        jump: REDIRECT
+        to_ports: "{{ mitm_port }}"
 
-- name: Route port 443 through mitmproxy
-  iptables:
-    table: nat
-    chain: PREROUTING
-    action: insert
-    rule_num: 2
-    protocol: tcp
-    destination_port: 443
-    jump: REDIRECT
-    to_ports: "{{ mitm_port }}"
+    - name: Route port 443 through mitmproxy
+      iptables:
+        table: nat
+        chain: PREROUTING
+        action: insert
+        rule_num: 2
+        protocol: tcp
+        destination_port: 443
+        jump: REDIRECT
+        to_ports: "{{ mitm_port }}"
 
-- name: save iptables rules
-  shell: iptables-save > /etc/iptables/rules.v4
-  become: yes
+    - name: save iptables rules
+      shell: iptables-save > /etc/iptables/rules.v4
+      become: yes
 
-- name: save iptables v6 rules
-  shell: ip6tables-save > /etc/iptables/rules.v6
-  become: yes
+    - name: save iptables v6 rules
+      shell: ip6tables-save > /etc/iptables/rules.v6
+      become: yes
+  tags: post_restore
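If you need to confirm that the redirect rules are back in place after the post-restore pass re-applies this block, listing the nat PREROUTING chain shows the two REDIRECT entries at the top. A small ad-hoc check, sketched as an Ansible task (the task name and register variable are illustrative, not part of the role):

    - name: List the mitmproxy NAT redirect rules   # illustrative task
      shell: iptables -t nat -L PREROUTING -n --line-numbers
      become: yes
      register: prerouting_rules                    # hypothetical variable name

    - name: Show the captured rules
      debug:
        var: prerouting_rules.stdout_lines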

+6 -1  bubble-server/src/main/resources/ansible/install_local.sh.hbs

@@ -49,9 +49,14 @@ sudo pip3 install setuptools psycopg2-binary || die "Error pip3 installing setup

 SSH_OPTIONS="--ssh-extra-args '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=publickey -i ${ID_FILE}'"
 
+SKIP_TAGS=""
+if [[ -n "{{restoreKey}}" ]] ; then
+  SKIP_TAGS="--skip-tags post_restore"
+fi
+
 cd "${ANSIBLE_DIR}" && \
   virtualenv -p python3 ./venv && \
   . ./venv/bin/activate && \
   pip3 install ansible && \
-  bash -c "ansible-playbook ${SSH_OPTIONS} --inventory ./hosts ./playbook.yml" \
+  bash -c "ansible-playbook ${SSH_OPTIONS} ${SKIP_TAGS} --inventory ./hosts ./playbook.yml" \
     || die "Error running ansible. journalctl -xe = $(journalctl -xe | tail -n 50)"

+1 -1  bubble-server/src/main/resources/bubble/node_progress_meter_ticks.json

@@ -13,7 +13,7 @@
{ "percent": 44,"messageKey":"role_bubble_jar", "pattern":"TASK \\[bubble : Install bubble jar] \\*{5,}" },
{ "percent": 48,"messageKey":"role_bubble_db", "pattern":"TASK \\[bubble : Populate database] \\*{5,}" },
{ "percent": 51,"messageKey":"role_bubble_restore", "pattern":"TASK \\[bubble : Install restore helper scripts] \\*{5,}" },
{ "percent": 52,"messageKey":"role_bubble_algo", "pattern":"TASK \\[algo : Install wg_monitor_connections supervisor conf file] \\*{5,}" },
{ "percent": 52,"messageKey":"role_bubble_algo", "pattern":"TASK \\[algo : Run algo playbook to install algo] \\*{5,}" },
{ "percent": 76,"messageKey":"role_nginx", "pattern":"TASK \\[nginx : [\\w\\s]+] \\*{5,}" },
{ "percent": 81,"messageKey":"role_nginx_certbot", "pattern":"TASK \\[nginx : Init certbot] \\*{5,}" },
{ "percent": 91,"messageKey":"role_mitmproxy", "pattern":"TASK \\[mitmproxy : [\\w\\s]+] \\*{5,}" },

