Browse source code

Use ansible tags to properly run post-restore tasks

pull/16/head
Kristijan Mitrovic, 4 years ago
parent · commit ebe9f90558
6 changed files with 71 additions and 59 deletions
1. +9  -12  automation/roles/algo/tasks/main.yml
2. +10 -6   automation/roles/bubble/files/bubble_restore_monitor.sh
3. +4  -1   automation/roles/mitmproxy/tasks/main.yml
4. +41 -38  automation/roles/mitmproxy/tasks/route.yml
5. +6  -1   bubble-server/src/main/resources/ansible/install_local.sh.hbs
6. +1  -1   bubble-server/src/main/resources/bubble/node_progress_meter_ticks.json

+9 -12  automation/roles/algo/tasks/main.yml

@@ -48,18 +48,15 @@
     src: supervisor_wg_monitor_connections.conf
     dest: /etc/supervisor/conf.d/wg_monitor_connections.conf
 
-# Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
-- name: Run algo playbook to install algo for brand new nodes
-  shell: /root/ansible/roles/algo/algo/install_algo.sh
-  when: restore_key is not defined
+- name: Run algo playbook to install algo
+  block:
+    - name: Run install algo script including playbook
+      shell: /root/ansible/roles/algo/algo/install_algo.sh
 
-# Don't start monitors when in restore mode, bubble_restore_monitor.sh will start it after algo is installed
-- name: Restart algo_refresh_users_monitor and wg_monitor_connections for brand new nodes
-  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor && supervisorctl restart wg_monitor_connections"
-  when: restore_key is not defined
+    - name: Restart algo related services
+      shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor && supervisorctl restart wg_monitor_connections"
 
-- name: Stop algo monitors
-  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl stop algo_refresh_users_monitor && supervisorctl stop wg_monitor_connections"
-  when: restore_key is defined
+  # Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
+  tags: post_restore
 
 - include: algo_firewall.yml
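In brief, the mechanism this commit introduces: every task that must be re-run after a backup restore now carries the post_restore tag, and a tag placed on a block applies to every task inside that block, replacing the old per-task "when: restore_key ..." conditions. Assembled from the two scripts changed below, the playbook is then driven in two passes (same flags as in the diffs; SSH_OPTIONS as defined in those scripts):

    # install time, on a node being restored (install_local.sh.hbs):
    ansible-playbook ${SSH_OPTIONS} --skip-tags post_restore --inventory ./hosts ./playbook.yml

    # later, after the CA key has been restored (bubble_restore_monitor.sh):
    ansible-playbook ${SSH_OPTIONS} --tags 'post_restore,always' --inventory ./hosts ./playbook.yml

Tasks tagged always, like the supervisord restart below, run under any --tags selection anyway; naming the tag explicitly just makes the intent visible.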

+10 -6  automation/roles/bubble/files/bubble_restore_monitor.sh

@@ -111,17 +111,21 @@ CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
 if [[ ! -f ${CONFIGS_BACKUP} ]] ; then
   log "Warning: Algo VPN configs backup not found: ${CONFIGS_BACKUP}, not installing algo"
 else
-  ALGO_BASE=/root/ansible/roles/algo/algo
+  ANSIBLE_HOME="/root"
+  ANSIBLE_DIR="${ANSIBLE_HOME}/ansible"
+  ID_FILE="${ANSIBLE_HOME}/.ssh/bubble_rsa"
+  SSH_OPTIONS="--ssh-extra-args '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=publickey -i ${ID_FILE}'"
+
+  ALGO_BASE=${ANSIBLE_DIR}/roles/algo/algo
   if [[ ! -d ${ALGO_BASE} ]] ; then
     die "Error restoring Algo VPN: directory ${ALGO_BASE} not found"
   fi
   cd ${ALGO_BASE} && tar xzf ${CONFIGS_BACKUP} || die "Error restoring algo VPN configs"
 
-  # install/configure algo
-  ${ALGO_BASE}/install_algo.sh || die "Error configuring or installing algo VPN"
-
-  # ensure user monitor is running
-  supervisorctl restart algo_refresh_users_monitor
+  cd "${ANSIBLE_DIR}" && \
+    . ./venv/bin/activate && \
+    bash -c "ansible-playbook ${SSH_OPTIONS} --tags 'post_restore,always' --inventory ./hosts ./playbook.yml" \
+    || die "Error running ansible in post-restore. journalctl -xe = $(journalctl -xe | tail -n 50)"
 fi
 
 # restart mitm proxy service
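A quick way to sanity-check what that post-restore pass will pick up, without touching the host, is ansible-playbook's standard --list-tasks flag (a sketch, assuming the venv layout that install_local.sh.hbs creates under /root/ansible):

    cd /root/ansible && . ./venv/bin/activate
    ansible-playbook --list-tasks --tags 'post_restore,always' --inventory ./hosts ./playbook.yml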


+4 -1  automation/roles/mitmproxy/tasks/main.yml

@@ -88,13 +88,16 @@
     state: link
 
 - name: Restart dnscrypt-proxy
-  shell: service dnscrypt-proxy restart
+  service: dnscrypt-proxy
+  status: restarted
+  tags: post_restore
 
 - name: restart supervisord
   service:
     name: supervisor
     enabled: yes
     state: restarted
+  tags: always
 
 - import_tasks: route.yml
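Review note on the hunk above: service: dnscrypt-proxy with status: restarted does not match the Ansible service module, whose parameters are name: and state: (and which accepts no free-form argument), so this task looks like it would fail when the post_restore pass reaches it; presumably name: dnscrypt-proxy with state: restarted was intended. A dry pass catches this class of mistake before a real restore does (sketch, same assumed layout as above):

    cd /root/ansible && . ./venv/bin/activate
    # parse-time validation of the playbook and imported role tasks
    ansible-playbook --syntax-check ./playbook.yml
    # check mode exercises module argument validation without changing the host
    ansible-playbook --check --tags post_restore --inventory ./hosts ./playbook.yml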




+41 -38  automation/roles/mitmproxy/tasks/route.yml

@@ -14,45 +14,48 @@
     value: 0
     sysctl_set: yes
 
-- name: "Allow MITM private port"
-  iptables:
-    chain: INPUT
-    action: insert
-    rule_num: 10
-    protocol: tcp
-    destination_port: "{{ mitm_port }}"
-    ctstate: NEW
-    syn: match
-    jump: ACCEPT
-    comment: Accept new local TCP DNS connections on private port
-  become: yes
+- name: Setup for MITM and save iptables
+  block:
+    - name: "Allow MITM private port"
+      iptables:
+        chain: INPUT
+        action: insert
+        rule_num: 10
+        protocol: tcp
+        destination_port: "{{ mitm_port }}"
+        ctstate: NEW
+        syn: match
+        jump: ACCEPT
+        comment: Accept new local TCP DNS connections on private port
+      become: yes
 
-- name: Route port 80 through mitmproxy
-  iptables:
-    table: nat
-    chain: PREROUTING
-    action: insert
-    rule_num: 1
-    protocol: tcp
-    destination_port: 80
-    jump: REDIRECT
-    to_ports: "{{ mitm_port }}"
+    - name: Route port 80 through mitmproxy
+      iptables:
+        table: nat
+        chain: PREROUTING
+        action: insert
+        rule_num: 1
+        protocol: tcp
+        destination_port: 80
+        jump: REDIRECT
+        to_ports: "{{ mitm_port }}"
 
-- name: Route port 443 through mitmproxy
-  iptables:
-    table: nat
-    chain: PREROUTING
-    action: insert
-    rule_num: 2
-    protocol: tcp
-    destination_port: 443
-    jump: REDIRECT
-    to_ports: "{{ mitm_port }}"
+    - name: Route port 443 through mitmproxy
+      iptables:
+        table: nat
+        chain: PREROUTING
+        action: insert
+        rule_num: 2
+        protocol: tcp
+        destination_port: 443
+        jump: REDIRECT
+        to_ports: "{{ mitm_port }}"
 
-- name: save iptables rules
-  shell: iptables-save > /etc/iptables/rules.v4
-  become: yes
+    - name: save iptables rules
+      shell: iptables-save > /etc/iptables/rules.v4
+      become: yes
 
-- name: save iptables v6 rules
-  shell: ip6tables-save > /etc/iptables/rules.v6
-  become: yes
+    - name: save iptables v6 rules
+      shell: ip6tables-save > /etc/iptables/rules.v6
+      become: yes
+  tags: post_restore
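With the single block-level tag, the whole MITM routing setup re-runs during restore. Ansible's iptables module checks whether a rule is already present before inserting it, so the repeat run should not stack duplicate rules. To eyeball the result on a node (a sketch; 8888 stands in for the role's actual mitm_port value, which is defined elsewhere):

    # the NAT redirects for ports 80/443 should sit at the top of PREROUTING
    sudo iptables -t nat -L PREROUTING -n --line-numbers | head -n 5
    # the INPUT accept rule for the private MITM port
    sudo iptables-save | grep 8888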

+6 -1  bubble-server/src/main/resources/ansible/install_local.sh.hbs

@@ -49,9 +49,14 @@ sudo pip3 install setuptools psycopg2-binary || die "Error pip3 installing setup
 
 SSH_OPTIONS="--ssh-extra-args '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PreferredAuthentications=publickey -i ${ID_FILE}'"
 
+SKIP_TAGS=""
+if [[ -n "{{restoreKey}}" ]] ; then
+  SKIP_TAGS="--skip-tags post_restore"
+fi
+
 cd "${ANSIBLE_DIR}" && \
   virtualenv -p python3 ./venv && \
   . ./venv/bin/activate && \
   pip3 install ansible && \
-  bash -c "ansible-playbook ${SSH_OPTIONS} --inventory ./hosts ./playbook.yml" \
+  bash -c "ansible-playbook ${SSH_OPTIONS} ${SKIP_TAGS} --inventory ./hosts ./playbook.yml" \
   || die "Error running ansible. journalctl -xe = $(journalctl -xe | tail -n 50)"

+1 -1  bubble-server/src/main/resources/bubble/node_progress_meter_ticks.json

@@ -13,7 +13,7 @@
{ "percent": 44,"messageKey":"role_bubble_jar", "pattern":"TASK \\[bubble : Install bubble jar] \\*{5,}" }, { "percent": 44,"messageKey":"role_bubble_jar", "pattern":"TASK \\[bubble : Install bubble jar] \\*{5,}" },
{ "percent": 48,"messageKey":"role_bubble_db", "pattern":"TASK \\[bubble : Populate database] \\*{5,}" }, { "percent": 48,"messageKey":"role_bubble_db", "pattern":"TASK \\[bubble : Populate database] \\*{5,}" },
{ "percent": 51,"messageKey":"role_bubble_restore", "pattern":"TASK \\[bubble : Install restore helper scripts] \\*{5,}" }, { "percent": 51,"messageKey":"role_bubble_restore", "pattern":"TASK \\[bubble : Install restore helper scripts] \\*{5,}" },
{ "percent": 52,"messageKey":"role_bubble_algo", "pattern":"TASK \\[algo : Install wg_monitor_connections supervisor conf file] \\*{5,}" },
{ "percent": 52,"messageKey":"role_bubble_algo", "pattern":"TASK \\[algo : Run algo playbook to install algo] \\*{5,}" },
{ "percent": 76,"messageKey":"role_nginx", "pattern":"TASK \\[nginx : [\\w\\s]+] \\*{5,}" }, { "percent": 76,"messageKey":"role_nginx", "pattern":"TASK \\[nginx : [\\w\\s]+] \\*{5,}" },
{ "percent": 81,"messageKey":"role_nginx_certbot", "pattern":"TASK \\[nginx : Init certbot] \\*{5,}" }, { "percent": 81,"messageKey":"role_nginx_certbot", "pattern":"TASK \\[nginx : Init certbot] \\*{5,}" },
{ "percent": 91,"messageKey":"role_mitmproxy", "pattern":"TASK \\[mitmproxy : [\\w\\s]+] \\*{5,}" }, { "percent": 91,"messageKey":"role_mitmproxy", "pattern":"TASK \\[mitmproxy : [\\w\\s]+] \\*{5,}" },

