@@ -6,6 +6,7 @@ package bubble.cloud.compute.vultr; | |||
import bubble.cloud.CloudRegion; | |||
import bubble.cloud.compute.*; | |||
import bubble.model.cloud.AnsibleInstallType; | |||
import bubble.model.cloud.BubbleNode; | |||
import bubble.model.cloud.BubbleNodeState; | |||
import com.fasterxml.jackson.databind.JsonNode; | |||
@@ -68,6 +69,15 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
@Getter(lazy=true) private final List<ComputeNodeSize> cloudSizes = loadCloudResources(PLANS_URL, new VultrComputeNodeSizeParser()); | |||
@Getter(lazy=true) private final List<OsImage> cloudOsImages = loadCloudResources(OS_URL, new VultrOsImageParser()); | |||
@Getter(lazy=true) private final int snapshotOsId = initSnapshotOsId(); | |||
private int initSnapshotOsId() { | |||
final OsImage snapshot = getCloudOsImages().stream() | |||
.filter(i -> i.getName().equals("Snapshot")) | |||
.findFirst() | |||
.orElse(null); | |||
return snapshot == null ? die("initSnapshotOsId: no snapshot OS found") : snapshot.getId().intValue(); | |||
} | |||
public static final long SERVER_START_POLL_INTERVAL = SECONDS.toMillis(5); | |||
public static final long SERVER_START_TIMEOUT = MINUTES.toMillis(10); | |||
public static final long SERVER_STOP_TIMEOUT = SECONDS.toMillis(60); | |||
@@ -110,8 +120,8 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
final Long planId = getSize(size.getType()).getId(); | |||
if (planId == null) return die("start: plan not found: "+size.getInternalName()); | |||
// todo: lookup osId based on installType and region | |||
final Integer osId = getOs().getId().intValue(); | |||
final PackerImage packerImage = getPackerImage(node.getInstallType()); | |||
if (packerImage == null) return die("start: no packer image found for install type: "+node.getInstallType()); | |||
// register ssh key, check response | |||
final String sshKeyId = registerSshKey(node); | |||
@@ -119,7 +129,8 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
// prepare to create server | |||
final String data = "DCID=" + regionId + | |||
"&VPSPLANID=" + planId + | |||
"&OSID=" + osId + | |||
"&OSID=" + getSnapshotOsId() + | |||
"&SNAPSHOTID=" + packerImage.getId() + | |||
"&SSHKEYID=" + sshKeyId + | |||
"&tag=" + cloud.getUuid() + | |||
"&label=" + node.getFqdn() + | |||
@@ -391,11 +402,20 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
return images == null ? Collections.emptyList() : images; | |||
} | |||
public PackerImage getPackerImage(AnsibleInstallType installType) { | |||
final List<PackerImage> images = getPackerImages(); | |||
return images == null ? null : images.stream() | |||
.filter(i -> i.getName().contains("_"+installType.name()+"_")) | |||
.findFirst() | |||
.orElse(null); | |||
} | |||
public static final long SNAPSHOT_TIMEOUT = MINUTES.toMillis(60); | |||
@Override public List<PackerImage> finalizeIncompletePackerRun(CommandResult commandResult, String jarSha) { | |||
if (!commandResult.getStdout().contains("Waiting 300s for snapshot") | |||
|| !commandResult.getStdout().contains("Error waiting for snapshot") | |||
|| !commandResult.getStdout().contains("Unable to destroy server: Unable to remove VM: Server is currently locked")) { | |||
stopImageServer(jarSha); | |||
return null; | |||
} | |||
@@ -414,6 +434,13 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
log.error("finalizeIncompletePackerRun: timeout waiting for snapshot"); | |||
} | |||
if (!stopImageServer(jarSha)) return null; | |||
if (snapshot == null) return null; | |||
return new SingletonList<>(snapshot); | |||
} | |||
public boolean stopImageServer(String jarSha) { | |||
// find the server | |||
final List<BubbleNode> servers; | |||
try { | |||
@@ -423,15 +450,15 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
}); | |||
} catch (IOException e) { | |||
log.error("finalizeIncompletePackerRun: error listing servers: "+shortError(e), e); | |||
return null; | |||
return false; | |||
} | |||
if (servers.isEmpty()) { | |||
log.error("finalizeIncompletePackerRun: snapshot server not found"); | |||
return null; | |||
return false; | |||
} | |||
if (servers.size() != 1) { | |||
log.error("finalizeIncompletePackerRun: expected only one server, found: "+servers.size()); | |||
return null; | |||
return false; | |||
} | |||
// now shut down the server | |||
@@ -439,11 +466,9 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
stopServer(servers.get(0).getTag(TAG_INSTANCE_ID)); | |||
} catch (Exception e) { | |||
log.error("finalizeIncompletePackerRun: error stopping server: "+shortError(e)); | |||
return null; | |||
return false; | |||
} | |||
if (snapshot == null) return null; | |||
return new SingletonList<>(snapshot); | |||
return true; | |||
} | |||
} |
@@ -121,6 +121,10 @@ public class BubbleNode extends IdentifiableBase implements HasNetwork, HasBubbl | |||
return getNetwork() != null && n.getNetwork() != null && getNetwork().equals(n.getNetwork()); | |||
} | |||
@ECIndex @Column(nullable=false, updatable=false, length=60) | |||
@Enumerated(EnumType.STRING) | |||
@Getter @Setter private AnsibleInstallType installType; | |||
@ECSearchable @ECField(index=50) | |||
@ECForeignKey(entity=BubbleNode.class, cascade=false) | |||
@Column(length=UUID_MAXLEN) | |||
@@ -196,6 +196,7 @@ public class ActivationService { | |||
.setAccount(account.getUuid()) | |||
.setNetwork(network.getUuid()) | |||
.setDomain(network.getDomain()) | |||
.setInstallType(AnsibleInstallType.sage) | |||
.setSize("local") | |||
.setRegion("local") | |||
.setSizeType(ComputeNodeSizeType.local) | |||
@@ -191,6 +191,7 @@ public class StandardNetworkService implements NetworkService { | |||
.setSslPort(network.getSslPort()) | |||
.setNetwork(network.getUuid()) | |||
.setDomain(network.getDomain()) | |||
.setInstallType(network.getInstallType()) | |||
.setAccount(network.getAccount()) | |||
.setSizeType(network.getComputeSizeType()) | |||
.setSize(computeDriver.getSize(network.getComputeSizeType()).getInternalName()) | |||
@@ -217,10 +217,11 @@ public class PackerJob implements Callable<List<PackerImage>> { | |||
images.addAll(Arrays.stream(builds).map(b -> b.toPackerImage(imageName)).collect(Collectors.toList())); | |||
} else { | |||
images.addAll(computeDriver.finalizeIncompletePackerRun(commandResult, jarSha)); | |||
if (empty(images)) { | |||
final List<PackerImage> finalizedImages = computeDriver.finalizeIncompletePackerRun(commandResult, jarSha); | |||
if (empty(finalizedImages)) { | |||
return die("Error executing packer: exit status " + commandResult.getExitStatus()); | |||
} | |||
images.addAll(finalizedImages); | |||
} | |||
} | |||
@@ -0,0 +1,58 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Regenerate the Algo VPN user list from Bubble's device data, re-run the Algo
# "update-users" playbook, then sync the resulting configs back into the bubble
# user's home so the BackupService can archive them.

# Without pipefail, `ansible-playbook ... | tee` reports tee's exit status, so an
# ansible failure would never trigger the `|| die` below.
set -o pipefail

LOG=/tmp/bubble.algo_refresh_users.log

# die <message> -- echo to stderr, record in ${LOG}, exit non-zero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

# log <message> -- append a timestamped line to ${LOG}
function log {
  echo "$(date): ${1}" >> ${LOG}
}

ALGO_BASE=/root/ansible/roles/algo/algo
if [[ ! -d ${ALGO_BASE} ]] ; then
  die "Algo VPN directory ${ALGO_BASE} not found"
fi

CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
if [[ ! -f "${CA_PASS_FILE}" ]] ; then
  die "No CA password file found: ${CA_PASS_FILE}"
fi

if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then
  die "No ${ALGO_BASE}/config.cfg.hbs found"
fi

log "Regenerating algo config..."
java -cp /home/bubble/current/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg"

log "Updating algo VPN users..."
cd ${ALGO_BASE} && \
python3 -m virtualenv --python="$(command -v python3)" .env \
  && source .env/bin/activate \
  && python3 -m pip install -U pip virtualenv \
  && python3 -m pip install -r requirements.txt \
  && ansible-playbook users.yml --tags update-users --skip-tags debug \
  -e "ca_password=$(cat ${CA_PASS_FILE})
provider=local
server=localhost
store_cakey=true
ondemand_cellular=false
ondemand_wifi=false
store_pki=true
dns_adblocking=false
ssh_tunneling=false
endpoint={{ endpoint }}
server_name={{ server_name }}" 2>&1 | tee -a ${LOG} || die "Error running algo users.yml"

# Archive configs in a place that the BackupService can pick them up
log "Sync'ing algo VPN users to bubble..."
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs"
cd /home/bubble && rm -rf configs/* && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home"
log "VPN users successfully sync'd to bubble"
@@ -0,0 +1,49 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Poll a pair of marker files and trigger an Algo VPN user refresh whenever the
# bubble app touches its marker more recently than our last refresh.

LOG=/tmp/bubble.algo_refresh_users_monitor.log

# Append a timestamped message to the monitor log.
function log {
  echo "$(date): ${1}" >> ${LOG}
}

# Report a fatal error on stderr and in the log, then exit non-zero.
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

ALGO_BASE=/root/ansible/roles/algo/algo
[[ -d ${ALGO_BASE} ]] || die "Algo VPN directory ${ALGO_BASE} not found"

# Preflight: the refresh script needs both of these, so fail fast here.
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
[[ -f "${CA_PASS_FILE}" ]] || die "No CA password file found: ${CA_PASS_FILE}"
[[ -f "${ALGO_BASE}/config.cfg.hbs" ]] || die "No ${ALGO_BASE}/config.cfg.hbs found"

# The bubble app touches BUBBLE_USER_MARKER when users change; we touch
# ALGO_USER_MARKER after each refresh, so "bubble newer than algo" means work to do.
BUBBLE_USER_MARKER=/home/bubble/.algo_refresh_users
ALGO_USER_MARKER=${ALGO_BASE}/.algo_refresh_users
if [[ ! -f ${BUBBLE_USER_MARKER} ]] ; then
  touch ${BUBBLE_USER_MARKER} && chown bubble ${BUBBLE_USER_MARKER}
fi
if [[ ! -f ${ALGO_USER_MARKER} ]] ; then
  touch ${ALGO_USER_MARKER}
fi

log "Watching marker file..."
while : ; do
  if [[ $(stat -c %Y ${BUBBLE_USER_MARKER}) -gt $(stat -c %Y ${ALGO_USER_MARKER}) ]] ; then
    touch ${ALGO_USER_MARKER}
    sleep 5s
    log "Refreshing VPN users..."
    /usr/local/bin/algo_refresh_users.sh && log "VPN users successfully refreshed" || log "Error refreshing Algo VPN users"
  fi
  sleep 10s
done
@@ -0,0 +1,12 @@ | |||
{ | |||
"name": "algo-0.0.1", | |||
"priority": 400, | |||
"template": true, | |||
"install": "node", | |||
"config": [ | |||
{"name": "server_name", "value": "[[node.fqdn]]"}, | |||
{"name": "endpoint", "value": "[[node.ip4]]"}, | |||
{"name": "ssl_port", "value": "[[node.sslPort]]"} | |||
], | |||
"tgzB64": "" | |||
} |
@@ -0,0 +1,187 @@ | |||
--- | |||
# This is the list of users to generate. | |||
# Every device must have a unique username. | |||
# You can generate up to 250 users at one time. | |||
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". | |||
users: | |||
<<#each bubbleUsers>> - "<<this>>" | |||
<</each>> | |||
### Advanced users only below this line ### | |||
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false | |||
# Supported on macOS and Linux only (including Windows Subsystem for Linux)
pki_in_tmpfs: true | |||
# If True re-init all existing certificates. Boolean | |||
keys_clean_all: False | |||
# Deploy StrongSwan to enable IPsec support | |||
ipsec_enabled: true | |||
# StrongSwan log level | |||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration | |||
strongswan_log_level: 2 | |||
# rightsourceip for ipsec | |||
# ipv4 | |||
strongswan_network: 10.19.48.0/24 | |||
# ipv6 | |||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48' | |||
# Deploy WireGuard | |||
# WireGuard will listen on 51820/UDP. You might need to change to another port | |||
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some | |||
# mobile data networks. | |||
wireguard_enabled: true | |||
wireguard_port: 51820 | |||
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent. | |||
# This option will keep the "connection" open in the eyes of NAT. | |||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence | |||
wireguard_PersistentKeepalive: 0 | |||
# WireGuard network configuration | |||
wireguard_network_ipv4: 10.19.49.0/24 | |||
wireguard_network_ipv6: fd9d:bc11:4021::/48 | |||
# Reduce the MTU of the VPN tunnel | |||
# Some cloud and internet providers use a smaller MTU (Maximum Transmission | |||
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your | |||
# VPN tunnel some network connections will hang. Algo will attempt to set this | |||
# automatically based on your server, but if connections hang you might need to | |||
# adjust this yourself. | |||
# See: https://github.com/trailofbits/algo/blob/master/docs/troubleshooting.md#various-websites-appear-to-be-offline-through-the-vpn | |||
reduce_mtu: 0 | |||
# Algo will use the following lists to block ads. You can add new block lists | |||
# after deployment by modifying the line starting "BLOCKLIST_URLS=" at: | |||
# /usr/local/sbin/adblock.sh | |||
# If you load very large blocklists, you may also have to modify resource limits: | |||
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf | |||
adblock_lists: | |||
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" | |||
- "https://hosts-file.net/ad_servers.txt" | |||
# Enable DNS encryption. | |||
# If 'false', 'dns_servers' should be specified below. | |||
# DNS encryption can not be disabled if DNS adblocking is enabled | |||
dns_encryption: true | |||
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple | |||
# providers may be specified, but avoid mixing providers that filter results | |||
# (like Cisco) with those that don't (like Cloudflare) or you could get | |||
# inconsistent results. The list of available public providers can be found | |||
# here: | |||
# https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md | |||
dnscrypt_servers: | |||
ipv4: | |||
- cloudflare | |||
ipv6: | |||
- cloudflare-ipv6 | |||
# DNS servers which will be used if 'dns_encryption' is 'false'. | |||
# The default is to use Cloudflare. | |||
dns_servers: | |||
ipv4: | |||
- 1.1.1.1 | |||
- 1.0.0.1 | |||
ipv6: | |||
- 2606:4700:4700::1111 | |||
- 2606:4700:4700::1001 | |||
# Randomly generated IP address for the local dns resolver | |||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
# Your Algo server will automatically install security updates. Some updates | |||
# require a reboot to take effect but your Algo server will not reboot itself | |||
# automatically unless you change 'enabled' below from 'false' to 'true', in | |||
# which case a reboot will take place if necessary at the time specified (as | |||
# HH:MM) in the time zone of your Algo server. The default time zone is UTC. | |||
unattended_reboot: | |||
enabled: false | |||
time: 06:00 | |||
# Block traffic between connected clients | |||
BetweenClients_DROP: true | |||
# Block SMB/CIFS traffic | |||
block_smb: true | |||
# Block NETBIOS traffic | |||
block_netbios: true | |||
congrats: | |||
common: | | |||
"# Congratulations! #" | |||
"# Your Algo server is running. #" | |||
"# Config files and certificates are in the ./configs/ directory. #" | |||
"# Go to https://whoer.net/ after connecting #" | |||
"# and ensure that all your traffic passes through the VPN. #" | |||
"# Local DNS resolver {{ local_service_ip }}{{ ', ' + local_service_ipv6 if ipv6_support else '' }} #" | |||
p12_pass: | | |||
"# The p12 and SSH keys password for new users is {{ p12_export_password }} #" | |||
ca_key_pass: | | |||
"# The CA key password is {{ CA_password|default(omit) }} #" | |||
ssh_access: | | |||
"# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #" | |||
SSH_keys: | |||
comment: algo@ssh | |||
private: configs/algo.pem | |||
private_tmp: /tmp/algo-ssh.pem | |||
public: configs/algo.pem.pub | |||
cloud_providers: | |||
azure: | |||
size: Standard_B1S | |||
image: 19.04 | |||
digitalocean: | |||
size: s-1vcpu-1gb | |||
image: "ubuntu-19-04-x64" | |||
ec2: | |||
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest. | |||
encrypted: true | |||
# Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP | |||
# Additional prompt will be raised to determine which IP to use | |||
use_existing_eip: false | |||
size: t2.micro | |||
image: | |||
name: "ubuntu-disco-19.04" | |||
owner: "099720109477" | |||
gce: | |||
size: f1-micro | |||
image: ubuntu-1904 | |||
external_static_ip: false | |||
lightsail: | |||
size: nano_1_0 | |||
image: ubuntu_18_04 | |||
scaleway: | |||
size: DEV1-S | |||
image: Ubuntu Bionic Beaver | |||
arch: x86_64 | |||
hetzner: | |||
server_type: cx11 | |||
image: ubuntu-18.04 | |||
openstack: | |||
flavor_ram: ">=512" | |||
image: Ubuntu-18.04 | |||
cloudstack: | |||
size: Micro | |||
image: Linux Ubuntu 19.04 64-bit | |||
disk: 10 | |||
vultr: | |||
os: Ubuntu 19.04 x64 | |||
size: 1024 MB RAM,25 GB SSD,1.00 TB BW | |||
local: | |||
fail_hint: | |||
- Sorry, but something went wrong! | |||
- Please check the troubleshooting guide. | |||
- https://trailofbits.github.io/algo/troubleshooting.html | |||
booleans_map: | |||
Y: true | |||
y: true |
@@ -0,0 +1,5 @@ | |||
# Supervisor unit for the Algo user-refresh monitor loop.
# The script writes its own log file, so supervisor's captured output is discarded.
[program:algo_refresh_users_monitor]
command=/usr/local/bin/algo_refresh_users_monitor.sh
stdout_logfile=/dev/null
stderr_logfile=/dev/null
@@ -0,0 +1,5 @@ | |||
# Supervisor unit for the WireGuard connection monitor loop.
# The script writes its own log file, so supervisor's captured output is discarded.
[program:wg_monitor_connections]
command=/usr/local/sbin/wg_monitor_connections.sh
stdout_logfile=/dev/null
stderr_logfile=/dev/null
@@ -0,0 +1,66 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Watch `wg show all` and record, for the bubble app, which device owns each
# connected WireGuard IP. Two kinds of files are kept under BUBBLE_DEVICE_DIR:
#   ip_<addr>      -> the single device UUID that owns this IP
#   device_<uuid>  -> one line per IP assigned to this device

LOG=/tmp/bubble.wg_monitor_connections.log

# die <message> -- echo to stderr, record in ${LOG}, exit non-zero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

# log <message> -- append a timestamped line to ${LOG}
function log {
  echo "$(date): ${1}" >> ${LOG}
}

ALGO_CONFIGS=/root/ansible/roles/algo/algo/configs
BUBBLE_DEVICE_DIR=/home/bubble/wg_devices
if [[ ! -d ${BUBBLE_DEVICE_DIR} ]] ; then
  mkdir -p ${BUBBLE_DEVICE_DIR} && chown -R bubble ${BUBBLE_DEVICE_DIR} && chmod 700 ${BUBBLE_DEVICE_DIR} || die "Error creating ${BUBBLE_DEVICE_DIR}"
fi

while : ; do
  peer=""
  IFS=$'\n'
  # `wg show all` prints a "peer: <pubkey>" line followed by per-peer fields;
  # we wait for the matching "allowed ips:" line before recording anything.
  for line in $(wg show all) ; do
    if [[ ! -z "${peer}" ]] ; then
      if [[ $(echo "${line}" | tr -d ' ') == allowed* ]] ; then
        for ip in $(echo "${line}" | cut -d: -f2- | tr ',' '\n' | tr -d ' ' | cut -d/ -f1) ; do
          # Algo stores each device's public key in configs/*/wireguard/public/<device-uuid>;
          # -F matches the base64 key literally (it may contain '+' and '/').
          device_uuids="$(find $(find $(find ${ALGO_CONFIGS} -type d -name wireguard) -type d -name public) -type f | xargs grep -l -F -- "${peer}" | xargs -n 1 basename)"
          if [[ $(echo "${device_uuids}" | wc -l | tr -d ' ') -gt 1 ]] ; then
            log "Multiple device UUIDs found for IP ${ip} (not recording anything): ${device_uuids}"
            continue
          fi
          device="$(echo "${device_uuids}" | head -1 | tr -d ' ')"

          ip_file="${BUBBLE_DEVICE_DIR}/ip_$(echo ${ip})"
          if [[ ! -f ${ip_file} ]] ; then
            touch ${ip_file} && chown bubble ${ip_file} && chmod 400 ${ip_file} || log "Error creating ${ip_file}"
          fi
          # The ip_ file holds the owning device UUID, so check for the device.
          # (Checking for the IP here would never match, re-writing the file and
          # re-logging on every pass.)
          device_exists=$(grep -c "${device}" ${ip_file})
          if [[ ${device_exists} -eq 0 ]] ; then
            log "recorded device ${device} for IP ${ip}"
            echo "${device}" > ${ip_file} || log "Error writing ${device} to ${ip_file}"
          fi

          device_file="${BUBBLE_DEVICE_DIR}/device_$(echo ${device})"
          if [[ ! -f ${device_file} ]] ; then
            touch ${device_file} && chown bubble ${device_file} && chmod 400 ${device_file} || log "Error creating ${device_file}"
          fi
          ip_exists=$(grep -c "${ip}" ${device_file})
          if [[ ${ip_exists} -eq 0 ]] ; then
            log "recorded IP ${ip} for device ${device}"
            echo "${ip}" >> ${device_file} || log "Error writing ${ip} to ${device_file}"
          fi
        done
        peer=""
      fi
    elif [[ ${line} == peer* ]] ; then
      peer="$(echo "${line}" | awk '{print $NF}')"
    fi
  done
  sleep 30s
done
@@ -0,0 +1,55 @@ | |||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Insert additional firewall rules (ahead of Algo's drop rules) so the Bubble
# web app remains reachable: public HTTP/HTTPS plus the two admin ports.
- name: Allow HTTP
  iptables:
    chain: INPUT
    action: insert
    rule_num: 5
    protocol: tcp
    destination_port: 80
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTP connections
  become: yes

- name: Allow HTTPS
  iptables:
    chain: INPUT
    action: insert
    rule_num: 6
    protocol: tcp
    destination_port: 443
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTPS connections
  become: yes

- name: Allow admin HTTPS on port {{ ssl_port }}
  iptables:
    chain: INPUT
    action: insert
    rule_num: 7
    protocol: tcp
    destination_port: "{{ ssl_port }}"
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new admin SSL connections
  become: yes

- name: Allow admin HTTP on port 1080
  iptables:
    chain: INPUT
    action: insert
    rule_num: 8
    protocol: tcp
    destination_port: "1080"
    ctstate: NEW
    syn: match
    jump: ACCEPT
    # was a copy-paste of the SSL rule's comment; this rule is plain HTTP
    comment: Accept new admin HTTP connections
  become: yes
@@ -0,0 +1,65 @@ | |||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Unpack Algo, install the Bubble refresh/monitor scripts, run the Algo
# install, and manage the supervisor-driven monitor processes.
- name: Unzip algo master.zip
  unarchive:
    src: master.zip
    dest: /root/ansible/roles/algo

- name: Write algo config.cfg.hbs
  copy:
    src: config.cfg.hbs
    dest: /root/ansible/roles/algo/algo/config.cfg.hbs

- name: Install algo_refresh_users script and monitor
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: root
    mode: 0500
  with_items:
    - "algo_refresh_users.sh"
    - "algo_refresh_users_monitor.sh"

- name: Install algo_refresh_users_monitor supervisor conf file
  copy:
    src: supervisor_algo_refresh_users_monitor.conf
    dest: /etc/supervisor/conf.d/algo_refresh_users_monitor.conf

- name: Write install_algo.sh template
  template:
    src: install_algo.sh.j2
    dest: /root/ansible/roles/algo/algo/install_algo.sh
    owner: root
    group: root
    mode: 0500

- name: Install wg_monitor_connections script
  copy:
    src: wg_monitor_connections.sh
    dest: "/usr/local/sbin/wg_monitor_connections.sh"
    owner: root
    group: root
    mode: 0500

- name: Install wg_monitor_connections supervisor conf file
  copy:
    src: supervisor_wg_monitor_connections.conf
    dest: /etc/supervisor/conf.d/wg_monitor_connections.conf

# Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
- name: Run algo playbook to install algo
  shell: /root/ansible/roles/algo/algo/install_algo.sh
  when: restore_key is not defined

# Don't start monitors when in restore mode, bubble_restore_monitor.sh will start it after algo is installed
# (these two tasks previously shared the install task's name, making logs ambiguous)
- name: Reload supervisor and restart algo monitors
  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor && supervisorctl restart wg_monitor_connections"
  when: restore_key is not defined

- name: Reload supervisor and stop algo monitors (restore mode)
  shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl stop algo_refresh_users_monitor && supervisorctl stop wg_monitor_connections"
  when: restore_key is defined

- include: algo_firewall.yml
@@ -0,0 +1,40 @@ | |||
#!/bin/bash
#
# Install the Algo VPN from this directory: regenerate config.cfg from its
# handlebars template, run the Algo main playbook, then archive the generated
# configs where the BackupService can find them.

# Fatal-error helper: print to stderr and exit non-zero.
function die {
  echo 1>&2 "${1}"
  exit 1
}

ALGO_BASE="$(cd $(dirname $0) && pwd)"
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
cd ${ALGO_BASE}

[[ -f "${ALGO_BASE}/config.cfg.hbs" ]] || die "No ${ALGO_BASE}/config.cfg.hbs found"

# Render config.cfg from the handlebars template using the bubble jar.
java -cp /home/bubble/current/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg"

# Bootstrap Algo's virtualenv and run the main playbook.
python3 -m virtualenv --python="$(command -v python3)" .env \
  && source .env/bin/activate \
  && python3 -m pip install -U pip virtualenv \
  && python3 -m pip install -r requirements.txt \
  && ansible-playbook main.yml --skip-tags debug \
  -e "ca_password_file=${CA_PASS_FILE}
ca_password_file_owner=bubble
provider=local
server=localhost
store_cakey=true
ondemand_cellular=false
ondemand_wifi=false
store_pki=true
dns_adblocking=false
ssh_tunneling=false
endpoint={{ endpoint }}
server_name={{ server_name }}" || die "Error installing algo"

# Archive configs in a place that the BackupService can pick them up
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs"
cd /home/bubble && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home"
@@ -0,0 +1,58 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Regenerate the Algo VPN user list from Bubble's device data, re-run the Algo
# "update-users" playbook, then sync the resulting configs back into the bubble
# user's home so the BackupService can archive them.

# Without pipefail, `ansible-playbook ... | tee` reports tee's exit status, so an
# ansible failure would never trigger the `|| die` below.
set -o pipefail

LOG=/tmp/bubble.algo_refresh_users.log

# die <message> -- echo to stderr, record in ${LOG}, exit non-zero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

# log <message> -- append a timestamped line to ${LOG}
function log {
  echo "$(date): ${1}" >> ${LOG}
}

ALGO_BASE=/root/ansible/roles/algo/algo
if [[ ! -d ${ALGO_BASE} ]] ; then
  die "Algo VPN directory ${ALGO_BASE} not found"
fi

CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
if [[ ! -f "${CA_PASS_FILE}" ]] ; then
  die "No CA password file found: ${CA_PASS_FILE}"
fi

if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then
  die "No ${ALGO_BASE}/config.cfg.hbs found"
fi

log "Regenerating algo config..."
java -cp /home/bubble/current/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg"

log "Updating algo VPN users..."
cd ${ALGO_BASE} && \
python3 -m virtualenv --python="$(command -v python3)" .env \
  && source .env/bin/activate \
  && python3 -m pip install -U pip virtualenv \
  && python3 -m pip install -r requirements.txt \
  && ansible-playbook users.yml --tags update-users --skip-tags debug \
  -e "ca_password=$(cat ${CA_PASS_FILE})
provider=local
server=localhost
store_cakey=true
ondemand_cellular=false
ondemand_wifi=false
store_pki=true
dns_adblocking=false
ssh_tunneling=false
endpoint={{ endpoint }}
server_name={{ server_name }}" 2>&1 | tee -a ${LOG} || die "Error running algo users.yml"

# Archive configs in a place that the BackupService can pick them up
log "Sync'ing algo VPN users to bubble..."
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs"
cd /home/bubble && rm -rf configs/* && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home"
log "VPN users successfully sync'd to bubble"
@@ -0,0 +1,49 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Poll a pair of marker files and trigger an Algo VPN user refresh whenever the
# bubble app touches its marker more recently than our last refresh.

LOG=/tmp/bubble.algo_refresh_users_monitor.log

# Append a timestamped message to the monitor log.
function log {
  echo "$(date): ${1}" >> ${LOG}
}

# Report a fatal error on stderr and in the log, then exit non-zero.
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

ALGO_BASE=/root/ansible/roles/algo/algo
[[ -d ${ALGO_BASE} ]] || die "Algo VPN directory ${ALGO_BASE} not found"

# Preflight: the refresh script needs both of these, so fail fast here.
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
[[ -f "${CA_PASS_FILE}" ]] || die "No CA password file found: ${CA_PASS_FILE}"
[[ -f "${ALGO_BASE}/config.cfg.hbs" ]] || die "No ${ALGO_BASE}/config.cfg.hbs found"

# The bubble app touches BUBBLE_USER_MARKER when users change; we touch
# ALGO_USER_MARKER after each refresh, so "bubble newer than algo" means work to do.
BUBBLE_USER_MARKER=/home/bubble/.algo_refresh_users
ALGO_USER_MARKER=${ALGO_BASE}/.algo_refresh_users
if [[ ! -f ${BUBBLE_USER_MARKER} ]] ; then
  touch ${BUBBLE_USER_MARKER} && chown bubble ${BUBBLE_USER_MARKER}
fi
if [[ ! -f ${ALGO_USER_MARKER} ]] ; then
  touch ${ALGO_USER_MARKER}
fi

log "Watching marker file..."
while : ; do
  if [[ $(stat -c %Y ${BUBBLE_USER_MARKER}) -gt $(stat -c %Y ${ALGO_USER_MARKER}) ]] ; then
    touch ${ALGO_USER_MARKER}
    sleep 5s
    log "Refreshing VPN users..."
    /usr/local/bin/algo_refresh_users.sh && log "VPN users successfully refreshed" || log "Error refreshing Algo VPN users"
  fi
  sleep 10s
done
@@ -0,0 +1,187 @@ | |||
--- | |||
# This is the list of users to generate. | |||
# Every device must have a unique username. | |||
# You can generate up to 250 users at one time. | |||
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". | |||
users: | |||
<<#each bubbleUsers>> - "<<this>>" | |||
<</each>> | |||
### Advanced users only below this line ### | |||
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false | |||
# Supported on macOS and Linux only (including Windows Subsystem for Linux)
pki_in_tmpfs: true | |||
# If True re-init all existing certificates. Boolean | |||
keys_clean_all: False | |||
# Deploy StrongSwan to enable IPsec support | |||
ipsec_enabled: true | |||
# StrongSwan log level | |||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration | |||
strongswan_log_level: 2 | |||
# rightsourceip for ipsec | |||
# ipv4 | |||
strongswan_network: 10.19.48.0/24 | |||
# ipv6 | |||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48' | |||
# Deploy WireGuard | |||
# WireGuard will listen on 51820/UDP. You might need to change to another port | |||
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some | |||
# mobile data networks. | |||
wireguard_enabled: true | |||
wireguard_port: 51820 | |||
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent. | |||
# This option will keep the "connection" open in the eyes of NAT. | |||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence | |||
wireguard_PersistentKeepalive: 0 | |||
# WireGuard network configuration | |||
wireguard_network_ipv4: 10.19.49.0/24 | |||
wireguard_network_ipv6: fd9d:bc11:4021::/48 | |||
# Reduce the MTU of the VPN tunnel | |||
# Some cloud and internet providers use a smaller MTU (Maximum Transmission | |||
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your | |||
# VPN tunnel some network connections will hang. Algo will attempt to set this | |||
# automatically based on your server, but if connections hang you might need to | |||
# adjust this yourself. | |||
# See: https://github.com/trailofbits/algo/blob/master/docs/troubleshooting.md#various-websites-appear-to-be-offline-through-the-vpn | |||
reduce_mtu: 0 | |||
# Algo will use the following lists to block ads. You can add new block lists | |||
# after deployment by modifying the line starting "BLOCKLIST_URLS=" at: | |||
# /usr/local/sbin/adblock.sh | |||
# If you load very large blocklists, you may also have to modify resource limits: | |||
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf | |||
adblock_lists: | |||
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" | |||
- "https://hosts-file.net/ad_servers.txt" | |||
# Enable DNS encryption. | |||
# If 'false', 'dns_servers' should be specified below. | |||
# DNS encryption can not be disabled if DNS adblocking is enabled | |||
dns_encryption: true | |||
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple | |||
# providers may be specified, but avoid mixing providers that filter results | |||
# (like Cisco) with those that don't (like Cloudflare) or you could get | |||
# inconsistent results. The list of available public providers can be found | |||
# here: | |||
# https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md | |||
dnscrypt_servers: | |||
ipv4: | |||
- cloudflare | |||
ipv6: | |||
- cloudflare-ipv6 | |||
# DNS servers which will be used if 'dns_encryption' is 'false'. | |||
# The default is to use Cloudflare. | |||
dns_servers: | |||
ipv4: | |||
- 1.1.1.1 | |||
- 1.0.0.1 | |||
ipv6: | |||
- 2606:4700:4700::1111 | |||
- 2606:4700:4700::1001 | |||
# Randomly generated IP address for the local dns resolver | |||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
# Your Algo server will automatically install security updates. Some updates | |||
# require a reboot to take effect but your Algo server will not reboot itself | |||
# automatically unless you change 'enabled' below from 'false' to 'true', in | |||
# which case a reboot will take place if necessary at the time specified (as | |||
# HH:MM) in the time zone of your Algo server. The default time zone is UTC. | |||
unattended_reboot: | |||
enabled: false | |||
time: 06:00 | |||
# Block traffic between connected clients | |||
BetweenClients_DROP: true | |||
# Block SMB/CIFS traffic | |||
block_smb: true | |||
# Block NETBIOS traffic | |||
block_netbios: true | |||
congrats: | |||
common: | | |||
"# Congratulations! #" | |||
"# Your Algo server is running. #" | |||
"# Config files and certificates are in the ./configs/ directory. #" | |||
"# Go to https://whoer.net/ after connecting #" | |||
"# and ensure that all your traffic passes through the VPN. #" | |||
"# Local DNS resolver {{ local_service_ip }}{{ ', ' + local_service_ipv6 if ipv6_support else '' }} #" | |||
p12_pass: | | |||
"# The p12 and SSH keys password for new users is {{ p12_export_password }} #" | |||
ca_key_pass: | | |||
"# The CA key password is {{ CA_password|default(omit) }} #" | |||
ssh_access: | | |||
"# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #" | |||
SSH_keys: | |||
comment: algo@ssh | |||
private: configs/algo.pem | |||
private_tmp: /tmp/algo-ssh.pem | |||
public: configs/algo.pem.pub | |||
cloud_providers: | |||
azure: | |||
size: Standard_B1S | |||
image: 19.04 | |||
digitalocean: | |||
size: s-1vcpu-1gb | |||
image: "ubuntu-19-04-x64" | |||
ec2: | |||
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest. | |||
encrypted: true | |||
# Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP | |||
# Additional prompt will be raised to determine which IP to use | |||
use_existing_eip: false | |||
size: t2.micro | |||
image: | |||
name: "ubuntu-disco-19.04" | |||
owner: "099720109477" | |||
gce: | |||
size: f1-micro | |||
image: ubuntu-1904 | |||
external_static_ip: false | |||
lightsail: | |||
size: nano_1_0 | |||
image: ubuntu_18_04 | |||
scaleway: | |||
size: DEV1-S | |||
image: Ubuntu Bionic Beaver | |||
arch: x86_64 | |||
hetzner: | |||
server_type: cx11 | |||
image: ubuntu-18.04 | |||
openstack: | |||
flavor_ram: ">=512" | |||
image: Ubuntu-18.04 | |||
cloudstack: | |||
size: Micro | |||
image: Linux Ubuntu 19.04 64-bit | |||
disk: 10 | |||
vultr: | |||
os: Ubuntu 19.04 x64 | |||
size: 1024 MB RAM,25 GB SSD,1.00 TB BW | |||
local: | |||
fail_hint: | |||
- Sorry, but something went wrong! | |||
- Please check the troubleshooting guide. | |||
- https://trailofbits.github.io/algo/troubleshooting.html | |||
booleans_map: | |||
Y: true | |||
y: true |
@@ -0,0 +1,5 @@ | |||
[program:algo_refresh_users_monitor] | |||
stdout_logfile = /dev/null | |||
stderr_logfile = /dev/null | |||
command=/usr/local/bin/algo_refresh_users_monitor.sh |
@@ -0,0 +1,5 @@ | |||
[program:wg_monitor_connections] | |||
stdout_logfile = /dev/null | |||
stderr_logfile = /dev/null | |||
command=/usr/local/sbin/wg_monitor_connections.sh |
@@ -0,0 +1,66 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Polls `wg show all` and records, for each connected WireGuard peer, the
# mapping between its allowed IPs and its Bubble device UUID. Mappings are
# written as files under BUBBLE_DEVICE_DIR for the Bubble app to read.
LOG=/tmp/bubble.wg_monitor_connections.log
# Print the message to stderr, append it to LOG, then exit with failure
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
# Append a timestamped message to LOG
function log {
  echo "$(date): ${1}" >> ${LOG}
}
ALGO_CONFIGS=/root/ansible/roles/algo/algo/configs
BUBBLE_DEVICE_DIR=/home/bubble/wg_devices
if [[ ! -d ${BUBBLE_DEVICE_DIR} ]] ; then
  mkdir -p ${BUBBLE_DEVICE_DIR} && chown -R bubble ${BUBBLE_DEVICE_DIR} && chmod 700 ${BUBBLE_DEVICE_DIR} || die "Error creating ${BUBBLE_DEVICE_DIR}"
fi
while : ; do
  peer=""
  IFS=$'\n'
  for line in $(wg show all) ; do
    if [[ ! -z "${peer}" ]] ; then
      # the "allowed ips:" line following a "peer:" line lists that peer's IPs
      if [[ $(echo "${line}" | tr -d ' ') == allowed* ]] ; then
        for ip in $(echo "${line}" | cut -d: -f2- | tr ',' '\n' | tr -d ' ' | cut -d/ -f1) ; do
          # device UUID = basename of the public-key file under configs/**/wireguard/public that contains this peer's key
          device_uuids="$(find $(find $(find ${ALGO_CONFIGS} -type d -name wireguard) -type d -name public) -type f | xargs grep -l ${peer} | xargs -n 1 basename)"
          if [[ $(echo "${device_uuids}" | wc -l | tr -d ' ') -gt 1 ]] ; then
            log "Multiple device UUIDs found for IP ${ip} (not recording anything): ${device_uuids}"
            continue
          fi
          device="$(echo "${device_uuids}" | head -1 | tr -d ' ')"
          ip_file="${BUBBLE_DEVICE_DIR}/ip_$(echo ${ip})"
          if [[ ! -f ${ip_file} ]] ; then
            touch ${ip_file} && chown bubble ${ip_file} && chmod 400 ${ip_file} || log "Error creating ${ip_file}"
          fi
          # NOTE(review): this greps for the IP, but ${ip_file} stores a device
          # UUID -- likely intended to grep for ${device}; as written the file is
          # rewritten (with the same content) on every pass. Confirm before changing.
          device_exists=$(grep -c "${ip}" ${ip_file})
          if [[ ${device_exists} -eq 0 ]] ; then
            log "recorded device ${device} for IP ${ip}"
            echo "${device}" > ${ip_file} || log "Error writing ${device} to ${ip_file}"
          fi
          device_file="${BUBBLE_DEVICE_DIR}/device_$(echo ${device})"
          if [[ ! -f ${device_file} ]] ; then
            # bugfix: the failure message previously referenced ${ip_file}
            touch ${device_file} && chown bubble ${device_file} && chmod 400 ${device_file} || log "Error creating ${device_file}"
          fi
          ip_exists=$(grep -c "${ip}" ${device_file})
          if [[ ${ip_exists} -eq 0 ]] ; then
            log "recorded IP ${ip} for device ${device}"
            echo "${ip}" >> ${device_file} || log "Error writing ${ip} to ${device_file}"
          fi
        done
        peer=""
      fi
    elif [[ ${line} == peer* ]] ; then
      peer="$(echo "${line}" | awk '{print $NF}')"
    fi
  done
  sleep 30s
done
@@ -0,0 +1,55 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Insert additional firewall rules to allow required services to function | |||
- name: Allow HTTP | |||
iptables: | |||
chain: INPUT | |||
action: insert | |||
rule_num: 5 | |||
protocol: tcp | |||
destination_port: 80 | |||
ctstate: NEW | |||
syn: match | |||
jump: ACCEPT | |||
comment: Accept new HTTP connections | |||
become: yes | |||
- name: Allow HTTPS | |||
iptables: | |||
chain: INPUT | |||
action: insert | |||
rule_num: 6 | |||
protocol: tcp | |||
destination_port: 443 | |||
ctstate: NEW | |||
syn: match | |||
jump: ACCEPT | |||
comment: Accept new HTTPS connections | |||
become: yes | |||
- name: Allow admin HTTPS on port 1443 | |||
iptables: | |||
chain: INPUT | |||
action: insert | |||
rule_num: 7 | |||
protocol: tcp | |||
destination_port: 1443 | |||
ctstate: NEW | |||
syn: match | |||
jump: ACCEPT | |||
comment: Accept new admin SSL connections | |||
become: yes | |||
# Admin HTTP port; rule comment fixed (was copy-pasted "SSL" from the 1443 task)
- name: Allow admin HTTP on port 1080
  iptables:
    chain: INPUT
    action: insert
    rule_num: 8
    protocol: tcp
    destination_port: 1080
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new admin HTTP connections
  become: yes
@@ -0,0 +1,58 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Stage the algo VPN distribution, render its Bubble config template, then
# install the Bubble helper scripts and their supervisor configurations.
- name: Create algo staging directory
  file:
    path: /root/ansible-staging/roles/algo/algo
    owner: root
    group: root
    mode: 0770
    state: directory
# The dist zip is pinned by sha256 so an upstream change cannot slip in unnoticed
- name: Download algo dist file
  get_url:
    url: https://github.com/getbubblenow/bubble-dist/raw/master/algo/master.zip
    dest: /root/ansible-staging/algo.zip
    checksum: sha256:3d5f297d309af9b956525a2cbb40e0dca216fb735cc35befc3d5a5bdd54163a5
- name: Unzip algo master.zip
  unarchive:
    remote_src: yes
    src: /root/ansible-staging/algo.zip
    dest: /root/ansible-staging/roles/algo
# Handlebars template; rendered with the bubble user list when users are refreshed
- name: Write algo config.cfg.hbs
  copy:
    src: config.cfg.hbs
    dest: /root/ansible-staging/roles/algo/algo/config.cfg.hbs
- name: Install algo_refresh_users script and monitor
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: root
    mode: 0500
  with_items:
    - "algo_refresh_users.sh"
    - "algo_refresh_users_monitor.sh"
- name: Install algo_refresh_users_monitor supervisor conf file
  copy:
    src: supervisor_algo_refresh_users_monitor.conf
    dest: /etc/supervisor/conf.d/algo_refresh_users_monitor.conf
- name: Install wg_monitor_connections script
  copy:
    src: wg_monitor_connections.sh
    dest: "/usr/local/sbin/wg_monitor_connections.sh"
    owner: root
    group: root
    mode: 0500
- name: Install wg_monitor_connections supervisor conf file
  copy:
    src: supervisor_wg_monitor_connections.conf
    dest: /etc/supervisor/conf.d/wg_monitor_connections.conf
# Firewall rules for the web UI / admin ports
- include: algo_firewall.yml
@@ -0,0 +1,129 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import requests | |||
import traceback | |||
import sys | |||
import os | |||
import time | |||
import uuid | |||
import datetime | |||
import redis | |||
import json | |||
from bubble_config import bubble_network, bubble_port | |||
# Write python PID to file so that mitmdump_monitor.sh can check for excessive memory usage and restart if needed
MITMDUMP_PID_FILE_PATH = '/home/mitmproxy/mitmdump.pid'
MITMDUMP_PID_FILE = open(MITMDUMP_PID_FILE_PATH, "w")
MITMDUMP_PID_FILE.write("%d" % os.getpid())
MITMDUMP_PID_FILE.close()
# Request headers inspected when building matcher requests
HEADER_USER_AGENT = 'User-Agent'
HEADER_REFERER = 'Referer'
# Keys for per-flow context values (see add_flow_ctx / get_flow_ctx)
CTX_BUBBLE_MATCHERS='X-Bubble-Matchers'
CTX_BUBBLE_ABORT='X-Bubble-Abort'
CTX_BUBBLE_PASSTHRU='X-Bubble-Passthru'
CTX_BUBBLE_REQUEST_ID='X-Bubble-RequestId'
CTX_CONTENT_LENGTH='X-Bubble-Content-Length'
CTX_CONTENT_LENGTH_SENT='X-Bubble-Content-Length-Sent'
# Requests whose path starts with this prefix are routed to the local bubble API
BUBBLE_URI_PREFIX='/__bubble/'
# Local redis, used for activity logging
REDIS = redis.Redis(host='127.0.0.1', port=6379, db=0)
BUBBLE_ACTIVITY_LOG_PREFIX = 'bubble_activity_log_'
BUBBLE_ACTIVITY_LOG_EXPIRATION = 600  # activity entries expire after 10 minutes
def redis_set(name, value, ex):
    # Upsert with expiration: the nx call creates the key if absent, the xx call
    # overwrites it (refreshing the TTL) if present; together every path leaves
    # the key set with expiry ex.
    # NOTE(review): a single REDIS.set(name, value, ex=ex) appears equivalent and
    # atomic -- confirm there is no deliberate reason for the two-step write.
    REDIS.set(name, value, nx=True, ex=ex)
    REDIS.set(name, value, xx=True, ex=ex)
def bubble_log(message):
    """Write a time-stamped message to stderr (picked up by mitmproxy's log)."""
    timestamp = datetime.datetime.time(datetime.datetime.now())
    print('{}: {}'.format(timestamp, message), file=sys.stderr)
def bubble_activity_log(client_addr, server_addr, event, data):
    """Record one activity event in redis under a unique, time-ordered key.

    The key embeds millis-since-epoch plus a UUID so entries sort by time and
    never collide; the entry expires after BUBBLE_ACTIVITY_LOG_EXPIRATION secs.
    """
    millis = str(time.time() * 1000.0)
    key = BUBBLE_ACTIVITY_LOG_PREFIX + millis + '_' + str(uuid.uuid4())
    payload = {
        'source': 'mitmproxy',
        'client_addr': client_addr,
        'server_addr': server_addr,
        'event': event,
        'data': data
    }
    value = json.dumps(payload)
    bubble_log('bubble_activity_log: setting '+key+' = '+value)
    redis_set(key, value, BUBBLE_ACTIVITY_LOG_EXPIRATION)
def bubble_passthru(remote_addr, addr, fqdn):
    """Ask the local bubble API whether traffic for addr/fqdn should pass through.

    Returns True when the API answers 2xx; False on non-2xx or any exception.
    """
    request_headers = {
        'X-Forwarded-For': remote_addr,
        'Accept' : 'application/json',
        'Content-Type': 'application/json'
    }
    try:
        payload = {
            'addr': str(addr),
            'fqdn': str(fqdn),
            'remoteAddr': remote_addr
        }
        resp = requests.post('http://127.0.0.1:'+bubble_port+'/api/filter/passthru',
                             headers=request_headers, json=payload)
        return resp.ok
    except Exception as e:
        bubble_log('bubble_passthru API call failed: '+repr(e))
        traceback.print_exc()
        return False
def bubble_matchers(req_id, remote_addr, flow, host):
    """POST request metadata (fqdn, uri, user-agent, referer) to the bubble
    matcher API for req_id and return the parsed JSON decision.

    Returns None when the API call fails or responds non-2xx.
    """
    headers = {
        'X-Forwarded-For': remote_addr,
        'Accept' : 'application/json',
        'Content-Type': 'application/json'
    }
    # Substitute placeholder values for absent headers so the API always gets both fields
    if HEADER_USER_AGENT not in flow.request.headers:
        bubble_log('bubble_matchers: no User-Agent header, setting to UNKNOWN')
        user_agent = 'UNKNOWN'
    else:
        user_agent = flow.request.headers[HEADER_USER_AGENT]
    if HEADER_REFERER not in flow.request.headers:
        bubble_log('bubble_matchers: no Referer header, setting to NONE')
        referer = 'NONE'
    else:
        try:
            # encode/decode round-trip forces a cleanly decodable str; bad bytes fall back to NONE
            referer = flow.request.headers[HEADER_REFERER].encode().decode()
        except Exception as e:
            bubble_log('bubble_matchers: error parsing Referer header: '+repr(e))
            referer = 'NONE'
    try:
        data = {
            'requestId': req_id,
            'fqdn': host,
            'uri': flow.request.path,
            'userAgent': user_agent,
            'referer': referer,
            'remoteAddr': remote_addr
        }
        response = requests.post('http://127.0.0.1:'+bubble_port+'/api/filter/matchers/'+req_id, headers=headers, json=data)
        if response.ok:
            return response.json()
        bubble_log('bubble_matchers response not OK, returning empty matchers array: '+str(response.status_code)+' / '+repr(response.text))
    except Exception as e:
        bubble_log('bubble_matchers API call failed: '+repr(e))
        traceback.print_exc()
    return None
def add_flow_ctx(flow, name, value):
    """Stash a named value on the flow, creating its bubble context dict on first use."""
    try:
        flow.bubble_ctx[name] = value
    except AttributeError:
        # first value stored on this flow
        flow.bubble_ctx = {name: value}
def get_flow_ctx(flow, name):
    """Return the value stored under name by add_flow_ctx, or None if absent.

    Safe to call on flows that never had a bubble context attached; also
    replaces the non-idiomatic `not name in ...` membership test.
    """
    return getattr(flow, 'bubble_ctx', {}).get(name)
@@ -0,0 +1,5 @@ | |||
# Values are interpolated from Ansible/handlebars variables at install time.
# Removed the stray trailing semicolon on bubble_port (inconsistent, un-Pythonic).
bubble_network = '{{ bubble_network }}'
bubble_port = '{{ admin_port }}'
bubble_host = '{{ server_name }}'
bubble_host_alias = '{{ server_alias }}'
bubble_ssl_port = '{{ ssl_port }}'
@@ -0,0 +1,178 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import re | |||
import requests | |||
import urllib | |||
import traceback | |||
from mitmproxy.net.http import Headers | |||
from bubble_config import bubble_port, bubble_host_alias | |||
from bubble_api import CTX_BUBBLE_MATCHERS, CTX_BUBBLE_ABORT, BUBBLE_URI_PREFIX, \ | |||
CTX_BUBBLE_REQUEST_ID, CTX_CONTENT_LENGTH, CTX_CONTENT_LENGTH_SENT, bubble_log, get_flow_ctx, add_flow_ctx | |||
BUFFER_SIZE = 4096  # NOTE(review): unused in the visible code -- confirm before removing
HEADER_CONTENT_TYPE = 'Content-Type'
HEADER_CONTENT_LENGTH = 'Content-Length'
HEADER_CONTENT_ENCODING = 'Content-Encoding'
HEADER_TRANSFER_ENCODING = 'Transfer-Encoding'
# Chunks are forwarded to the filter API as raw bytes
BINARY_DATA_HEADER = {HEADER_CONTENT_TYPE: 'application/octet-stream'}
def filter_chunk(chunk, req_id, last, content_encoding=None, content_type=None, content_length=None):
    """POST one chunk of response data to the local filter API; return filtered bytes.

    Metadata (type/encoding/length) rides along as query params on the first
    chunk; last=True tells the API to flush. Returns b'' on a non-2xx response.
    """
    url = 'http://127.0.0.1:' + bubble_port + '/api/filter/apply/' + req_id
    params = []
    if chunk and content_type:
        params.append('type=' + urllib.parse.quote_plus(content_type))
        if content_encoding:
            params.append('encoding=' + urllib.parse.quote_plus(content_encoding))
        if content_length:
            params.append('length=' + str(content_length))
    if last:
        params.append('last=true')
    if params:
        url = url + '?' + '&'.join(params)
    bubble_log('filter_chunk: url='+url)
    response = requests.post(url, data=chunk, headers=BINARY_DATA_HEADER)
    if not response.ok:
        bubble_log('filter_chunk: Error fetching ' + url + ', HTTP status ' + str(response.status_code))
        return b''
    return response.content
def bubble_filter_chunks(flow, chunks, req_id, content_encoding, content_type):
    """
    chunks is a generator that can be used to iterate over all chunks.
    Streams each chunk through filter_chunk, tracking bytes sent against the
    original Content-Length (stored in flow ctx by responseheaders) so the
    final chunk can be flagged last=True. Yields filtered chunks; on any
    exception yields a single None.
    """
    first = True
    content_length = get_flow_ctx(flow, CTX_CONTENT_LENGTH)
    try:
        for chunk in chunks:
            if content_length:
                # we know the total size: flag the chunk that reaches it as the last
                bytes_sent = get_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT)
                chunk_len = len(chunk)
                last = chunk_len + bytes_sent >= content_length
                bubble_log('bubble_filter_chunks: content_length = '+str(content_length)+', bytes_sent = '+str(bytes_sent))
                add_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT, bytes_sent + chunk_len)
            else:
                last = False
            if first:
                # only the first chunk carries the metadata query params
                yield filter_chunk(chunk, req_id, last, content_encoding, content_type, content_length)
                first = False
            else:
                yield filter_chunk(chunk, req_id, last)
        if not content_length:
            yield filter_chunk(None, req_id, True)  # get the last bits of data
    except Exception as e:
        bubble_log('bubble_filter_chunks: exception='+repr(e))
        traceback.print_exc()
        yield None
def bubble_modify(flow, req_id, content_encoding, content_type):
    """Build a mitmproxy stream callable that routes chunk generators through the bubble filter."""
    def _stream(chunks):
        return bubble_filter_chunks(flow, chunks, req_id, content_encoding, content_type)
    return _stream
def send_bubble_response(response):
    """Yield the bubble API response body in 8KB chunks (used as a mitmproxy stream)."""
    yield from response.iter_content(8192)
def responseheaders(flow):
    """mitmproxy hook: route special bubble requests, abort flagged flows, or
    install the filtering stream for flows with matchers.

    Three paths:
      1. Paths under BUBBLE_URI_PREFIX are proxied to the local bubble API and
         the API's response replaces the upstream response.
      2. Flows marked with CTX_BUBBLE_ABORT get an empty response with that status.
      3. Flows with matchers (set earlier in the request phase) whose
         Content-Type matches a matcher regex get their body streamed through
         the bubble filter; everything else passes through untouched.
    """
    if flow.request.path and flow.request.path.startswith(BUBBLE_URI_PREFIX):
        # special bubble request: forward to the local API, mirror its response
        uri = 'http://127.0.0.1:' + bubble_port + '/' + flow.request.path[len(BUBBLE_URI_PREFIX):]
        bubble_log('responseheaders: sending special bubble request to '+uri)
        headers = {
            'Accept' : 'application/json',
            'Content-Type': 'application/json'
        }
        response = None
        if flow.request.method == 'GET':
            response = requests.get(uri, headers=headers, stream=True)
        elif flow.request.method == 'POST':
            bubble_log('responseheaders: special bubble request: POST content is '+str(flow.request.content))
            headers['Content-Length'] = str(len(flow.request.content))
            response = requests.post(uri, data=flow.request.content, headers=headers)
        else:
            bubble_log('responseheaders: special bubble request: method '+flow.request.method+' not supported')
        if response is not None:
            bubble_log('responseheaders: special bubble request: response status = '+str(response.status_code))
            flow.response.headers = Headers()
            for key, value in response.headers.items():
                flow.response.headers[key] = value
            flow.response.status_code = response.status_code
            flow.response.stream = lambda chunks: send_bubble_response(response)
    else:
        abort_code = get_flow_ctx(flow, CTX_BUBBLE_ABORT)
        if abort_code is not None:
            # request phase decided to block this flow: empty body, abort status
            bubble_log('responseheaders: aborting request with HTTP status '+str(abort_code))
            flow.response.headers = Headers()
            flow.response.status_code = abort_code
            flow.response.stream = lambda chunks: []
        else:
            req_id = get_flow_ctx(flow, CTX_BUBBLE_REQUEST_ID)
            matchers = get_flow_ctx(flow, CTX_BUBBLE_MATCHERS)
            if req_id is not None and matchers is not None:
                bubble_log('responseheaders: req_id='+req_id+' with matchers: '+repr(matchers))
                if HEADER_CONTENT_TYPE in flow.response.headers:
                    content_type = flow.response.headers[HEADER_CONTENT_TYPE]
                    if matchers:
                        # only filter when at least one matcher's content-type regex matches
                        any_content_type_matches = False
                        for m in matchers:
                            if 'contentTypeRegex' in m:
                                typeRegex = m['contentTypeRegex']
                                if typeRegex is None:
                                    # default: filter HTML responses
                                    typeRegex = '^text/html.*'
                                if re.match(typeRegex, content_type):
                                    any_content_type_matches = True
                                    bubble_log('responseheaders: req_id='+req_id+' found at least one matcher for content_type ('+content_type+'), filtering')
                                    break
                        if not any_content_type_matches:
                            bubble_log('responseheaders: req_id='+req_id+' no matchers for content_type ('+content_type+'), passing thru')
                            return
                        if HEADER_CONTENT_ENCODING in flow.response.headers:
                            content_encoding = flow.response.headers[HEADER_CONTENT_ENCODING]
                        else:
                            content_encoding = None
                        # drop Content-Length: filtering changes the body size
                        content_length_value = flow.response.headers.pop(HEADER_CONTENT_LENGTH, None)
                        bubble_log('responseheaders: req_id='+req_id+' content_encoding='+repr(content_encoding) + ', content_type='+repr(content_type))
                        flow.response.stream = bubble_modify(flow, req_id, content_encoding, content_type)
                        if content_length_value:
                            flow.response.headers['transfer-encoding'] = 'chunked'
                            # find server_conn to set fake_chunks on
                            if flow.live and flow.live.ctx:
                                # walk the layer ctx chain until one exposes server_conn
                                ctx = flow.live.ctx
                                while not hasattr(ctx, 'server_conn'):
                                    if hasattr(ctx, 'ctx'):
                                        ctx = ctx.ctx
                                    else:
                                        bubble_log('responseheaders: error finding server_conn. last ctx has no further ctx. type='+str(type(ctx))+' vars='+str(vars(ctx)))
                                        return
                                if not hasattr(ctx, 'server_conn'):
                                    bubble_log('responseheaders: error finding server_conn. ctx type='+str(type(ctx))+' vars='+str(vars(ctx)))
                                    return
                                content_length = int(content_length_value)
                                ctx.server_conn.rfile.fake_chunks = content_length
                                # seed the byte counters read by bubble_filter_chunks
                                add_flow_ctx(flow, CTX_CONTENT_LENGTH, content_length)
                                add_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT, 0)
                    else:
                        bubble_log('responseheaders: no matchers, passing thru')
                        pass
                else:
                    bubble_log('responseheaders: no '+HEADER_CONTENT_TYPE +' header, passing thru')
                    pass
            else:
                bubble_log('responseheaders: no '+CTX_BUBBLE_MATCHERS +' in ctx, passing thru')
                pass
@@ -0,0 +1,112 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Parts of this are borrowed from tls_passthrough.py in the mitmproxy project. The mitmproxy license is reprinted here: | |||
# | |||
# Copyright (c) 2013, Aldo Cortesi. All rights reserved. | |||
# | |||
# Permission is hereby granted, free of charge, to any person obtaining a copy | |||
# of this software and associated documentation files (the "Software"), to deal | |||
# in the Software without restriction, including without limitation the rights | |||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
# copies of the Software, and to permit persons to whom the Software is | |||
# furnished to do so, subject to the following conditions: | |||
# | |||
# The above copyright notice and this permission notice shall be included in | |||
# all copies or substantial portions of the Software. | |||
# | |||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
# SOFTWARE. | |||
# | |||
from mitmproxy.proxy.protocol import TlsLayer, RawTCPLayer | |||
from mitmproxy.exceptions import TlsProtocolException | |||
from bubble_api import bubble_log, bubble_passthru, bubble_activity_log, redis_set | |||
import redis | |||
import json | |||
# Redis key prefixes shared with the DNS and passthru layers
REDIS_DNS_PREFIX = 'bubble_dns_'  # maps IP addr -> FQDN (written elsewhere -- TODO confirm writer)
REDIS_PASSTHRU_PREFIX = 'bubble_passthru_'  # caches passthru decisions per client/server pair
REDIS_PASSTHRU_DURATION = 60 * 60  # 1 hour timeout on passthru
REDIS = redis.Redis(host='127.0.0.1', port=6379, db=0)
def passthru_cache_prefix(client_addr, server_addr):
    """Redis key for the cached passthru decision of one client/server pair."""
    return '{}{}_{}'.format(REDIS_PASSTHRU_PREFIX, client_addr, server_addr)
class TlsFeedback(TlsLayer):
    """
    Monkey-patch _establish_tls_with_client to get feedback if TLS could be established
    successfully on the client connection (which may fail due to cert pinning).
    """
    def _establish_tls_with_client(self):
        client_address = self.client_conn.address[0]
        server_address = self.server_conn.address[0]
        try:
            super(TlsFeedback, self)._establish_tls_with_client()
        except TlsProtocolException as e:
            # Client rejected our certificate (likely cert pinning): cache a
            # passthru=True decision so this pair is not intercepted again for
            # REDIS_PASSTHRU_DURATION, then re-raise to fail this handshake.
            bubble_log('_establish_tls_with_client: TLS error for '+repr(server_address)+', enabling passthru')
            cache_key = passthru_cache_prefix(client_address, server_address)
            fqdn = fqdn_for_addr(server_address)
            redis_set(cache_key, json.dumps({'fqdn': fqdn, 'addr': server_address, 'passthru': True}), ex=REDIS_PASSTHRU_DURATION)
            raise e
def fqdn_for_addr(addr):
    """Look up the FQDN recorded in redis for an IP address.

    Returns the decoded FQDN string, or '' when no mapping exists.
    """
    fqdn = REDIS.get(REDIS_DNS_PREFIX + addr)
    if fqdn is None or len(fqdn) == 0:
        # fixed log prefix: previously logged as 'check_bubble_passthru', which
        # misattributed these messages to a different function
        bubble_log('fqdn_for_addr: no FQDN found for addr '+repr(addr)+', checking raw addr')
        fqdn = b''
    return fqdn.decode()
def check_bubble_passthru(remote_addr, addr, fqdn):
    """Ask the bubble API for a passthru verdict and wrap it in a cacheable dict."""
    if not bubble_passthru(remote_addr, addr, fqdn):
        bubble_log('check_bubble_passthru: bubble_passthru returned False for FQDN/addr '+repr(fqdn)+'/'+repr(addr)+', returning False')
        return {'fqdn': fqdn, 'addr': addr, 'passthru': False}
    bubble_log('check_bubble_passthru: bubble_passthru returned True for FQDN/addr '+repr(fqdn)+'/'+repr(addr)+', returning True')
    return {'fqdn': fqdn, 'addr': addr, 'passthru': True}
def should_passthru(remote_addr, addr):
    """Decide whether TLS for (client, server) should pass through un-intercepted.

    Consults the redis cache first; on a miss, resolves the FQDN and asks the
    bubble API via check_bubble_passthru, caching the decision for
    REDIS_PASSTHRU_DURATION seconds.
    Returns a dict: {'fqdn': ..., 'addr': ..., 'passthru': bool}.
    """
    prefix = 'should_passthru: '+repr(addr)+' '
    bubble_log(prefix+'starting...')
    cache_key = passthru_cache_prefix(remote_addr, addr)
    passthru_json = REDIS.get(cache_key)
    if passthru_json is None or len(passthru_json) == 0:
        bubble_log(prefix+' not in redis or empty, calling check_bubble_passthru...')
        fqdn = fqdn_for_addr(addr)
        if fqdn is None or len(fqdn) == 0:
            # no DNS record yet: do NOT cache, so a later DNS hit can change the answer
            bubble_log(prefix+' no fqdn found for addr '+addr+', returning (uncached) passthru = False')
            return {'fqdn': None, 'addr': addr, 'passthru': False}
        passthru = check_bubble_passthru(remote_addr, addr, fqdn)
        bubble_log(prefix+'check_bubble_passthru returned '+repr(passthru)+", storing in redis...")
        redis_set(cache_key, json.dumps(passthru), ex=REDIS_PASSTHRU_DURATION)
    else:
        bubble_log('found passthru_json='+str(passthru_json)+', touching key in redis')
        passthru = json.loads(passthru_json)
        # NOTE(review): Redis TOUCH updates last-access time, not TTL -- if the
        # intent is to extend the cache entry's life, EXPIRE is needed; confirm.
        REDIS.touch(cache_key)
    bubble_log(prefix+'returning '+repr(passthru))
    return passthru
def next_layer(next_layer):
    """mitmproxy hook: choose TLS passthru or interception per connection.

    When the upcoming layer is a client-TLS layer, look up (or compute) the
    passthru decision for this client/server pair. Passthru connections are
    replaced with a RawTCPLayer (no interception); intercepted connections get
    the TlsFeedback class so cert-pinning failures can enable passthru later.
    """
    if isinstance(next_layer, TlsLayer) and next_layer._client_tls:
        client_address = next_layer.client_conn.address[0]
        server_address = next_layer.server_conn.address[0]
        passthru = should_passthru(client_address, server_address)
        if passthru['passthru']:
            bubble_log('next_layer: TLS passthru for ' + repr(next_layer.server_conn.address))
            bubble_activity_log(client_address, server_address, 'tls_passthru', passthru['fqdn'])
            next_layer_replacement = RawTCPLayer(next_layer.ctx, ignore=True)
            next_layer.reply.send(next_layer_replacement)
        else:
            bubble_activity_log(client_address, server_address, 'tls_intercept', passthru['fqdn'])
            # swap in the feedback subclass so a failed client handshake is recorded
            next_layer.__class__ = TlsFeedback
@@ -0,0 +1,148 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import re | |||
import time | |||
import uuid | |||
from bubble_api import bubble_matchers, bubble_log, bubble_activity_log, CTX_BUBBLE_MATCHERS, BUBBLE_URI_PREFIX, CTX_BUBBLE_ABORT, CTX_BUBBLE_PASSTHRU, CTX_BUBBLE_REQUEST_ID, add_flow_ctx | |||
from bubble_config import bubble_host, bubble_host_alias | |||
# This regex splits the host header into host and port.
# Handles the edge case of IPv6 addresses containing colons. | |||
# https://bugzilla.mozilla.org/show_bug.cgi?id=45891 | |||
parse_host_header = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$") | |||
class Rerouter:
    """mitmproxy addon that decides, per HTTP(S) flow, whether the request
    should be passed through, aborted, or tagged with matcher rules for
    later content modification. Decisions come from the Bubble API via
    bubble_matchers()."""

    @staticmethod
    def get_matchers(flow, host):
        """Ask the Bubble API for a match decision for this flow/host.

        Returns a dict {'decision', 'matchers', 'request_id'} or None when
        no filtering should be applied (no host, bubble-internal path or
        host, undecodable host, or no API response).
        """
        if host is None:
            return None
        # Bubble's own control-plane URIs are never filtered
        if flow.request.path and flow.request.path.startswith(BUBBLE_URI_PREFIX):
            bubble_log("get_matchers: not filtering special bubble path: "+flow.request.path)
            return None
        remote_addr = str(flow.client_conn.address[0])
        # host may arrive as bytes (e.g. from SNI); normalize to str
        try:
            host = host.decode()
        except (UnicodeDecodeError, AttributeError):
            try:
                host = str(host)
            except Exception as e:
                bubble_log('get_matchers: host '+repr(host)+' could not be decoded, type='+str(type(host))+' e='+repr(e))
                return None
        # Requests addressed to the bubble node itself are not matched
        if host == bubble_host or host == bubble_host_alias:
            bubble_log('get_matchers: request is for bubble itself ('+host+'), not matching')
            return None
        # Unique request id: host + random uuid + timestamp
        req_id = str(host) + '.' + str(uuid.uuid4()) + '.' + str(time.time())
        bubble_log("get_matchers: requesting match decision for req_id="+req_id)
        resp = bubble_matchers(req_id, remote_addr, flow, host)
        if not resp:
            bubble_log('get_matchers: no response for remote_addr/host: '+remote_addr+'/'+str(host))
            return None
        matchers = []
        if 'matchers' in resp and resp['matchers'] is not None:
            # Keep only matchers whose urlRegex matches the request path;
            # matchers without a urlRegex are skipped entirely.
            for m in resp['matchers']:
                if 'urlRegex' in m:
                    bubble_log('get_matchers: checking for match of path='+flow.request.path+' against regex: '+m['urlRegex'])
                else:
                    bubble_log('get_matchers: checking for match of path='+flow.request.path+' -- NO regex, skipping')
                    continue
                if re.match(m['urlRegex'], flow.request.path):
                    bubble_log('get_matchers: rule matched, adding rule: '+m['rule'])
                    matchers.append(m)
                else:
                    bubble_log('get_matchers: rule (regex='+m['urlRegex']+') did NOT match, skipping rule: '+m['rule'])
        else:
            bubble_log('get_matchers: no matchers. response='+repr(resp))
        decision = None
        if 'decision' in resp:
            decision = resp['decision']
        matcher_response = { 'decision': decision, 'matchers': matchers, 'request_id': req_id }
        bubble_log("get_matchers: returning "+repr(matcher_response))
        return matcher_response

    def request(self, flow):
        """mitmproxy request hook: determine the target host (SNI or Host
        header), consult get_matchers(), and annotate the flow context with
        passthru/abort/matcher decisions. Always restores host/port on the
        request so upstream routing works in transparent mode."""
        client_address = flow.client_conn.address[0]
        server_address = flow.server_conn.address[0]
        # Scheme/port and SNI depend on whether the client completed TLS
        if flow.client_conn.tls_established:
            flow.request.scheme = "https"
            sni = flow.client_conn.connection.get_servername()
            port = 443
        else:
            flow.request.scheme = "http"
            sni = None
            port = 80
        host_header = flow.request.host_header
        # bubble_log("dns_spoofing.request: host_header is "+repr(host_header))
        if host_header:
            # Split "host[:port]" (IPv6-safe via parse_host_header regex)
            m = parse_host_header.match(host_header)
            if m:
                host_header = m.group("host").strip("[]")
                if m.group("port"):
                    port = int(m.group("port"))
        # Determine if this request should be filtered
        if sni or host_header:
            host = str(sni or host_header)
            # Strip a "b'...'" wrapper left by str() on a bytes SNI value
            if host.startswith("b'"):
                host = host[2:-1]
            log_url = flow.request.scheme + '://' + host + flow.request.path
            matcher_response = self.get_matchers(flow, sni or host_header)
            if matcher_response:
                if 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'] == 'passthru':
                    # passthru: mark the flow and perform no interception
                    bubble_log('dns_spoofing.request: passthru response returned, passing thru and NOT performing TLS interception...')
                    add_flow_ctx(flow, CTX_BUBBLE_PASSTHRU, True)
                    bubble_activity_log(client_address, server_address, 'http_passthru', log_url)
                    return
                elif 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'].startswith('abort_'):
                    # abort_*: map the decision to an HTTP status for the abort handler
                    bubble_log('dns_spoofing.request: found abort code: ' + str(matcher_response['decision']) + ', aborting')
                    if matcher_response['decision'] == 'abort_ok':
                        abort_code = 200
                    elif matcher_response['decision'] == 'abort_not_found':
                        abort_code = 404
                    else:
                        # Unknown abort variants default to 404
                        bubble_log('dns_spoofing.request: unknown abort code: ' + str(matcher_response['decision']) + ', aborting with 404 Not Found')
                        abort_code = 404
                    add_flow_ctx(flow, CTX_BUBBLE_ABORT, abort_code)
                    bubble_activity_log(client_address, server_address, 'http_abort' + str(abort_code), log_url)
                    return
                elif 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'] == 'no_match':
                    bubble_log('dns_spoofing.request: decision was no_match, passing thru...')
                    bubble_activity_log(client_address, server_address, 'http_no_match', log_url)
                    return
                elif ('matchers' in matcher_response
                      and 'request_id' in matcher_response
                      and len(matcher_response['matchers']) > 0):
                    # Matchers found: stash them (plus request id) on the flow
                    # context for the downstream modification addon.
                    req_id = matcher_response['request_id']
                    bubble_log("dns_spoofing.request: found request_id: " + req_id + ' with matchers: ' + repr(matcher_response['matchers']))
                    add_flow_ctx(flow, CTX_BUBBLE_MATCHERS, matcher_response['matchers'])
                    add_flow_ctx(flow, CTX_BUBBLE_REQUEST_ID, req_id)
                    bubble_activity_log(client_address, server_address, 'http_match', log_url)
                else:
                    bubble_log('dns_spoofing.request: no rules returned, passing thru...')
                    bubble_activity_log(client_address, server_address, 'http_no_rules', log_url)
            else:
                bubble_log('dns_spoofing.request: no matcher_response returned, passing thru...')
                # bubble_activity_log(client_address, server_address, 'http_no_matcher_response', log_url)
        else:
            bubble_log('dns_spoofing.request: no sni/host found, not applying rules to path: ' + flow.request.path)
            bubble_activity_log(client_address, server_address, 'http_no_sni_or_host', 'n/a')
        # Restore host/port so mitmproxy routes upstream correctly
        flow.request.host_header = host_header
        flow.request.host = sni or host_header
        flow.request.port = port
addons = [Rerouter()] |
@@ -0,0 +1,34 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Install a certificate into the system CA trust store.
# Usage: install_cert.sh <cert-file> [timeout-seconds]
#   Waits up to <timeout-seconds> (default 0) for the cert file to appear,
#   converts PEM input to a .crt, then installs it and refreshes the store.
CERT="${1:?no cert provided}"
TIMEOUT=${2:-0}

function die {
  echo 1>&2 "${1}"
  exit 1
}

# Wait (up to TIMEOUT seconds) for the cert file to exist
START=$(date +%s)
while [[ ! -f "${CERT}" ]] ; do
  # arithmetic expansion instead of the external `expr` utility
  ELAPSED=$(( $(date +%s) - START ))
  if [[ ${ELAPSED} -gt ${TIMEOUT} ]] ; then
    break
  fi
  echo "Cert file does not exist, sleeping then rechecking: ${CERT}"
  sleep 5s
done
if [[ ! -f "${CERT}" ]] ; then
  die "Cert file does not exist: ${CERT}"
fi

# NOTE(review): a .p12 file is PKCS#12 (binary), not PEM -- `openssl x509
# -inform PEM` cannot parse it and this branch will die for .p12 input.
# If .p12 must be supported, it needs `openssl pkcs12` instead -- confirm
# the intended input formats with the callers of this script.
if [[ "${CERT}" == *.pem || "${CERT}" == *.p12 ]] ; then
  openssl x509 -in "${CERT}" -inform PEM -out "${CERT}.crt" || die "Error converting certificate"
  CERT="${CERT}.crt"
fi

mkdir -p /usr/local/share/ca-certificates || die "Error ensuring CA certs directory exists"
cp "${CERT}" /usr/local/share/ca-certificates || die "Error installing certificate"
update-ca-certificates || die "Error updating CA certificates"
@@ -0,0 +1,80 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Monitor daemon for mitmdump:
#  - watches a bubble-writable marker file and toggles the iptables
#    PREROUTING redirect rules (80/443 -> 8888) to match its on/off value
#  - restarts mitmdump via supervisorctl when its memory usage exceeds
#    MAX_MITM_PCT_MEM percent
LOG=/tmp/bubble.mitmdump_monitor.log

function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

function log {
  echo "$(date): ${1}" >> ${LOG}
}

BUBBLE_MITM_MARKER=/home/bubble/.mitmdump_monitor   # desired state, written by the bubble user
ROOT_KEY_MARKER=/usr/share/bubble/mitmdump_monitor  # currently-applied state, written by root
MITMDUMP_PID_FILE=/home/mitmproxy/mitmdump.pid
MAX_MITM_PCT_MEM=18

# Start with MITM proxy turned off
if [[ ! -f ${BUBBLE_MITM_MARKER} ]] ; then
  # fix: the failure message previously named ROOT_KEY_MARKER, but this write
  # targets BUBBLE_MITM_MARKER
  echo -n off > ${BUBBLE_MITM_MARKER} && chown bubble ${BUBBLE_MITM_MARKER} || log "Error writing 'off' to ${BUBBLE_MITM_MARKER}"
fi
if [[ ! -f ${ROOT_KEY_MARKER} ]] ; then
  sleep 1s
  mkdir -p "$(dirname ${ROOT_KEY_MARKER})" && chmod 755 "$(dirname ${ROOT_KEY_MARKER})" || log "Error creating or setting permissions on ${ROOT_KEY_MARKER}"
  echo -n on > ${ROOT_KEY_MARKER} && touch ${ROOT_KEY_MARKER} && chmod 644 ${ROOT_KEY_MARKER} || log "Error writing 'on' to ${ROOT_KEY_MARKER}"
fi

function ensureMitmOn {
  log "Flushing PREROUTING before enabling MITM services"
  iptables -F PREROUTING -t nat || log "Error flushing port forwarding when enabling MITM services"
  log "Enabling MITM port forwarding on TCP port 80 -> 8888"
  iptables -I PREROUTING 1 -t nat -p tcp --dport 80 -j REDIRECT --to-ports 8888 || log "Error enabling MITM port forwarding 80 -> 8888"
  log "Enabling MITM port forwarding on TCP port 443 -> 8888"
  iptables -I PREROUTING 1 -t nat -p tcp --dport 443 -j REDIRECT --to-ports 8888 || log "Error enabling MITM port forwarding 443 -> 8888"
  echo -n on > ${ROOT_KEY_MARKER}
}

function ensureMitmOff {
  log "Flushing PREROUTING to disable MITM services"
  iptables -F PREROUTING -t nat || log "Error flushing port forwarding when disabling MITM services"
  echo -n off > ${ROOT_KEY_MARKER} || log "Error writing 'off' to ${ROOT_KEY_MARKER}"
}

log "Watching marker file ${BUBBLE_MITM_MARKER} ..."
sleep 2s && touch ${BUBBLE_MITM_MARKER} || log "Error touching ${BUBBLE_MITM_MARKER}"  # first time through, always check and set on/off state
while : ; do
  # Apply on/off only when the bubble marker is newer AND its content differs
  # from the root (applied-state) marker
  if [[ $(stat -c %Y ${BUBBLE_MITM_MARKER}) -gt $(stat -c %Y ${ROOT_KEY_MARKER}) ]] ; then
    if [[ ! -z "$(cmp -b ${ROOT_KEY_MARKER} ${BUBBLE_MITM_MARKER})" ]] ; then
      # fix: quote the tr character class so the shell cannot glob-expand [[:space:]]
      MARKER_VALUE="$(cat ${BUBBLE_MITM_MARKER} | tr -d '[:space:]')"
      if [[ "${MARKER_VALUE}" == "on" ]] ; then
        ensureMitmOn
      elif [[ "${MARKER_VALUE}" == "off" ]] ; then
        ensureMitmOff
      else
        log "Error: marker file ${BUBBLE_MITM_MARKER} contained invalid value: $(cat ${BUBBLE_MITM_MARKER} | head -c 5)"
      fi
    fi
  fi
  # Check process memory usage, restart mitmdump if memory goes above max % allowed
  if [[ -f ${MITMDUMP_PID_FILE} && -s ${MITMDUMP_PID_FILE} ]] ; then
    MITM_PID="$(cat ${MITMDUMP_PID_FILE})"
    # %mem integer part only; strip non-digits so the -ge test below is numeric-safe
    PCT_MEM="$(ps q ${MITM_PID} -o %mem --no-headers | tr -d '[:space:]' | cut -f1 -d. | sed 's/[^0-9]*//g')"
    # log "Info: mitmdump pid ${MITM_PID} using ${PCT_MEM}% of memory"
    if [[ ! -z "${PCT_MEM}" ]] ; then
      if [[ ${PCT_MEM} -ge ${MAX_MITM_PCT_MEM} ]] ; then
        log "Warn: mitmdump: pid=${MITM_PID} memory used > max, restarting: ${PCT_MEM}% >= ${MAX_MITM_PCT_MEM}%"
        supervisorctl restart mitmdump
      fi
    else
      log "Error: could not determine mitmdump % memory, maybe PID file ${MITMDUMP_PID_FILE} is out of date? pid found was ${MITM_PID}"
    fi
  else
    log "Error: mitmdump PID file ${MITMDUMP_PID_FILE} not found or empty"
  fi
  sleep 5s
done
@@ -0,0 +1,30 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Restore backed-up mitm certs (if present) into the mitmproxy home dir,
# replacing any certs currently there and fixing ownership/permissions.

function die {
  echo 1>&2 "${1}"
  exit 1
}

CERTS_BACKUP=/home/bubble/mitm_certs
MITM_CERTS=/home/mitmproxy/.mitmproxy

# Without a backup directory there is nothing to restore
if [[ ! -d "${CERTS_BACKUP}" ]] ; then
  echo "No mitm_certs backup found, skipping restore"
  exit 0
fi

# Wipe the existing cert dir, verifying removal actually happened
if [[ -d "${MITM_CERTS}" ]] ; then
  echo "Removing obsolete mitm certs: ${MITM_CERTS}"
  rm -rf "${MITM_CERTS}" || die "Error removing obsolete mitm certs"
  [[ -d "${MITM_CERTS}" ]] && die "Error removing obsolete mitm certs: dir still exists: ${MITM_CERTS}"
fi

# Recreate the cert dir, copy the backup in, then lock down ownership/modes
mkdir -p "${MITM_CERTS}" || die "Error creating mitm certs dir: ${MITM_CERTS}"
chmod 750 "${MITM_CERTS}" || die "Error setting permissions on mitm certs dir: ${MITM_CERTS}"
cp -R "${CERTS_BACKUP}"/* "${MITM_CERTS}"/ || die "Error restoring mitm certs"
chown -R mitmproxy "${MITM_CERTS}" || die "Error changing ownership of ${MITM_CERTS}"
chgrp -R root "${MITM_CERTS}" || die "Error changing group ownership of ${MITM_CERTS}"
chmod 440 "${MITM_CERTS}"/* || die "Error setting permissions on mitm certs files"
@@ -0,0 +1,22 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Launch mitmdump in transparent-proxy mode on the given port, with the
# Bubble addon scripts (dns_spoofing, bubble_passthru, bubble_modify) loaded.
# Usage: run_mitmdump.sh <listen-port>
MITM_PORT=${1:?no port provided}
# ./dev.sh prepares the mitmproxy virtualenv, which is then activated
# before mitmdump is started (all three steps chained with &&)
cd /home/mitmproxy/mitmproxy && \
./dev.sh && . ./venv/bin/activate && \
mitmdump \
  --listen-host 0.0.0.0 \
  --listen-port ${MITM_PORT} \
  --showhost \
  --no-http2 \
  --set block_global=true \
  --set block_private=false \
  --set termlog_verbosity=debug \
  --set flow_detail=3 \
  --set stream_large_bodies=5m \
  --set keep_host_header \
  -s ./dns_spoofing.py \
  -s ./bubble_passthru.py \
  -s ./bubble_modify.py \
  --mode transparent
@@ -0,0 +1,23 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Rewrite mitmproxy's CONF_BASENAME constant in options.py so the generated
# CA certificate uses the given name.
# Usage: set_cert_name.sh <mitm-dir> <cert-name>
MITM_DIR=${1:?no mitm dir specified}
CERT_NAME=${2:?no cert name specified}

if [[ ! -d "${MITM_DIR}" ]] ; then
  echo "mitm dir does not exist or is not a directory: ${MITM_DIR}"
  exit 1
fi

OPTIONS_FILE="${MITM_DIR}/mitmproxy/options.py"
if [[ ! -f "${OPTIONS_FILE}" ]] ; then
  echo "options.py not found in mitm dir: ${MITM_DIR}"
  exit 1
fi

# Only rewrite when the CONF_BASENAME line does not already mention CERT_NAME.
# Uses grep -q's exit status instead of the old `cat | egrep | grep | wc -l` pipeline.
if ! egrep '^CONF_BASENAME =' "${OPTIONS_FILE}" | grep -q "${CERT_NAME}" ; then
  temp="$(mktemp /tmp/options.py.XXXXXXX)"
  # Rewrite via a temp file, then move into place; clean up the temp file on failure
  if sed -e 's/^CONF_BASENAME\s*=.*/CONF_BASENAME = "'"${CERT_NAME}"'"/' "${OPTIONS_FILE}" > "${temp}" ; then
    mv "${temp}" "${OPTIONS_FILE}"
  else
    rm -f "${temp}"
    exit 1
  fi
fi
@@ -0,0 +1,5 @@ | |||
# Supervisor program definition for the mitmdump monitor script.
# stdout/stderr are discarded: the script writes its own log file
# (/tmp/bubble.mitmdump_monitor.log).
[program:mitmdump_monitor]
stdout_logfile = /dev/null
stderr_logfile = /dev/null
command=/usr/local/sbin/mitmdump_monitor.sh
@@ -0,0 +1,7 @@ | |||
# Supervisor program definition for mitmdump itself, run as the mitmproxy
# user on port 8888 (the port targeted by the iptables REDIRECT rules).
# stopasgroup/QUIT ensure the whole sudo-spawned process group is stopped.
[program:mitmdump]
stdout_logfile = /home/mitmproxy/mitmdump-out.log
stderr_logfile = /home/mitmproxy/mitmdump-err.log
command=sudo -H -u mitmproxy bash -c "/home/mitmproxy/mitmproxy/run_mitmdump.sh 8888"
stopasgroup=true
stopsignal=QUIT
@@ -0,0 +1,99 @@ | |||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Ansible tasks: install mitmproxy plus the Bubble addon scripts, helper
# scripts, the mitmdump monitor, and the supervisor configs that run them.
- name: Install python3, pip, virtualenv and required dependencies
  apt:
    name: [ 'python3-pip', 'python3-venv', 'libc6-dev', 'libpython3-dev', 'g++', 'libffi-dev' ]
    state: present
    update_cache: yes

- name: Install supervisor conf file
  copy:
    src: supervisor_mitmproxy.conf
    dest: /etc/supervisor/conf.d/mitmproxy.conf
    owner: root
    group: root
    mode: 0400

- name: Create mitmproxy user
  user:
    name: mitmproxy
    comment: mitmdump user
    shell: /bin/bash
    system: yes
    home: /home/mitmproxy

- name: Creates mitmproxy dir
  file:
    path: /home/mitmproxy/mitmproxy
    owner: mitmproxy
    group: mitmproxy
    mode: 0755
    state: directory

# Fetch a pinned (sha256-verified) mitmproxy distribution and unpack it
- name: Download mitmproxy dist file
  get_url:
    url: https://github.com/getbubblenow/bubble-dist/raw/master/mitmproxy/mitmproxy.zip
    dest: /root/ansible-staging/mitmproxy.zip
    checksum: sha256:c578ca9da75777a30f7af065583e5e29e65336a2dc346d6453dfa9c002a8bcc2

- name: Unzip mitmproxy.zip
  unarchive:
    remote_src: yes
    src: /root/ansible-staging/mitmproxy.zip
    dest: /home/mitmproxy/mitmproxy

# Bubble addon scripts and the launcher, readable/executable only by mitmproxy
- name: Copy mitmdump files
  copy:
    src: "{{ item }}"
    dest: "/home/mitmproxy/mitmproxy/{{ item }}"
    owner: mitmproxy
    group: mitmproxy
    mode: 0500
  with_items:
    - bubble_api.py
    - dns_spoofing.py
    - bubble_passthru.py
    - bubble_modify.py
    - run_mitmdump.sh

- name: Install cert helper scripts
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: root
    mode: 0500
  with_items:
    - install_cert.sh
    - set_cert_name.sh
    - reuse_bubble_mitm_certs.sh

- name: Set ownership of mitmproxy files
  shell: chown -R mitmproxy /home/mitmproxy/mitmproxy

- name: Reuse bubble mitm certs if available
  shell: reuse_bubble_mitm_certs.sh

# NOTE(review): presumably needed so native builds in the venv can link
# against libstdc++ -- confirm which dependency requires this symlink
- name: Fix missing symlink for libstdc++
  file:
    src: /usr/lib/x86_64-linux-gnu/libstdc++.so.6
    dest: /usr/lib/x86_64-linux-gnu/libstdc++.so
    owner: root
    group: root
    state: link

# route.yml sets up the iptables REDIRECT rules for transparent proxying
- import_tasks: route.yml

- name: Install mitmdump_monitor
  copy:
    src: "mitmdump_monitor.sh"
    dest: "/usr/local/sbin/mitmdump_monitor.sh"
    owner: root
    group: root
    mode: 0500

- name: Install mitmdump_monitor supervisor conf file
  copy:
    src: supervisor_mitmdump_monitor.conf
    dest: /etc/supervisor/conf.d/mitmdump_monitor.conf
@@ -0,0 +1,58 @@ | |||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Ansible tasks: kernel forwarding sysctls plus the iptables rules that
# transparently redirect HTTP/HTTPS traffic into mitmdump on port 8888.

# Enable IPv4/IPv6 forwarding; disable ICMP redirects (transparent proxy host)
- sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_set: yes

- sysctl:
    name: net.ipv6.conf.all.forwarding
    value: 1
    sysctl_set: yes

- sysctl:
    name: net.ipv4.conf.all.send_redirects
    value: 0
    sysctl_set: yes

- name: "Allow MITM private port"
  iptables:
    chain: INPUT
    action: insert
    rule_num: 9
    protocol: tcp
    destination_port: 8888
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new local connections on mitm port
  become: yes

# NAT PREROUTING redirects send ports 80/443 to mitmdump's listen port 8888
- name: Route port 80 through mitmproxy
  iptables:
    table: nat
    chain: PREROUTING
    action: insert
    rule_num: 1
    protocol: tcp
    destination_port: 80
    jump: REDIRECT
    to_ports: 8888

- name: Route port 443 through mitmproxy
  iptables:
    table: nat
    chain: PREROUTING
    action: insert
    rule_num: 2
    protocol: tcp
    destination_port: 443
    jump: REDIRECT
    to_ports: 8888

# Persist the rules so they survive reboot (iptables-persistent layout)
- name: save iptables rules
  shell: iptables-save > /etc/iptables/rules.v4
  become: yes

- name: save iptables v6 rules
  shell: ip6tables-save > /etc/iptables/rules.v6
  become: yes